Commit 9253325

mm: cachestat: fix folio read-after-free in cache walk
jira LE-1907
cve CVE-2024-26630
Rebuild_History Non-Buildable kernel-5.14.0-427.35.1.el9_4
commit-author Nhat Pham <nphamcs@gmail.com>
commit 3a75cb0

In cachestat, we access the folio from the page cache's xarray to compute
its page offset, and check for its dirty and writeback flags. However, we
do not hold a reference to the folio before performing these actions,
which means the folio can concurrently be released and reused as another
folio/page/slab.

Get around this altogether by just using xarray's existing machinery for
the folio page offsets and dirty/writeback states.

This changes behavior for tmpfs files to now always report zeroes in
their dirty and writeback counters. This is okay as tmpfs doesn't follow
conventional writeback cache behavior: its pages get "cleaned" during
swapout, after which they're no longer resident etc.

Link: https://lkml.kernel.org/r/20240220153409.GA216065@cmpxchg.org
Fixes: cf264e1 ("cachestat: implement cachestat syscall")
Reported-by: Jann Horn <jannh@google.com>
Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Nhat Pham <nphamcs@gmail.com>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Tested-by: Jann Horn <jannh@google.com>
Cc: <stable@vger.kernel.org> [6.4+]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
(cherry picked from commit 3a75cb0)
Signed-off-by: Jonathan Maple <jmaple@ciq.com>
1 parent 33c3842 commit 9253325
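
For context, the code being fixed backs the cachestat(2) syscall. The sketch below is not part of the commit; it only illustrates how userspace reads the counters that filemap_cachestat() fills in (nr_cache, nr_dirty, nr_writeback, nr_evicted). It assumes a kernel that provides cachestat (v6.5+, or a backport such as this one) and uapi headers that define __NR_cachestat together with struct cachestat and struct cachestat_range.

/*
 * Minimal userspace sketch (not part of this commit): read the page
 * cache counters that cachestat(2) reports for a file. Assumes the
 * kernel and uapi headers listed above.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mman.h>	/* struct cachestat, struct cachestat_range */

int main(int argc, char **argv)
{
	struct cachestat_range range = { .off = 0, .len = 0 }; /* len == 0: whole file */
	struct cachestat cs = { 0 };
	int fd;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	fd = open(argv[1], O_RDONLY);
	if (fd < 0 || syscall(__NR_cachestat, fd, &range, &cs, 0)) {
		perror("cachestat");
		return 1;
	}

	/* With this fix, tmpfs files always report 0 dirty/writeback pages. */
	printf("cache=%llu dirty=%llu writeback=%llu evicted=%llu\n",
	       (unsigned long long)cs.nr_cache,
	       (unsigned long long)cs.nr_dirty,
	       (unsigned long long)cs.nr_writeback,
	       (unsigned long long)cs.nr_evicted);

	close(fd);
	return 0;
}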

File tree

1 file changed (+26, -25 lines)

mm/filemap.c

Lines changed: 26 additions & 25 deletions
@@ -3956,28 +3956,40 @@ static void filemap_cachestat(struct address_space *mapping,
 
 	rcu_read_lock();
 	xas_for_each(&xas, folio, last_index) {
+		int order;
 		unsigned long nr_pages;
 		pgoff_t folio_first_index, folio_last_index;
 
+		/*
+		 * Don't deref the folio. It is not pinned, and might
+		 * get freed (and reused) underneath us.
+		 *
+		 * We *could* pin it, but that would be expensive for
+		 * what should be a fast and lightweight syscall.
+		 *
+		 * Instead, derive all information of interest from
+		 * the rcu-protected xarray.
+		 */
+
 		if (xas_retry(&xas, folio))
 			continue;
 
+		order = xa_get_order(xas.xa, xas.xa_index);
+		nr_pages = 1 << order;
+		folio_first_index = round_down(xas.xa_index, 1 << order);
+		folio_last_index = folio_first_index + nr_pages - 1;
+
+		/* Folios might straddle the range boundaries, only count covered pages */
+		if (folio_first_index < first_index)
+			nr_pages -= first_index - folio_first_index;
+
+		if (folio_last_index > last_index)
+			nr_pages -= folio_last_index - last_index;
+
 		if (xa_is_value(folio)) {
 			/* page is evicted */
 			void *shadow = (void *)folio;
 			bool workingset; /* not used */
-			int order = xa_get_order(xas.xa, xas.xa_index);
-
-			nr_pages = 1 << order;
-			folio_first_index = round_down(xas.xa_index, 1 << order);
-			folio_last_index = folio_first_index + nr_pages - 1;
-
-			/* Folios might straddle the range boundaries, only count covered pages */
-			if (folio_first_index < first_index)
-				nr_pages -= first_index - folio_first_index;
-
-			if (folio_last_index > last_index)
-				nr_pages -= folio_last_index - last_index;
 
 			cs->nr_evicted += nr_pages;
 
@@ -4011,24 +4023,13 @@ static void filemap_cachestat(struct address_space *mapping,
 			goto resched;
 		}
 
-		nr_pages = folio_nr_pages(folio);
-		folio_first_index = folio_pgoff(folio);
-		folio_last_index = folio_first_index + nr_pages - 1;
-
-		/* Folios might straddle the range boundaries, only count covered pages */
-		if (folio_first_index < first_index)
-			nr_pages -= first_index - folio_first_index;
-
-		if (folio_last_index > last_index)
-			nr_pages -= folio_last_index - last_index;
-
 		/* page is in cache */
 		cs->nr_cache += nr_pages;
 
-		if (folio_test_dirty(folio))
+		if (xas_get_mark(&xas, PAGECACHE_TAG_DIRTY))
 			cs->nr_dirty += nr_pages;
 
-		if (folio_test_writeback(folio))
+		if (xas_get_mark(&xas, PAGECACHE_TAG_WRITEBACK))
 			cs->nr_writeback += nr_pages;
 
 resched:
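
To make the race concrete, the pre-fix pattern removed above dereferenced the unpinned folio pointer directly. Below is a condensed, annotated sketch of that old path, not the verbatim code: the shadow-entry branch and the range clamping are omitted, and the annotations are not in the original source.

/* Condensed sketch of the removed code path; annotations added here. */
rcu_read_lock();
xas_for_each(&xas, folio, last_index) {
	if (xas_retry(&xas, folio))
		continue;

	/*
	 * The folio pointer came straight out of the xarray with no
	 * reference held, so it can be freed and reused underneath us
	 * at any point; every dereference below may touch memory that
	 * now belongs to another folio, page, or slab object.
	 */
	nr_pages = folio_nr_pages(folio);	/* deref of unpinned folio */
	folio_first_index = folio_pgoff(folio);	/* ditto */

	if (folio_test_dirty(folio))		/* reads possibly-reused flags */
		cs->nr_dirty += nr_pages;
	if (folio_test_writeback(folio))	/* ditto */
		cs->nr_writeback += nr_pages;
}
rcu_read_unlock();

The fix sidesteps the problem entirely: folio order comes from xa_get_order() and the dirty/writeback states from the xarray marks (xas_get_mark()), so the folio is never dereferenced.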