From e8e89fdce3ea85e1523ba29c7f850751ba17fb85 Mon Sep 17 00:00:00 2001 From: CIQ Kernel Automation Date: Mon, 4 May 2026 07:17:55 +0000 Subject: [PATCH 01/12] mmc: sdio: fix possible resource leaks in some error paths jira VULN-35546 cve CVE-2023-52730 commit-author Yang Yingliang commit 605d9fb9556f8f5fb4566f4df1480f280f308ded If sdio_add_func() or sdio_init_func() fails, sdio_remove_func() cannot release the resources: because the sdio function is not present in these two cases, it won't call of_node_put() or put_device(). To fix these leaks, make sdio_func_present() only control whether device_del() needs to be called or not, then always call of_node_put() and put_device(). In the error case in sdio_init_func(), the reference to 'card->dev' is never taken; to avoid a redundant put in sdio_free_func_cis(), move the get_device() to sdio_alloc_func() and the put_device() to sdio_release_func(), which keeps the get/put calls balanced. Without this patch, fault injection testing produces the following leak reports; after this fix, the leak is gone. unreferenced object 0xffff888112514000 (size 2048): comm "kworker/3:2", pid 65, jiffies 4294741614 (age 124.774s) hex dump (first 32 bytes): 00 e0 6f 12 81 88 ff ff 60 58 8d 06 81 88 ff ff ..o.....`X...... 10 40 51 12 81 88 ff ff 10 40 51 12 81 88 ff ff .@Q......@Q..... backtrace: [<000000009e5931da>] kmalloc_trace+0x21/0x110 [<000000002f839ccb>] mmc_alloc_card+0x38/0xb0 [mmc_core] [<0000000004adcbf6>] mmc_sdio_init_card+0xde/0x170 [mmc_core] [<000000007538fea0>] mmc_attach_sdio+0xcb/0x1b0 [mmc_core] [<00000000d4fdeba7>] mmc_rescan+0x54a/0x640 [mmc_core] unreferenced object 0xffff888112511000 (size 2048): comm "kworker/3:2", pid 65, jiffies 4294741623 (age 124.766s) hex dump (first 32 bytes): 00 40 51 12 81 88 ff ff e0 58 8d 06 81 88 ff ff .@Q......X...... 10 10 51 12 81 88 ff ff 10 10 51 12 81 88 ff ff ..Q.......Q..... backtrace: [<000000009e5931da>] kmalloc_trace+0x21/0x110 [<00000000fcbe706c>] sdio_alloc_func+0x35/0x100 [mmc_core] [<00000000c68f4b50>] mmc_attach_sdio.cold.18+0xb1/0x395 [mmc_core] [<00000000d4fdeba7>] mmc_rescan+0x54a/0x640 [mmc_core] Fixes: 3d10a1ba0d37 ("sdio: fix reference counting in sdio_remove_func()") Signed-off-by: Yang Yingliang Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230130125808.3471254-1-yangyingliang@huawei.com Signed-off-by: Ulf Hansson (cherry picked from commit 605d9fb9556f8f5fb4566f4df1480f280f308ded) Signed-off-by: CIQ Kernel Automation --- drivers/mmc/core/sdio_bus.c | 17 ++++++++++++++--- drivers/mmc/core/sdio_cis.c | 12 ------------ 2 files changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/mmc/core/sdio_bus.c b/drivers/mmc/core/sdio_bus.c index a935299a2ee4e..31fa694a5c0ee 100644 --- a/drivers/mmc/core/sdio_bus.c +++ b/drivers/mmc/core/sdio_bus.c @@ -292,6 +292,12 @@ static void sdio_release_func(struct device *dev) sdio_free_func_cis(func); + /* + * We have now removed the link to the tuples in the + * card structure, so remove the reference. + */ + put_device(&func->card->dev); + kfree(func->info); kfree(func->tmpbuf); kfree(func); @@ -322,6 +328,12 @@ struct sdio_func *sdio_alloc_func(struct mmc_card *card) device_initialize(&func->dev); + /* + * We may link to tuples in the card structure, + * we need make sure we have a reference to it.
+ */ + get_device(&func->card->dev); + func->dev.parent = &card->dev; func->dev.bus = &sdio_bus_type; func->dev.release = sdio_release_func; @@ -375,10 +387,9 @@ int sdio_add_func(struct sdio_func *func) */ void sdio_remove_func(struct sdio_func *func) { - if (!sdio_func_present(func)) - return; + if (sdio_func_present(func)) + device_del(&func->dev); - device_del(&func->dev); of_node_put(func->dev.of_node); put_device(&func->dev); } diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c index b23773583179d..ce524f7e11fb2 100644 --- a/drivers/mmc/core/sdio_cis.c +++ b/drivers/mmc/core/sdio_cis.c @@ -391,12 +391,6 @@ int sdio_read_func_cis(struct sdio_func *func) if (ret) return ret; - /* - * Since we've linked to tuples in the card structure, - * we must make sure we have a reference to it. - */ - get_device(&func->card->dev); - /* * Vendor/device id is optional for function CIS, so * copy it from the card structure as needed. @@ -422,11 +416,5 @@ void sdio_free_func_cis(struct sdio_func *func) } func->tuples = NULL; - - /* - * We have now removed the link to the tuples in the - * card structure, so remove the reference. - */ - put_device(&func->card->dev); } From 61ae24968663d811d00818d0aaa32a3b15fb4375 Mon Sep 17 00:00:00 2001 From: CIQ Kernel Automation Date: Mon, 4 May 2026 07:18:22 +0000 Subject: [PATCH 02/12] rcu-tasks: Avoid pr_info() with spin lock in cblist_init_generic() jira VULN-157616 cve CVE-2023-53558 commit-author Shigeru Yoshida commit 5fc8cbe4cf0fd34ded8045c385790c3bf04f6785 pr_info() is called with rtp->cbs_gbl_lock spin lock locked. Because pr_info() calls printk() that might sleep, this will result in BUG like below: [ 0.206455] cblist_init_generic: Setting adjustable number of callback queues. [ 0.206463] [ 0.206464] ============================= [ 0.206464] [ BUG: Invalid wait context ] [ 0.206465] 5.19.0-00428-g9de1f9c8ca51 #5 Not tainted [ 0.206466] ----------------------------- [ 0.206466] swapper/0/1 is trying to lock: [ 0.206467] ffffffffa0167a58 (&port_lock_key){....}-{3:3}, at: serial8250_console_write+0x327/0x4a0 [ 0.206473] other info that might help us debug this: [ 0.206473] context-{5:5} [ 0.206474] 3 locks held by swapper/0/1: [ 0.206474] #0: ffffffff9eb597e0 (rcu_tasks.cbs_gbl_lock){....}-{2:2}, at: cblist_init_generic.constprop.0+0x14/0x1f0 [ 0.206478] #1: ffffffff9eb579c0 (console_lock){+.+.}-{0:0}, at: _printk+0x63/0x7e [ 0.206482] #2: ffffffff9ea77780 (console_owner){....}-{0:0}, at: console_emit_next_record.constprop.0+0x111/0x330 [ 0.206485] stack backtrace: [ 0.206486] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 5.19.0-00428-g9de1f9c8ca51 #5 [ 0.206488] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.0-1.fc36 04/01/2014 [ 0.206489] Call Trace: [ 0.206490] [ 0.206491] dump_stack_lvl+0x6a/0x9f [ 0.206493] __lock_acquire.cold+0x2d7/0x2fe [ 0.206496] ? stack_trace_save+0x46/0x70 [ 0.206497] lock_acquire+0xd1/0x2f0 [ 0.206499] ? serial8250_console_write+0x327/0x4a0 [ 0.206500] ? __lock_acquire+0x5c7/0x2720 [ 0.206502] _raw_spin_lock_irqsave+0x3d/0x90 [ 0.206504] ? serial8250_console_write+0x327/0x4a0 [ 0.206506] serial8250_console_write+0x327/0x4a0 [ 0.206508] console_emit_next_record.constprop.0+0x180/0x330 [ 0.206511] console_unlock+0xf7/0x1f0 [ 0.206512] vprintk_emit+0xf7/0x330 [ 0.206514] _printk+0x63/0x7e [ 0.206516] cblist_init_generic.constprop.0.cold+0x24/0x32 [ 0.206518] rcu_init_tasks_generic+0x5/0xd9 [ 0.206522] kernel_init_freeable+0x15b/0x2a2 [ 0.206523] ? 
rest_init+0x160/0x160 [ 0.206526] kernel_init+0x11/0x120 [ 0.206527] ret_from_fork+0x1f/0x30 [ 0.206530] [ 0.207018] cblist_init_generic: Setting shift to 1 and lim to 1. This patch moves pr_info() so that it is called without rtp->cbs_gbl_lock locked. Signed-off-by: Shigeru Yoshida Tested-by: "Zhang, Qiang1" Signed-off-by: Paul E. McKenney (cherry picked from commit 5fc8cbe4cf0fd34ded8045c385790c3bf04f6785) Signed-off-by: CIQ Kernel Automation --- kernel/rcu/tasks.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 852e0d42ee511..9f93fbc13b4b4 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -241,7 +241,6 @@ static void cblist_init_generic(struct rcu_tasks *rtp) if (rcu_task_enqueue_lim < 0) { rcu_task_enqueue_lim = 1; rcu_task_cb_adjust = true; - pr_info("%s: Setting adjustable number of callback queues.\n", __func__); } else if (rcu_task_enqueue_lim == 0) { rcu_task_enqueue_lim = 1; } @@ -272,6 +271,10 @@ static void cblist_init_generic(struct rcu_tasks *rtp) raw_spin_unlock_rcu_node(rtpcp); // irqs remain disabled. } raw_spin_unlock_irqrestore(&rtp->cbs_gbl_lock, flags); + + if (rcu_task_cb_adjust) + pr_info("%s: Setting adjustable number of callback queues.\n", __func__); + pr_info("%s: Setting shift to %d and lim to %d.\n", __func__, data_race(rtp->percpu_enqueue_shift), data_race(rtp->percpu_enqueue_lim)); } From fafb2197f546712538e7f26185e305bcef0c1e40 Mon Sep 17 00:00:00 2001 From: CIQ Kernel Automation Date: Mon, 4 May 2026 07:18:49 +0000 Subject: [PATCH 03/12] ext4: allow ext4_get_group_info() to fail jira VULN-157086 cve CVE-2023-53503 commit-author Theodore Ts'o commit 5354b2af34064a4579be8bc0e2f15a7b70f14b5f upstream-diff | Added fifth argument in 'mb_regenerate_buddy' call to 'ext4_mb_generate_buddy' introduced in commit c9b528c35795 ("ext4: regenerate buddy after block freeing failed if under fc replay") The call there was missing the extra argument because that change was backported before this one and had to be adjusted manually; now that this commit introduces the fifth argument to ext4_mb_generate_buddy(), that manual adjustment is effectively reverted. Previously, ext4_get_group_info() would treat an invalid group number as BUG(), since in theory it should never happen. However, if a malicious attacker (or fuzzer) modifies the superblock via the block device while the file system is mounted, it is possible for s_first_data_block to get set to a very large number. In that case, calculating the block group of some block number (such as the starting block of a preallocation region) could result in an underflow and a very large block group number. Then the BUG_ON check in ext4_get_group_info() would fire, resulting in a denial of service attack that can be triggered by root or someone with write access to the block device. From a quality of implementation perspective, it's best that even if the system administrator does something they shouldn't, it will not trigger a BUG. So instead of BUG'ing, ext4_get_group_info() will call ext4_error and return NULL. We also add fallback code in all of the callers of ext4_get_group_info() in case it returns NULL. Also, since ext4_get_group_info() was already borderline to be an inline function, un-inline it. This results in a net reduction of the compiled text size of ext4 by roughly 2k.
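In outline, the un-inlined helper and the new caller pattern look like this (a condensed sketch of the hunks below, not the verbatim code; individual callers differ in whether they return -EFSCORRUPTED, skip the group, or bail out silently, and patch 05/12 in this series drops the ext4_error() call again):

	struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
						    ext4_group_t group)
	{
		struct ext4_group_info **grp_info;
		long indexv, indexh;

		if (unlikely(group >= EXT4_SB(sb)->s_groups_count)) {
			ext4_error(sb, "invalid group %u", group);	/* was: BUG_ON() */
			return NULL;
		}
		indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
		indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
		grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
		return grp_info[indexh];
	}

	/* every caller now checks the result, for example: */
	grp = ext4_get_group_info(sb, group);
	if (!grp)
		return -EFSCORRUPTED;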
Cc: stable@kernel.org Link: https://lore.kernel.org/r/20230430154311.579720-2-tytso@mit.edu Reported-by: syzbot+e2efa3efc15a1c9e95c3@syzkaller.appspotmail.com Link: https://syzkaller.appspot.com/bug?id=69b28112e098b070f639efb356393af3ffec4220 Signed-off-by: Theodore Ts'o Reviewed-by: Jan Kara (cherry picked from commit 5354b2af34064a4579be8bc0e2f15a7b70f14b5f) Signed-off-by: CIQ Kernel Automation Signed-off-by: Roxana Nicolescu --- fs/ext4/balloc.c | 18 ++++++++++++- fs/ext4/ext4.h | 15 ++--------- fs/ext4/ialloc.c | 12 ++++++--- fs/ext4/mballoc.c | 66 ++++++++++++++++++++++++++++++++++++++--------- fs/ext4/super.c | 2 ++ 5 files changed, 83 insertions(+), 30 deletions(-) diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 1187726ca08ef..09c75a3127c72 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -321,6 +321,22 @@ static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb, return (next_zero_bit < bitmap_size ? next_zero_bit : 0); } +struct ext4_group_info *ext4_get_group_info(struct super_block *sb, + ext4_group_t group) +{ + struct ext4_group_info **grp_info; + long indexv, indexh; + + if (unlikely(group >= EXT4_SB(sb)->s_groups_count)) { + ext4_error(sb, "invalid group %u", group); + return NULL; + } + indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb)); + indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); + grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv); + return grp_info[indexh]; +} + /* * Return the block number which was discovered to be invalid, or 0 if * the block bitmap is valid. @@ -395,7 +411,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb, if (buffer_verified(bh)) return 0; - if (EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) + if (!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) return -EFSCORRUPTED; ext4_lock_group(sb, block_group); diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 8d3ee9ba688e0..ffd4ce80d2be4 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2741,6 +2741,8 @@ extern void ext4_check_blocks_bitmap(struct super_block *); extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb, ext4_group_t block_group, struct buffer_head ** bh); +extern struct ext4_group_info *ext4_get_group_info(struct super_block *sb, + ext4_group_t group); extern int ext4_should_retry_alloc(struct super_block *sb, int *retries); extern struct buffer_head *ext4_read_block_bitmap_nowait(struct super_block *sb, @@ -3348,19 +3350,6 @@ static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size) raw_inode->i_size_high = cpu_to_le32(i_size >> 32); } -static inline -struct ext4_group_info *ext4_get_group_info(struct super_block *sb, - ext4_group_t group) -{ - struct ext4_group_info **grp_info; - long indexv, indexh; - BUG_ON(group >= EXT4_SB(sb)->s_groups_count); - indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb)); - indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); - grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv); - return grp_info[indexh]; -} - /* * Reading s_groups_count requires using smp_rmb() afterwards. 
See * the locking protocol documented in the comments of ext4_group_add() diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c index c5e0d700a3060..ccbd48859a6c3 100644 --- a/fs/ext4/ialloc.c +++ b/fs/ext4/ialloc.c @@ -91,7 +91,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb, if (buffer_verified(bh)) return 0; - if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) + if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) return -EFSCORRUPTED; ext4_lock_group(sb, block_group); @@ -293,7 +293,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode) } if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { grp = ext4_get_group_info(sb, block_group); - if (unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) { + if (!grp || unlikely(EXT4_MB_GRP_IBITMAP_CORRUPT(grp))) { fatal = -EFSCORRUPTED; goto error_return; } @@ -1046,7 +1046,7 @@ struct inode *__ext4_new_inode(struct user_namespace *mnt_userns, * Skip groups with already-known suspicious inode * tables */ - if (EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) + if (!grp || EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) goto next_group; } @@ -1183,6 +1183,10 @@ struct inode *__ext4_new_inode(struct user_namespace *mnt_userns, if (!(sbi->s_mount_state & EXT4_FC_REPLAY)) { grp = ext4_get_group_info(sb, group); + if (!grp) { + err = -EFSCORRUPTED; + goto out; + } down_read(&grp->alloc_sem); /* * protect vs itable * lazyinit @@ -1526,7 +1530,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group, } gdp = ext4_get_group_desc(sb, group, &group_desc_bh); - if (!gdp) + if (!gdp || !grp) goto out; /* diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 1bb5a7276e481..09d6bc59e9ee3 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -745,6 +745,8 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, MB_CHECK_ASSERT(e4b->bd_info->bb_fragments == fragments); grp = ext4_get_group_info(sb, e4b->bd_group); + if (!grp) + return NULL; list_for_each(cur, &grp->bb_prealloc_list) { ext4_group_t groupnr; struct ext4_prealloc_space *pa; @@ -1060,9 +1062,9 @@ mb_set_largest_free_order(struct super_block *sb, struct ext4_group_info *grp) static noinline_for_stack void ext4_mb_generate_buddy(struct super_block *sb, - void *buddy, void *bitmap, ext4_group_t group) + void *buddy, void *bitmap, ext4_group_t group, + struct ext4_group_info *grp) { - struct ext4_group_info *grp = ext4_get_group_info(sb, group); struct ext4_sb_info *sbi = EXT4_SB(sb); ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb); ext4_grpblk_t i = 0; @@ -1129,7 +1131,7 @@ static void mb_regenerate_buddy(struct ext4_buddy *e4b) (e4b->bd_sb->s_blocksize_bits + 2)); ext4_mb_generate_buddy(e4b->bd_sb, e4b->bd_buddy, - e4b->bd_bitmap, e4b->bd_group); + e4b->bd_bitmap, e4b->bd_group, e4b->bd_info); } /* The buddy information is attached the buddy cache inode @@ -1199,6 +1201,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) break; grinfo = ext4_get_group_info(sb, group); + if (!grinfo) + continue; /* * If page is uptodate then we came here after online resize * which added some new uninitialized group info structs, so @@ -1264,6 +1268,10 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) group, page->index, i * blocksize); trace_ext4_mb_buddy_bitmap_load(sb, group); grinfo = ext4_get_group_info(sb, group); + if (!grinfo) { + err = -EFSCORRUPTED; + goto out; + } grinfo->bb_fragments = 0; memset(grinfo->bb_counters, 0, sizeof(*grinfo->bb_counters) * @@ -1274,7 +1282,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) ext4_lock_group(sb, 
group); /* init the buddy */ memset(data, 0xff, blocksize); - ext4_mb_generate_buddy(sb, data, incore, group); + ext4_mb_generate_buddy(sb, data, incore, group, grinfo); ext4_unlock_group(sb, group); incore = NULL; } else { @@ -1388,6 +1396,9 @@ int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) might_sleep(); mb_debug(sb, "init group %u\n", group); this_grp = ext4_get_group_info(sb, group); + if (!this_grp) + return -EFSCORRUPTED; + /* * This ensures that we don't reinit the buddy cache * page which map to the group from which we are already @@ -1462,6 +1473,8 @@ ext4_mb_load_buddy_gfp(struct super_block *sb, ext4_group_t group, blocks_per_page = PAGE_SIZE / sb->s_blocksize; grp = ext4_get_group_info(sb, group); + if (!grp) + return -EFSCORRUPTED; e4b->bd_blkbits = sb->s_blocksize_bits; e4b->bd_info = grp; @@ -2197,6 +2210,8 @@ int ext4_mb_find_by_goal(struct ext4_allocation_context *ac, struct ext4_group_info *grp = ext4_get_group_info(ac->ac_sb, group); struct ext4_free_extent ex; + if (!grp) + return -EFSCORRUPTED; if (!(ac->ac_flags & (EXT4_MB_HINT_TRY_GOAL | EXT4_MB_HINT_GOAL_ONLY))) return 0; if (grp->bb_free == 0) @@ -2424,7 +2439,7 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac, BUG_ON(cr < 0 || cr >= 4); - if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) + if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp) || !grp)) return false; free = grp->bb_free; @@ -2493,6 +2508,8 @@ static int ext4_mb_good_group_nolock(struct ext4_allocation_context *ac, ext4_grpblk_t free; int ret = 0; + if (!grp) + return -EFSCORRUPTED; if (sbi->s_mb_stats) atomic64_inc(&sbi->s_bal_cX_groups_considered[ac->ac_criteria]); if (should_lock) { @@ -2573,7 +2590,7 @@ ext4_group_t ext4_mb_prefetch(struct super_block *sb, ext4_group_t group, * prefetch once, so we avoid getblk() call, which can * be expensive. */ - if (!EXT4_MB_GRP_TEST_AND_SET_READ(grp) && + if (gdp && grp && !EXT4_MB_GRP_TEST_AND_SET_READ(grp) && EXT4_MB_GRP_NEED_INIT(grp) && ext4_free_group_clusters(sb, gdp) > 0 && !(ext4_has_group_desc_csum(sb) && @@ -2617,7 +2634,7 @@ void ext4_mb_prefetch_fini(struct super_block *sb, ext4_group_t group, gdp = ext4_get_group_desc(sb, group, NULL); grp = ext4_get_group_info(sb, group); - if (EXT4_MB_GRP_NEED_INIT(grp) && + if (grp && gdp && EXT4_MB_GRP_NEED_INIT(grp) && ext4_free_group_clusters(sb, gdp) > 0 && !(ext4_has_group_desc_csum(sb) && (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)))) { @@ -2877,6 +2894,8 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) sizeof(struct ext4_group_info); grinfo = ext4_get_group_info(sb, group); + if (!grinfo) + return 0; /* Load the group info in memory only if not already loaded. 
*/ if (unlikely(EXT4_MB_GRP_NEED_INIT(grinfo))) { err = ext4_mb_load_buddy(sb, group, &e4b); @@ -2887,7 +2906,7 @@ static int ext4_mb_seq_groups_show(struct seq_file *seq, void *v) buddy_loaded = 1; } - memcpy(&sg, ext4_get_group_info(sb, group), i); + memcpy(&sg, grinfo, i); if (buddy_loaded) ext4_mb_unload_buddy(&e4b); @@ -3248,8 +3267,12 @@ static int ext4_mb_init_backend(struct super_block *sb) err_freebuddy: cachep = get_groupinfo_cache(sb->s_blocksize_bits); - while (i-- > 0) - kmem_cache_free(cachep, ext4_get_group_info(sb, i)); + while (i-- > 0) { + struct ext4_group_info *grp = ext4_get_group_info(sb, i); + + if (grp) + kmem_cache_free(cachep, grp); + } i = sbi->s_group_info_size; rcu_read_lock(); group_info = rcu_dereference(sbi->s_group_info); @@ -3562,6 +3585,8 @@ int ext4_mb_release(struct super_block *sb) for (i = 0; i < ngroups; i++) { cond_resched(); grinfo = ext4_get_group_info(sb, i); + if (!grinfo) + continue; mb_group_bb_bitmap_free(grinfo); ext4_lock_group(sb, i); count = ext4_mb_cleanup_pa(grinfo); @@ -4781,6 +4806,8 @@ static void ext4_mb_generate_from_freelist(struct super_block *sb, void *bitmap, struct ext4_free_data *entry; grp = ext4_get_group_info(sb, group); + if (!grp) + return; n = rb_first(&(grp->bb_free_root)); while (n) { @@ -4808,6 +4835,9 @@ void ext4_mb_generate_from_pa(struct super_block *sb, void *bitmap, int preallocated = 0; int len; + if (!grp) + return; + /* all form of preallocation discards first load group, * so the only competing code is preallocation use. * we don't need any locking here @@ -5040,6 +5070,8 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac) ei = EXT4_I(ac->ac_inode); grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); + if (!grp) + return; pa->pa_node_lock.inode_lock = &ei->i_prealloc_lock; pa->pa_inode = ac->ac_inode; @@ -5089,6 +5121,8 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac) atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated); grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group); + if (!grp) + return; lg = ac->ac_lg; BUG_ON(lg == NULL); @@ -5214,6 +5248,8 @@ ext4_mb_discard_group_preallocations(struct super_block *sb, int err; int free = 0; + if (!grp) + return 0; mb_debug(sb, "discard preallocation for group %u\n", group); if (list_empty(&grp->bb_prealloc_list)) goto out_dbg; @@ -5468,6 +5504,9 @@ static inline void ext4_mb_show_pa(struct super_block *sb) struct ext4_prealloc_space *pa; ext4_grpblk_t start; struct list_head *cur; + + if (!grp) + continue; ext4_lock_group(sb, i); list_for_each(cur, &grp->bb_prealloc_list) { pa = list_entry(cur, struct ext4_prealloc_space, @@ -6241,6 +6280,7 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, struct buffer_head *bitmap_bh = NULL; struct super_block *sb = inode->i_sb; struct ext4_group_desc *gdp; + struct ext4_group_info *grp; unsigned int overflow; ext4_grpblk_t bit; struct buffer_head *gd_bh; @@ -6266,8 +6306,8 @@ static void ext4_mb_clear_bb(handle_t *handle, struct inode *inode, overflow = 0; ext4_get_group_no_and_offset(sb, block, &block_group, &bit); - if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT( - ext4_get_group_info(sb, block_group)))) + grp = ext4_get_group_info(sb, block_group); + if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) return; /* @@ -6869,6 +6909,8 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) for (group = first_group; group <= last_group; group++) { grp = ext4_get_group_info(sb, group); + if (!grp) + continue; /* We only do this if the grp has never been initialized */ if 
(unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { ret = ext4_mb_init_group(sb, group, GFP_NOFS); diff --git a/fs/ext4/super.c b/fs/ext4/super.c index e9cebff818cfa..57ad9dc0871fc 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1044,6 +1044,8 @@ void ext4_mark_group_bitmap_corrupted(struct super_block *sb, struct ext4_group_desc *gdp = ext4_get_group_desc(sb, group, NULL); int ret; + if (!grp || !gdp) + return; if (flags & EXT4_GROUP_INFO_BBITMAP_CORRUPT) { ret = ext4_test_and_set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state); From ec0ba8b19e53bcd61cb658fc5bcb0d5543f189fc Mon Sep 17 00:00:00 2001 From: CIQ Kernel Automation Date: Mon, 4 May 2026 07:19:58 +0000 Subject: [PATCH 04/12] ext4: correct grp validation in ext4_mb_good_group jira VULN-169012 cve CVE-2023-53861 commit-author Kemeng Shi commit a9ce5993a0f5c0887c8a1b4ffa3b8046fbcfdc93 Group corruption check will access memory of grp and will trigger kernel crash if grp is NULL. So do NULL check before corruption check. Fixes: 5354b2af3406 ("ext4: allow ext4_get_group_info() to fail") Signed-off-by: Kemeng Shi Reviewed-by: Ritesh Harjani (IBM) Link: https://lore.kernel.org/r/20230801143204.2284343-2-shikemeng@huaweicloud.com Signed-off-by: Theodore Ts'o (cherry picked from commit a9ce5993a0f5c0887c8a1b4ffa3b8046fbcfdc93) Signed-off-by: CIQ Kernel Automation --- fs/ext4/mballoc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 09d6bc59e9ee3..c8c1ebe72afa1 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2439,7 +2439,7 @@ static bool ext4_mb_good_group(struct ext4_allocation_context *ac, BUG_ON(cr < 0 || cr >= 4); - if (unlikely(EXT4_MB_GRP_BBITMAP_CORRUPT(grp) || !grp)) + if (unlikely(!grp || EXT4_MB_GRP_BBITMAP_CORRUPT(grp))) return false; free = grp->bb_free; From 49a997317ce9e06660331a9abd7d2ea18f166630 Mon Sep 17 00:00:00 2001 From: CIQ Kernel Automation Date: Mon, 4 May 2026 07:20:34 +0000 Subject: [PATCH 05/12] ext4: drop the call to ext4_error() from ext4_get_group_info() jira VULN-157086 cve-bf CVE-2023-53503 commit-author Fabio M. De Francesco commit f451fd97dd2b78f286379203a47d9d295c467255 A recent patch added a call to ext4_error() which is problematic since some callers of the ext4_get_group_info() function may be holding a spinlock, whereas ext4_error() must never be called in atomic context. This triggered a report from Syzbot: "BUG: sleeping function called from invalid context in ext4_update_super" (see the link below). Therefore, drop the call to ext4_error() from ext4_get_group_info(). In the meantime use eight characters tabs instead of nine characters ones. Reported-by: syzbot+4acc7d910e617b360859@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/00000000000070575805fdc6cdb2@google.com/ Fixes: 5354b2af3406 ("ext4: allow ext4_get_group_info() to fail") Suggested-by: Theodore Ts'o Signed-off-by: Fabio M. 
De Francesco Link: https://lore.kernel.org/r/20230614100446.14337-1-fmdefrancesco@gmail.com (cherry picked from commit f451fd97dd2b78f286379203a47d9d295c467255) Signed-off-by: CIQ Kernel Automation --- fs/ext4/balloc.c | 20 +++++++++----------- 1 file changed, 9 insertions(+), 11 deletions(-) diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 09c75a3127c72..1f72f977c6dba 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c @@ -324,17 +324,15 @@ static ext4_fsblk_t ext4_valid_block_bitmap_padding(struct super_block *sb, struct ext4_group_info *ext4_get_group_info(struct super_block *sb, ext4_group_t group) { - struct ext4_group_info **grp_info; - long indexv, indexh; - - if (unlikely(group >= EXT4_SB(sb)->s_groups_count)) { - ext4_error(sb, "invalid group %u", group); - return NULL; - } - indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb)); - indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); - grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv); - return grp_info[indexh]; + struct ext4_group_info **grp_info; + long indexv, indexh; + + if (unlikely(group >= EXT4_SB(sb)->s_groups_count)) + return NULL; + indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb)); + indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1); + grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv); + return grp_info[indexh]; } /* From c5fa00b6aed22404e3c1c11a9e9fa502b75e68fc Mon Sep 17 00:00:00 2001 From: Roxana Nicolescu Date: Wed, 6 May 2026 10:39:45 +0200 Subject: [PATCH 06/12] ext4: remove unused return value of __mb_check_buddy jira VULN-157086 cve-bf CVE-2023-53503 commit-author Kemeng Shi commit 133de5a0d8f8e32b34feaa8beae7a189482f1856 Remove unused return value of __mb_check_buddy. Signed-off-by: Kemeng Shi Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20240105092102.496631-2-shikemeng@huaweicloud.com Signed-off-by: Theodore Ts'o (cherry picked from commit 133de5a0d8f8e32b34feaa8beae7a189482f1856) Signed-off-by: Roxana Nicolescu --- fs/ext4/mballoc.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index c8c1ebe72afa1..f3c3ba826f75e 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -665,7 +665,7 @@ do { \ } \ } while (0) -static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, +static void __mb_check_buddy(struct ext4_buddy *e4b, char *file, const char *function, int line) { struct super_block *sb = e4b->bd_sb; @@ -684,7 +684,7 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, void *buddy2; if (e4b->bd_info->bb_check_counter++ % 10) - return 0; + return; while (order > 1) { buddy = mb_find_buddy(e4b, order, &max); @@ -746,7 +746,7 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, grp = ext4_get_group_info(sb, e4b->bd_group); if (!grp) - return NULL; + return; list_for_each(cur, &grp->bb_prealloc_list) { ext4_group_t groupnr; struct ext4_prealloc_space *pa; @@ -756,7 +756,6 @@ static int __mb_check_buddy(struct ext4_buddy *e4b, char *file, for (i = 0; i < pa->pa_len; i++) MB_CHECK_ASSERT(mb_test_bit(k + i, buddy)); } - return 0; } #undef MB_CHECK_ASSERT #define mb_check_buddy(e4b) __mb_check_buddy(e4b, \ From 969bfb06c61757001874237ab00c3e50bce4373b Mon Sep 17 00:00:00 2001 From: CIQ Kernel Automation Date: Mon, 4 May 2026 07:21:19 +0000 Subject: [PATCH 07/12] net: nsh: Use correct mac_offset to unwind gso skb in nsh_gso_segment() jira VULN-170949 cve CVE-2023-54114 commit-author Dong Chenchen commit c83b49383b595be50647f0c764a48c78b5f3c4f8 As the call trace shows, skb_panic was 
caused by wrong skb->mac_header in nsh_gso_segment(): invalid opcode: 0000 [#1] PREEMPT SMP KASAN PTI CPU: 3 PID: 2737 Comm: syz Not tainted 6.3.0-next-20230505 #1 RIP: 0010:skb_panic+0xda/0xe0 call Trace: skb_push+0x91/0xa0 nsh_gso_segment+0x4f3/0x570 skb_mac_gso_segment+0x19e/0x270 __skb_gso_segment+0x1e8/0x3c0 validate_xmit_skb+0x452/0x890 validate_xmit_skb_list+0x99/0xd0 sch_direct_xmit+0x294/0x7c0 __dev_queue_xmit+0x16f0/0x1d70 packet_xmit+0x185/0x210 packet_snd+0xc15/0x1170 packet_sendmsg+0x7b/0xa0 sock_sendmsg+0x14f/0x160 The root cause is: nsh_gso_segment() use skb->network_header - nhoff to reset mac_header in skb_gso_error_unwind() if inner-layer protocol gso fails. However, skb->network_header may be reset by inner-layer protocol gso function e.g. mpls_gso_segment. skb->mac_header reset by the inaccurate network_header will be larger than skb headroom. nsh_gso_segment nhoff = skb->network_header - skb->mac_header; __skb_pull(skb,nsh_len) skb_mac_gso_segment mpls_gso_segment skb_reset_network_header(skb);//skb->network_header+=nsh_len return -EINVAL; skb_gso_error_unwind skb_push(skb, nsh_len); skb->mac_header = skb->network_header - nhoff; // skb->mac_header > skb->headroom, cause skb_push panic Use correct mac_offset to restore mac_header and get rid of nhoff. Fixes: c411ed854584 ("nsh: add GSO support") Reported-by: syzbot+632b5d9964208bfef8c0@syzkaller.appspotmail.com Suggested-by: Eric Dumazet Signed-off-by: Dong Chenchen Signed-off-by: David S. Miller (cherry picked from commit c83b49383b595be50647f0c764a48c78b5f3c4f8) Signed-off-by: CIQ Kernel Automation --- net/nsh/nsh.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c index bbfb27ae196b6..f4a38bd6a7e04 100644 --- a/net/nsh/nsh.c +++ b/net/nsh/nsh.c @@ -78,13 +78,12 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb, netdev_features_t features) { struct sk_buff *segs = ERR_PTR(-EINVAL); + u16 mac_offset = skb->mac_header; unsigned int nsh_len, mac_len; __be16 proto; - int nhoff; skb_reset_network_header(skb); - nhoff = skb->network_header - skb->mac_header; mac_len = skb->mac_len; if (unlikely(!pskb_may_pull(skb, NSH_BASE_HDR_LEN))) @@ -109,15 +108,14 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb, segs = skb_mac_gso_segment(skb, features); if (IS_ERR_OR_NULL(segs)) { skb_gso_error_unwind(skb, htons(ETH_P_NSH), nsh_len, - skb->network_header - nhoff, - mac_len); + mac_offset, mac_len); goto out; } for (skb = segs; skb; skb = skb->next) { skb->protocol = htons(ETH_P_NSH); __skb_push(skb, nsh_len); - skb_set_mac_header(skb, -nhoff); + skb->mac_header = mac_offset; skb->network_header = skb->mac_header + mac_len; skb->mac_len = mac_len; } From b5dc3dfc02534bb2f75978a1b96fe9cf0a62629f Mon Sep 17 00:00:00 2001 From: CIQ Kernel Automation Date: Mon, 4 May 2026 07:22:19 +0000 Subject: [PATCH 08/12] i2c: core: Run atomic i2c xfer when !preemptible jira VULN-35813 cve CVE-2023-52791 commit-author Benjamin Bara commit aa49c90894d06e18a1ee7c095edbd2f37c232d02 Since bae1d3a05a8b, i2c transfers are non-atomic if preemption is disabled. However, non-atomic i2c transfers require preemption (e.g. in wait_for_completion() while waiting for the DMA). panic() calls preempt_disable_notrace() before calling emergency_restart(). Therefore, if an i2c device is used for the restart, the xfer should be atomic. 
This avoids warnings like: [ 12.667612] WARNING: CPU: 1 PID: 1 at kernel/rcu/tree_plugin.h:318 rcu_note_context_switch+0x33c/0x6b0 [ 12.676926] Voluntary context switch within RCU read-side critical section! ... [ 12.742376] schedule_timeout from wait_for_completion_timeout+0x90/0x114 [ 12.749179] wait_for_completion_timeout from tegra_i2c_wait_completion+0x40/0x70 ... [ 12.994527] atomic_notifier_call_chain from machine_restart+0x34/0x58 [ 13.001050] machine_restart from panic+0x2a8/0x32c Use !preemptible() instead, which is basically the same check as pre-v5.2. Fixes: bae1d3a05a8b ("i2c: core: remove use of in_atomic()") Cc: stable@vger.kernel.org # v5.2+ Suggested-by: Dmitry Osipenko Acked-by: Wolfram Sang Reviewed-by: Dmitry Osipenko Tested-by: Nishanth Menon Signed-off-by: Benjamin Bara Link: https://lore.kernel.org/r/20230327-tegra-pmic-reboot-v7-2-18699d5dcd76@skidata.com Signed-off-by: Lee Jones (cherry picked from commit aa49c90894d06e18a1ee7c095edbd2f37c232d02) Signed-off-by: CIQ Kernel Automation --- drivers/i2c/i2c-core.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h index 1247e6e6e9751..05b8b8dfa9bdd 100644 --- a/drivers/i2c/i2c-core.h +++ b/drivers/i2c/i2c-core.h @@ -29,7 +29,7 @@ int i2c_dev_irq_from_resources(const struct resource *resources, */ static inline bool i2c_in_atomic_xfer_mode(void) { - return system_state > SYSTEM_RUNNING && irqs_disabled(); + return system_state > SYSTEM_RUNNING && !preemptible(); } static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap) From c504f216f587a89963fa06ca16d4b84d264ec0fd Mon Sep 17 00:00:00 2001 From: CIQ Kernel Automation Date: Mon, 4 May 2026 07:23:12 +0000 Subject: [PATCH 09/12] i2c: core: Fix atomic xfer check for non-preempt config jira VULN-35813 cve-bf CVE-2023-52791 commit-author Benjamin Bara commit a3368e1186e3ce8e38f78cbca019622095b1f331 Since commit aa49c90894d0 ("i2c: core: Run atomic i2c xfer when !preemptible"), the whole reboot/power off sequence on non-preempt kernels is using atomic i2c xfer, as !preemptible() always results to 1. During device_shutdown(), the i2c might be used a lot and not all busses have implemented an atomic xfer handler. This results in a lot of avoidable noise, like: [ 12.687169] No atomic I2C transfer handler for 'i2c-0' [ 12.692313] WARNING: CPU: 6 PID: 275 at drivers/i2c/i2c-core.h:40 i2c_smbus_xfer+0x100/0x118 ... Fix this by allowing non-atomic xfer when the interrupts are enabled, as it was before. 
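Combined with the previous patch, the check reduces to the small helper below (a sketch of i2c-core.h after the hunk that follows is applied): on kernels without CONFIG_PREEMPT_COUNT, preemptible() is hard-wired to 0, so the IS_ENABLED() test falls back to the original irqs_disabled() behaviour there.

	static inline bool i2c_in_atomic_xfer_mode(void)
	{
		/* atomic xfers only late in shutdown/reboot, and only when we truly cannot sleep */
		return system_state > SYSTEM_RUNNING &&
		       (IS_ENABLED(CONFIG_PREEMPT_COUNT) ? !preemptible() : irqs_disabled());
	}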
Link: https://lore.kernel.org/r/20231222230106.73f030a5@yea Link: https://lore.kernel.org/r/20240102150350.3180741-1-mwalle@kernel.org Link: https://lore.kernel.org/linux-i2c/13271b9b-4132-46ef-abf8-2c311967bb46@mailbox.org/ Fixes: aa49c90894d0 ("i2c: core: Run atomic i2c xfer when !preemptible") Cc: stable@vger.kernel.org # v5.2+ Signed-off-by: Benjamin Bara Tested-by: Michael Walle Tested-by: Tor Vic [wsa: removed a comment which needs more work, code is ok] Signed-off-by: Wolfram Sang (cherry picked from commit a3368e1186e3ce8e38f78cbca019622095b1f331) Signed-off-by: CIQ Kernel Automation --- drivers/i2c/i2c-core.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h index 05b8b8dfa9bdd..36587f38dff3d 100644 --- a/drivers/i2c/i2c-core.h +++ b/drivers/i2c/i2c-core.h @@ -3,6 +3,7 @@ * i2c-core.h - interfaces internal to the I2C framework */ +#include #include struct i2c_devinfo { @@ -29,7 +30,8 @@ int i2c_dev_irq_from_resources(const struct resource *resources, */ static inline bool i2c_in_atomic_xfer_mode(void) { - return system_state > SYSTEM_RUNNING && !preemptible(); + return system_state > SYSTEM_RUNNING && + (IS_ENABLED(CONFIG_PREEMPT_COUNT) ? !preemptible() : irqs_disabled()); } static inline int __i2c_lock_bus_helper(struct i2c_adapter *adap) From ae91e3274b6cf996940f0630521ed1c4c0cd9675 Mon Sep 17 00:00:00 2001 From: CIQ Kernel Automation Date: Mon, 4 May 2026 07:24:10 +0000 Subject: [PATCH 10/12] RDMA/siw: Fix connection failure handling jira VULN-35016 cve CVE-2023-52513 commit-author Bernard Metzler commit 53a3f777049771496f791504e7dc8ef017cba590 In case immediate MPA request processing fails, the newly created endpoint unlinks the listening endpoint and is ready to be dropped. This special case was not handled correctly by the code handling the later TCP socket close, causing a NULL dereference crash in siw_cm_work_handler() when dereferencing a NULL listener. We now also cancel the useless MPA timeout, if immediate MPA request processing fails. This patch furthermore simplifies MPA processing in general: Scheduling a useless TCP socket read in sk_data_ready() upcall is now surpressed, if the socket is already moved out of TCP_ESTABLISHED state. Fixes: 6c52fdc244b5 ("rdma/siw: connection management") Signed-off-by: Bernard Metzler Link: https://lore.kernel.org/r/20230905145822.446263-1-bmt@zurich.ibm.com Signed-off-by: Leon Romanovsky (cherry picked from commit 53a3f777049771496f791504e7dc8ef017cba590) Signed-off-by: CIQ Kernel Automation --- drivers/infiniband/sw/siw/siw_cm.c | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c index da530c0404da4..cb9e083a1073d 100644 --- a/drivers/infiniband/sw/siw/siw_cm.c +++ b/drivers/infiniband/sw/siw/siw_cm.c @@ -976,6 +976,7 @@ static void siw_accept_newconn(struct siw_cep *cep) siw_cep_put(cep); new_cep->listen_cep = NULL; if (rv) { + siw_cancel_mpatimer(new_cep); siw_cep_set_free(new_cep); goto error; } @@ -1100,9 +1101,12 @@ static void siw_cm_work_handler(struct work_struct *w) /* * Socket close before MPA request received. 
*/ - siw_dbg_cep(cep, "no mpareq: drop listener\n"); - siw_cep_put(cep->listen_cep); - cep->listen_cep = NULL; + if (cep->listen_cep) { + siw_dbg_cep(cep, + "no mpareq: drop listener\n"); + siw_cep_put(cep->listen_cep); + cep->listen_cep = NULL; + } } } release_cep = 1; @@ -1227,7 +1231,11 @@ static void siw_cm_llp_data_ready(struct sock *sk) if (!cep) goto out; - siw_dbg_cep(cep, "state: %d\n", cep->state); + siw_dbg_cep(cep, "cep state: %d, socket state %d\n", + cep->state, sk->sk_state); + + if (sk->sk_state != TCP_ESTABLISHED) + goto out; switch (cep->state) { case SIW_EPSTATE_RDMA_MODE: From 5aed007564d2044f65992200cd768c8ee4c9f710 Mon Sep 17 00:00:00 2001 From: CIQ Kernel Automation Date: Mon, 4 May 2026 07:24:37 +0000 Subject: [PATCH 11/12] perf/core: Bail out early if the request AUX area is out of bound jira VULN-35978 cve CVE-2023-52835 commit-author Shuai Xue commit 54aee5f15b83437f23b2b2469bcf21bdd9823916 When perf-record is used with a large AUX area, e.g. 4GB, it fails with: #perf record -C 0 -m ,4G -e arm_spe_0// -- sleep 1 failed to mmap with 12 (Cannot allocate memory) and it reveals a WARNING with __alloc_pages(): ------------[ cut here ]------------ WARNING: CPU: 44 PID: 17573 at mm/page_alloc.c:5568 __alloc_pages+0x1ec/0x248 Call trace: __alloc_pages+0x1ec/0x248 __kmalloc_large_node+0xc0/0x1f8 __kmalloc_node+0x134/0x1e8 rb_alloc_aux+0xe0/0x298 perf_mmap+0x440/0x660 mmap_region+0x308/0x8a8 do_mmap+0x3c0/0x528 vm_mmap_pgoff+0xf4/0x1b8 ksys_mmap_pgoff+0x18c/0x218 __arm64_sys_mmap+0x38/0x58 invoke_syscall+0x50/0x128 el0_svc_common.constprop.0+0x58/0x188 do_el0_svc+0x34/0x50 el0_svc+0x34/0x108 el0t_64_sync_handler+0xb8/0xc0 el0t_64_sync+0x1a4/0x1a8 'rb->aux_pages' allocated by kcalloc() is a pointer array which is used to maintain AUX trace pages. The allocated page for this array is physically contiguous (and virtually contiguous) with an order of 0..MAX_ORDER. If the size of the pointer array crosses the limit set by MAX_ORDER, it triggers a WARNING. So bail out early with -ENOMEM if the requested AUX area is out of bounds, e.g.: #perf record -C 0 -m ,4G -e arm_spe_0// -- sleep 1 failed to mmap with 12 (Cannot allocate memory) Signed-off-by: Shuai Xue Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Ingo Molnar (cherry picked from commit 54aee5f15b83437f23b2b2469bcf21bdd9823916) Signed-off-by: CIQ Kernel Automation --- kernel/events/ring_buffer.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c index a0433f37b0243..4a260ceed9c73 100644 --- a/kernel/events/ring_buffer.c +++ b/kernel/events/ring_buffer.c @@ -699,6 +699,12 @@ int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event, watermark = 0; } + /* + * kcalloc_node() is unable to allocate buffer if the size is larger + * than: PAGE_SIZE << MAX_ORDER; directly bail out in this case. + */ + if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_ORDER) + return -ENOMEM; rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL, node); if (!rb->aux_pages) From d8add8ddf1623800bb26ba6c1cdbca88781fb484 Mon Sep 17 00:00:00 2001 From: CIQ Kernel Automation Date: Mon, 4 May 2026 07:25:04 +0000 Subject: [PATCH 12/12] atl1c: Work around the DMA RX overflow issue jira VULN-36010 cve CVE-2023-52834 commit-author Sieng-Piaw Liew commit 86565682e9053e5deb128193ea9e88531bbae9cf This is based on alx driver commit 881d0327db37 ("net: alx: Work around the DMA RX overflow issue").
The alx and atl1c drivers had an RX overflow error, which was why a custom allocator was created to avoid certain addresses. A simpler workaround was then created for the alx driver, but not for atl1c, due to the lack of a tester. Instead of using a custom allocator, check the allocated skb address and use skb_reserve() to move away from the problematic 0x...fc0 address. Tested on AR8131 on Acer 4540. Signed-off-by: Sieng-Piaw Liew Link: https://lore.kernel.org/r/20230912010711.12036-1-liew.s.piaw@gmail.com Signed-off-by: Paolo Abeni (cherry picked from commit 86565682e9053e5deb128193ea9e88531bbae9cf) Signed-off-by: CIQ Kernel Automation --- drivers/net/ethernet/atheros/atl1c/atl1c.h | 3 - .../net/ethernet/atheros/atl1c/atl1c_main.c | 67 +++++-------------- 2 files changed, 16 insertions(+), 54 deletions(-) diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c.h b/drivers/net/ethernet/atheros/atl1c/atl1c.h index 43d821fe7a542..63ba64dbb7310 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c.h +++ b/drivers/net/ethernet/atheros/atl1c/atl1c.h @@ -504,15 +504,12 @@ struct atl1c_rrd_ring { u16 next_to_use; u16 next_to_clean; struct napi_struct napi; - struct page *rx_page; - unsigned int rx_page_offset; }; /* board specific private data structure */ struct atl1c_adapter { struct net_device *netdev; struct pci_dev *pdev; - unsigned int rx_frag_size; struct atl1c_hw hw; struct atl1c_hw_stats hw_stats; struct mii_if_info mii; /* MII interface info */ diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c index 4331c8f7d50d3..6577934baca33 100644 --- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c @@ -493,15 +493,10 @@ static int atl1c_set_mac_addr(struct net_device *netdev, void *p) static void atl1c_set_rxbufsize(struct atl1c_adapter *adapter, struct net_device *dev) { - unsigned int head_size; int mtu = dev->mtu; adapter->rx_buffer_len = mtu > AT_RX_BUF_SIZE ?
roundup(mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN, 8) : AT_RX_BUF_SIZE; - - head_size = SKB_DATA_ALIGN(adapter->rx_buffer_len + NET_SKB_PAD + NET_IP_ALIGN) + - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); - adapter->rx_frag_size = roundup_pow_of_two(head_size); } static netdev_features_t atl1c_fix_features(struct net_device *netdev, @@ -974,7 +969,6 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter) static void atl1c_free_ring_resources(struct atl1c_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; - int i; dma_free_coherent(&pdev->dev, adapter->ring_header.size, adapter->ring_header.desc, adapter->ring_header.dma); @@ -987,12 +981,6 @@ static void atl1c_free_ring_resources(struct atl1c_adapter *adapter) kfree(adapter->tpd_ring[0].buffer_info); adapter->tpd_ring[0].buffer_info = NULL; } - for (i = 0; i < adapter->rx_queue_count; ++i) { - if (adapter->rrd_ring[i].rx_page) { - put_page(adapter->rrd_ring[i].rx_page); - adapter->rrd_ring[i].rx_page = NULL; - } - } } /** @@ -1764,48 +1752,11 @@ static inline void atl1c_rx_checksum(struct atl1c_adapter *adapter, skb_checksum_none_assert(skb); } -static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter, - u32 queue, bool napi_mode) -{ - struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue]; - struct sk_buff *skb; - struct page *page; - - if (adapter->rx_frag_size > PAGE_SIZE) { - if (likely(napi_mode)) - return napi_alloc_skb(&rrd_ring->napi, - adapter->rx_buffer_len); - else - return netdev_alloc_skb_ip_align(adapter->netdev, - adapter->rx_buffer_len); - } - - page = rrd_ring->rx_page; - if (!page) { - page = alloc_page(GFP_ATOMIC); - if (unlikely(!page)) - return NULL; - rrd_ring->rx_page = page; - rrd_ring->rx_page_offset = 0; - } - - skb = build_skb(page_address(page) + rrd_ring->rx_page_offset, - adapter->rx_frag_size); - if (likely(skb)) { - skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); - rrd_ring->rx_page_offset += adapter->rx_frag_size; - if (rrd_ring->rx_page_offset >= PAGE_SIZE) - rrd_ring->rx_page = NULL; - else - get_page(page); - } - return skb; -} - static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue, bool napi_mode) { struct atl1c_rfd_ring *rfd_ring = &adapter->rfd_ring[queue]; + struct atl1c_rrd_ring *rrd_ring = &adapter->rrd_ring[queue]; struct pci_dev *pdev = adapter->pdev; struct atl1c_buffer *buffer_info, *next_info; struct sk_buff *skb; @@ -1824,13 +1775,27 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, u32 queue, while (next_info->flags & ATL1C_BUFFER_FREE) { rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); - skb = atl1c_alloc_skb(adapter, queue, napi_mode); + /* When DMA RX address is set to something like + * 0x....fc0, it will be very likely to cause DMA + * RFD overflow issue. + * + * To work around it, we apply rx skb with 64 bytes + * longer space, and offset the address whenever + * 0x....fc0 is detected. + */ + if (likely(napi_mode)) + skb = napi_alloc_skb(&rrd_ring->napi, adapter->rx_buffer_len + 64); + else + skb = netdev_alloc_skb(adapter->netdev, adapter->rx_buffer_len + 64); if (unlikely(!skb)) { if (netif_msg_rx_err(adapter)) dev_warn(&pdev->dev, "alloc rx buffer failed\n"); break; } + if (((unsigned long)skb->data & 0xfff) == 0xfc0) + skb_reserve(skb, 64); + /* * Make buffer alignment 2 beyond a 16 byte boundary * this will result in a 16 byte aligned IP header after