From fee1e6a735573f44fe014d9ad43c6a34a613e73f Mon Sep 17 00:00:00 2001 From: Zhihao Cheng Date: Tue, 6 Jun 2023 21:59:26 +0800 Subject: [PATCH 001/513] jbd2: Fix wrongly judgement for buffer head removing while doing checkpoint [ Upstream commit e34c8dd238d0c9368b746480f313055f5bab5040 ] Following process, jbd2_journal_commit_transaction // there are several dirty buffer heads in transaction->t_checkpoint_list P1 wb_workfn jbd2_log_do_checkpoint if (buffer_locked(bh)) // false __block_write_full_page trylock_buffer(bh) test_clear_buffer_dirty(bh) if (!buffer_dirty(bh)) __jbd2_journal_remove_checkpoint(jh) if (buffer_write_io_error(bh)) // false >> bh IO error occurs << jbd2_cleanup_journal_tail __jbd2_update_log_tail jbd2_write_superblock // The bh won't be replayed in next mount. , which could corrupt the ext4 image, fetch a reproducer in [Link]. Since writeback process clears buffer dirty after locking buffer head, we can fix it by try locking buffer and check dirtiness while buffer is locked, the buffer head can be removed if it is neither dirty nor locked. Link: https://bugzilla.kernel.org/show_bug.cgi?id=217490 Fixes: 470decc613ab ("[PATCH] jbd2: initial copy of files from jbd") Signed-off-by: Zhihao Cheng Signed-off-by: Zhang Yi Reviewed-by: Jan Kara Link: https://lore.kernel.org/r/20230606135928.434610-5-yi.zhang@huaweicloud.com Signed-off-by: Theodore Ts'o Signed-off-by: Sasha Levin --- fs/jbd2/checkpoint.c | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c index fe8bb031b7d7..d2aba55833f9 100644 --- a/fs/jbd2/checkpoint.c +++ b/fs/jbd2/checkpoint.c @@ -221,20 +221,6 @@ int jbd2_log_do_checkpoint(journal_t *journal) jh = transaction->t_checkpoint_list; bh = jh2bh(jh); - /* - * The buffer may be writing back, or flushing out in the - * last couple of cycles, or re-adding into a new transaction, - * need to check it again until it's unlocked. - */ - if (buffer_locked(bh)) { - get_bh(bh); - spin_unlock(&journal->j_list_lock); - wait_on_buffer(bh); - /* the journal_head may have gone by now */ - BUFFER_TRACE(bh, "brelse"); - __brelse(bh); - goto retry; - } if (jh->b_transaction != NULL) { transaction_t *t = jh->b_transaction; tid_t tid = t->t_tid; @@ -269,7 +255,22 @@ int jbd2_log_do_checkpoint(journal_t *journal) spin_lock(&journal->j_list_lock); goto restart; } - if (!buffer_dirty(bh)) { + if (!trylock_buffer(bh)) { + /* + * The buffer is locked, it may be writing back, or + * flushing out in the last couple of cycles, or + * re-adding into a new transaction, need to check + * it again until it's unlocked. 
+ */ + get_bh(bh); + spin_unlock(&journal->j_list_lock); + wait_on_buffer(bh); + /* the journal_head may have gone by now */ + BUFFER_TRACE(bh, "brelse"); + __brelse(bh); + goto retry; + } else if (!buffer_dirty(bh)) { + unlock_buffer(bh); BUFFER_TRACE(bh, "remove from checkpoint"); /* * If the transaction was released or the checkpoint @@ -279,6 +280,7 @@ int jbd2_log_do_checkpoint(journal_t *journal) !transaction->t_checkpoint_list) goto out; } else { + unlock_buffer(bh); /* * We are about to write the buffer, it could be * raced by some other transaction shrink or buffer From 49a2686adddebe1ae76b4d368383208656ef6606 Mon Sep 17 00:00:00 2001 From: Claudio Imbrenda Date: Wed, 5 Jul 2023 13:19:37 +0200 Subject: [PATCH 002/513] KVM: s390: pv: fix index value of replaced ASCE MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit c2fceb59bbda16468bda82b002383bff59de89ab ] The index field of the struct page corresponding to a guest ASCE should be 0. When replacing the ASCE in s390_replace_asce(), the index of the new ASCE should also be set to 0. Having the wrong index might lead to the wrong addresses being passed around when notifying pte invalidations, and eventually to validity intercepts (VM crash) if the prefix gets unmapped and the notifier gets called with the wrong address. Reviewed-by: Philippe Mathieu-Daudé Fixes: faa2f72cb356 ("KVM: s390: pv: leak the topmost page table when destroy fails") Reviewed-by: Janosch Frank Signed-off-by: Claudio Imbrenda Message-ID: <20230705111937.33472-3-imbrenda@linux.ibm.com> Signed-off-by: Sasha Levin --- arch/s390/mm/gmap.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index ff40bf92db43..a2c872de29a6 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -2791,6 +2791,7 @@ int s390_replace_asce(struct gmap *gmap) page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER); if (!page) return -ENOMEM; + page->index = 0; table = page_to_virt(page); memcpy(table, gmap->table, 1UL << (CRST_ALLOC_ORDER + PAGE_SHIFT)); From 21d063d27bf384c1c9216838da8055e88a1be9c4 Mon Sep 17 00:00:00 2001 From: Ondrej Mosnacek Date: Tue, 18 Jul 2023 13:56:07 +0200 Subject: [PATCH 003/513] io_uring: don't audit the capability check in io_uring_create() [ Upstream commit 6adc2272aaaf84f34b652cf77f770c6fcc4b8336 ] The check being unconditional may lead to unwanted denials reported by LSMs when a process has the capability granted by DAC, but denied by an LSM. In the case of SELinux such denials are a problem, since they can't be effectively filtered out via the policy and when not silenced, they produce noise that may hide a true problem or an attack. Since not having the capability merely means that the created io_uring context will be accounted against the current user's RLIMIT_MEMLOCK limit, we can disable auditing of denials for this check by using ns_capable_noaudit() instead of capable(). 
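As a rough illustration of the pattern (a sketch only, the actual hunk is below): a capability check whose failure merely selects a fallback path should use the no-audit variant, so an LSM does not record a denial for it:

  if (ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK)) {
          /* privileged: memory is not charged to the user */
  } else {
          /* unprivileged fallback: account against RLIMIT_MEMLOCK */
          ctx->user = get_uid(current_user());
  }
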
Fixes: 2b188cc1bb85 ("Add io_uring IO interface") Link: https://bugzilla.redhat.com/show_bug.cgi?id=2193317 Signed-off-by: Ondrej Mosnacek Reviewed-by: Jeff Moyer Link: https://lore.kernel.org/r/20230718115607.65652-1-omosnace@redhat.com Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- io_uring/io_uring.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index d7f87157be9a..f65c6811ede8 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -10602,7 +10602,7 @@ static int io_uring_create(unsigned entries, struct io_uring_params *p, if (!ctx) return -ENOMEM; ctx->compat = in_compat_syscall(); - if (!capable(CAP_IPC_LOCK)) + if (!ns_capable_noaudit(&init_user_ns, CAP_IPC_LOCK)) ctx->user = get_uid(current_user()); /* From f3d2344811fdff1a0107f5dc82c72a0fdb33a037 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 10 Jul 2023 14:34:25 +0200 Subject: [PATCH 004/513] gpio: tps68470: Make tps68470_gpio_output() always set the initial value [ Upstream commit 5a7adc6c1069ce31ef4f606ae9c05592c80a6ab5 ] Make tps68470_gpio_output() call tps68470_gpio_set() for output-only pins too, so that the initial value passed to gpiod_direction_output() is honored for these pins too. Fixes: 275b13a65547 ("gpio: Add support for TPS68470 GPIOs") Reviewed-by: Andy Shevchenko Reviewed-by: Daniel Scally Tested-by: Daniel Scally Reviewed-by: Sakari Ailus Signed-off-by: Hans de Goede Signed-off-by: Bartosz Golaszewski Signed-off-by: Sasha Levin --- drivers/gpio/gpio-tps68470.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpio/gpio-tps68470.c b/drivers/gpio/gpio-tps68470.c index 423b7bc30ae8..03a523a6d6fa 100644 --- a/drivers/gpio/gpio-tps68470.c +++ b/drivers/gpio/gpio-tps68470.c @@ -91,13 +91,13 @@ static int tps68470_gpio_output(struct gpio_chip *gc, unsigned int offset, struct tps68470_gpio_data *tps68470_gpio = gpiochip_get_data(gc); struct regmap *regmap = tps68470_gpio->tps68470_regmap; + /* Set the initial value */ + tps68470_gpio_set(gc, offset, value); + /* rest are always outputs */ if (offset >= TPS68470_N_REGULAR_GPIO) return 0; - /* Set the initial value */ - tps68470_gpio_set(gc, offset, value); - return regmap_update_bits(regmap, TPS68470_GPIO_CTL_REG_A(offset), TPS68470_GPIO_MODE_MASK, TPS68470_GPIO_MODE_OUT_CMOS); From 34fe5fbc208fc8576c4276db71d0c145187f7eb5 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Thu, 17 Nov 2022 13:08:00 +0200 Subject: [PATCH 005/513] pwm: Add a stub for devm_pwmchip_add() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 88da4e8113110d5f4ebdd2f8cd0899e300cd1954 upstream. The devm_pwmchip_add() can be called by a module that optionally instantiates PWM chip. In the case of CONFIG_PWM=n, the compilation can't be performed. Hence, add a necessary stub. 
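For illustration, a hypothetical caller that only registers a PWM chip when the hardware provides one might look roughly like this ("priv" and "has_pwm" are made-up names); with CONFIG_PWM=n the new stub lets it build and simply returns -EINVAL:

  if (priv->has_pwm) {
          ret = devm_pwmchip_add(dev, &priv->pwm_chip);
          if (ret)
                  return dev_err_probe(dev, ret, "failed to add PWM chip\n");
  }
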
Signed-off-by: Andy Shevchenko Acked-by: Thierry Reding Reviewed-by: Mika Westerberg Acked-by: Uwe Kleine-König Signed-off-by: Greg Kroah-Hartman --- include/linux/pwm.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/linux/pwm.h b/include/linux/pwm.h index 725c9b784e60..c7bfa64aeb14 100644 --- a/include/linux/pwm.h +++ b/include/linux/pwm.h @@ -489,6 +489,11 @@ static inline int pwmchip_remove(struct pwm_chip *chip) return -EINVAL; } +static inline int devm_pwmchip_add(struct device *dev, struct pwm_chip *chip) +{ + return -EINVAL; +} + static inline struct pwm_device *pwm_request_from_chip(struct pwm_chip *chip, unsigned int index, const char *label) From a999660042afd12706912b7ce44776d20dd4d489 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Mon, 17 Jul 2023 16:27:43 +0200 Subject: [PATCH 006/513] gpio: mvebu: Make use of devm_pwmchip_add MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 1945063eb59e64d2919cb14d54d081476d9e53bb ] This allows to get rid of a call to pwmchip_remove() in the error path. There is no .remove function for this driver, so this change fixes a resource leak when a gpio-mvebu device is unbound. Fixes: 757642f9a584 ("gpio: mvebu: Add limited PWM support") Signed-off-by: Uwe Kleine-König Reviewed-by: Andy Shevchenko Signed-off-by: Bartosz Golaszewski Signed-off-by: Sasha Levin --- drivers/gpio/gpio-mvebu.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index a245bfd5a617..4d64b3358c50 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -874,7 +874,7 @@ static int mvebu_pwm_probe(struct platform_device *pdev, spin_lock_init(&mvpwm->lock); - return pwmchip_add(&mvpwm->chip); + return devm_pwmchip_add(dev, &mvpwm->chip); } #ifdef CONFIG_DEBUG_FS @@ -1244,8 +1244,7 @@ static int mvebu_gpio_probe(struct platform_device *pdev) if (!mvchip->domain) { dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n", mvchip->chip.label); - err = -ENODEV; - goto err_pwm; + return -ENODEV; } err = irq_alloc_domain_generic_chips( @@ -1297,9 +1296,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) err_domain: irq_domain_remove(mvchip->domain); -err_pwm: - pwmchip_remove(&mvchip->mvpwm->chip); - return err; } From b19e90521286a03bc3793fd598f20277a8f99c85 Mon Sep 17 00:00:00 2001 From: Bartosz Golaszewski Date: Wed, 19 Jul 2023 13:41:01 +0200 Subject: [PATCH 007/513] gpio: mvebu: fix irq domain leak MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 644ee70267a934be27370f9aa618b29af7290544 ] Uwe Kleine-König pointed out we still have one resource leak in the mvebu driver triggered on driver detach. Let's address it with a custom devm action. 
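The devres action pattern used for that, sketched in generic form (the concrete callback is in the diff below, the "foo" names here are illustrative):

  static void foo_remove_irq_domain(void *data)
  {
          irq_domain_remove(data);
  }

  /* in probe(), right after the irq domain is created */
  err = devm_add_action_or_reset(&pdev->dev, foo_remove_irq_domain, domain);
  if (err)
          return err;

This way the domain is torn down automatically both on probe failure and on unbind, so no error label or remove() callback is needed for it.
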
Fixes: 812d47889a8e ("gpio/mvebu: Use irq_domain_add_linear") Signed-off-by: Bartosz Golaszewski Reviewed-by: Andy Shevchenko Reviewed-by: Uwe Kleine-König Signed-off-by: Sasha Levin --- drivers/gpio/gpio-mvebu.c | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/drivers/gpio/gpio-mvebu.c b/drivers/gpio/gpio-mvebu.c index 4d64b3358c50..b965513f44fe 100644 --- a/drivers/gpio/gpio-mvebu.c +++ b/drivers/gpio/gpio-mvebu.c @@ -1112,6 +1112,13 @@ static int mvebu_gpio_probe_syscon(struct platform_device *pdev, return 0; } +static void mvebu_gpio_remove_irq_domain(void *data) +{ + struct irq_domain *domain = data; + + irq_domain_remove(domain); +} + static int mvebu_gpio_probe(struct platform_device *pdev) { struct mvebu_gpio_chip *mvchip; @@ -1247,13 +1254,18 @@ static int mvebu_gpio_probe(struct platform_device *pdev) return -ENODEV; } + err = devm_add_action_or_reset(&pdev->dev, mvebu_gpio_remove_irq_domain, + mvchip->domain); + if (err) + return err; + err = irq_alloc_domain_generic_chips( mvchip->domain, ngpios, 2, np->name, handle_level_irq, IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_LEVEL, 0, 0); if (err) { dev_err(&pdev->dev, "couldn't allocate irq chips %s (DT).\n", mvchip->chip.label); - goto err_domain; + return err; } /* @@ -1293,10 +1305,6 @@ static int mvebu_gpio_probe(struct platform_device *pdev) } return 0; - -err_domain: - irq_domain_remove(mvchip->domain); - return err; } static struct platform_driver mvebu_gpio_driver = { From 89eae1f0aaeb71f9e9ac0add322b405b006ef6f6 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Mon, 19 Jun 2023 17:21:50 +0100 Subject: [PATCH 008/513] btrfs: fix race between quota disable and relocation [ Upstream commit 8a4a0b2a3eaf75ca8854f856ef29690c12b2f531 ] If we disable quotas while we have a relocation of a metadata block group that has extents belonging to the quota root, we can cause the relocation to fail with -ENOENT. This is because relocation builds backref nodes for extents of the quota root and later needs to walk the backrefs and access the quota root - however if in between a task disables quotas, it results in deleting the quota root from the root tree (with btrfs_del_root(), called from btrfs_quota_disable(). This can be sporadically triggered by test case btrfs/255 from fstests: $ ./check btrfs/255 FSTYP -- btrfs PLATFORM -- Linux/x86_64 debian0 6.4.0-rc6-btrfs-next-134+ #1 SMP PREEMPT_DYNAMIC Thu Jun 15 11:59:28 WEST 2023 MKFS_OPTIONS -- /dev/sdc MOUNT_OPTIONS -- /dev/sdc /home/fdmanana/btrfs-tests/scratch_1 btrfs/255 6s ... _check_dmesg: something found in dmesg (see /home/fdmanana/git/hub/xfstests/results//btrfs/255.dmesg) - output mismatch (see /home/fdmanana/git/hub/xfstests/results//btrfs/255.out.bad) # --- tests/btrfs/255.out 2023-03-02 21:47:53.876609426 +0000 # +++ /home/fdmanana/git/hub/xfstests/results//btrfs/255.out.bad 2023-06-16 10:20:39.267563212 +0100 # @@ -1,2 +1,4 @@ # QA output created by 255 # +ERROR: error during balancing '/home/fdmanana/btrfs-tests/scratch_1': No such file or directory # +There may be more info in syslog - try dmesg | tail # Silence is golden # ... (Run 'diff -u /home/fdmanana/git/hub/xfstests/tests/btrfs/255.out /home/fdmanana/git/hub/xfstests/results//btrfs/255.out.bad' to see the entire diff) Ran: btrfs/255 Failures: btrfs/255 Failed 1 of 1 tests To fix this make the quota disable operation take the cleaner mutex, as relocation of a block group also takes this mutex. 
This is also what we do when deleting a subvolume/snapshot, we take the cleaner mutex in the cleaner kthread (at cleaner_kthread()) and then we call btrfs_del_root() at btrfs_drop_snapshot() while under the protection of the cleaner mutex. Fixes: bed92eae26cc ("Btrfs: qgroup implementation and prototypes") CC: stable@vger.kernel.org # 5.4+ Signed-off-by: Filipe Manana Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/qgroup.c | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index d408d1dfde7c..d46a070275ff 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1201,12 +1201,23 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info) int ret = 0; /* - * We need to have subvol_sem write locked, to prevent races between - * concurrent tasks trying to disable quotas, because we will unlock - * and relock qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes. + * We need to have subvol_sem write locked to prevent races with + * snapshot creation. */ lockdep_assert_held_write(&fs_info->subvol_sem); + /* + * Lock the cleaner mutex to prevent races with concurrent relocation, + * because relocation may be building backrefs for blocks of the quota + * root while we are deleting the root. This is like dropping fs roots + * of deleted snapshots/subvolumes, we need the same protection. + * + * This also prevents races between concurrent tasks trying to disable + * quotas, because we will unlock and relock qgroup_ioctl_lock across + * BTRFS_FS_QUOTA_ENABLED changes. + */ + mutex_lock(&fs_info->cleaner_mutex); + mutex_lock(&fs_info->qgroup_ioctl_lock); if (!fs_info->quota_root) goto out; @@ -1287,6 +1298,7 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info) btrfs_end_transaction(trans); else if (trans) ret = btrfs_end_transaction(trans); + mutex_unlock(&fs_info->cleaner_mutex); return ret; } From 9845744e57feefd22f2b94a4ba202b4511b246da Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Fri, 2 Feb 2018 17:24:57 +0100 Subject: [PATCH 009/513] i2c: Delete error messages for failed memory allocations [ Upstream commit 6b3b21a8542fd2fb6ffc61bc13b9419f0c58ebad ] These issues were detected by using the Coccinelle software. 
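The reasoning behind the removal: the slab and page allocators already emit a detailed warning on allocation failure (unless __GFP_NOWARN is passed), so a per-driver message adds nothing. The preferred shape is simply:

  dev = kzalloc(sizeof(*dev), GFP_KERNEL);
  if (!dev)
          return -ENOMEM;   /* no dev_err(), the allocator already complained */
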
Signed-off-by: Markus Elfring Signed-off-by: Wolfram Sang Stable-dep-of: 05f933d5f731 ("i2c: nomadik: Remove a useless call in the remove function") Signed-off-by: Sasha Levin --- drivers/i2c/busses/i2c-ibm_iic.c | 4 +--- drivers/i2c/busses/i2c-nomadik.c | 1 - drivers/i2c/busses/i2c-sh7760.c | 1 - drivers/i2c/busses/i2c-tiny-usb.c | 4 +--- 4 files changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/i2c/busses/i2c-ibm_iic.c b/drivers/i2c/busses/i2c-ibm_iic.c index 9f71daf6db64..c073f5b8833a 100644 --- a/drivers/i2c/busses/i2c-ibm_iic.c +++ b/drivers/i2c/busses/i2c-ibm_iic.c @@ -694,10 +694,8 @@ static int iic_probe(struct platform_device *ofdev) int ret; dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (!dev) { - dev_err(&ofdev->dev, "failed to allocate device data\n"); + if (!dev) return -ENOMEM; - } platform_set_drvdata(ofdev, dev); diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index a2d12a5b1c34..05eaae5aeb18 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -972,7 +972,6 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) dev = devm_kzalloc(&adev->dev, sizeof(struct nmk_i2c_dev), GFP_KERNEL); if (!dev) { - dev_err(&adev->dev, "cannot allocate memory\n"); ret = -ENOMEM; goto err_no_mem; } diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c index 319d1fa617c8..a0ccc5d00987 100644 --- a/drivers/i2c/busses/i2c-sh7760.c +++ b/drivers/i2c/busses/i2c-sh7760.c @@ -445,7 +445,6 @@ static int sh7760_i2c_probe(struct platform_device *pdev) id = kzalloc(sizeof(struct cami2c), GFP_KERNEL); if (!id) { - dev_err(&pdev->dev, "no mem for private data\n"); ret = -ENOMEM; goto out0; } diff --git a/drivers/i2c/busses/i2c-tiny-usb.c b/drivers/i2c/busses/i2c-tiny-usb.c index 7279ca0eaa2d..d1fa9ff5aeab 100644 --- a/drivers/i2c/busses/i2c-tiny-usb.c +++ b/drivers/i2c/busses/i2c-tiny-usb.c @@ -226,10 +226,8 @@ static int i2c_tiny_usb_probe(struct usb_interface *interface, /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); - if (dev == NULL) { - dev_err(&interface->dev, "Out of memory\n"); + if (!dev) goto error; - } dev->usb_dev = usb_get_dev(interface_to_usbdev(interface)); dev->interface = interface; From 24562f0a46ada3e9d0763e3c80b5a11893fe2b98 Mon Sep 17 00:00:00 2001 From: Markus Elfring Date: Fri, 2 Feb 2018 14:50:09 +0100 Subject: [PATCH 010/513] i2c: Improve size determinations [ Upstream commit 06e989578232da33a7fe96b04191b862af8b2cec ] Replace the specification of a data structure by a pointer dereference as the parameter for the operator "sizeof" to make the corresponding size determination a bit safer according to the Linux coding style convention. This issue was detected by using the Coccinelle software. 
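In short, the preferred idiom ties the allocation size to the destination pointer, so it stays correct even if the pointer's type is later changed:

  /* discouraged: silently wrong if "id" ever changes type */
  id = kzalloc(sizeof(struct cami2c), GFP_KERNEL);

  /* preferred: the size always follows the pointer */
  id = kzalloc(sizeof(*id), GFP_KERNEL);
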
Signed-off-by: Markus Elfring Signed-off-by: Wolfram Sang Stable-dep-of: 05f933d5f731 ("i2c: nomadik: Remove a useless call in the remove function") Signed-off-by: Sasha Levin --- drivers/i2c/busses/i2c-nomadik.c | 2 +- drivers/i2c/busses/i2c-sh7760.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index 05eaae5aeb18..5004b9dd9856 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -970,7 +970,7 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) struct i2c_vendor_data *vendor = id->data; u32 max_fifo_threshold = (vendor->fifodepth / 2) - 1; - dev = devm_kzalloc(&adev->dev, sizeof(struct nmk_i2c_dev), GFP_KERNEL); + dev = devm_kzalloc(&adev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; goto err_no_mem; diff --git a/drivers/i2c/busses/i2c-sh7760.c b/drivers/i2c/busses/i2c-sh7760.c index a0ccc5d00987..051b904cb35f 100644 --- a/drivers/i2c/busses/i2c-sh7760.c +++ b/drivers/i2c/busses/i2c-sh7760.c @@ -443,7 +443,7 @@ static int sh7760_i2c_probe(struct platform_device *pdev) goto out0; } - id = kzalloc(sizeof(struct cami2c), GFP_KERNEL); + id = kzalloc(sizeof(*id), GFP_KERNEL); if (!id) { ret = -ENOMEM; goto out0; From 4954c870533991aeb93ab42a0bb686921229988b Mon Sep 17 00:00:00 2001 From: Andi Shyti Date: Sun, 11 Jun 2023 03:36:59 +0200 Subject: [PATCH 011/513] i2c: nomadik: Remove unnecessary goto label [ Upstream commit 1c5d33fff0d375e4ab7c4261dc62a286babbb4c6 ] The err_no_mem goto label doesn't do anything. Remove it. Signed-off-by: Andi Shyti Reviewed-by: Linus Walleij Signed-off-by: Wolfram Sang Stable-dep-of: 05f933d5f731 ("i2c: nomadik: Remove a useless call in the remove function") Signed-off-by: Sasha Levin --- drivers/i2c/busses/i2c-nomadik.c | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index 5004b9dd9856..8b9577318388 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -971,10 +971,9 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) u32 max_fifo_threshold = (vendor->fifodepth / 2) - 1; dev = devm_kzalloc(&adev->dev, sizeof(*dev), GFP_KERNEL); - if (!dev) { - ret = -ENOMEM; - goto err_no_mem; - } + if (!dev) + return -ENOMEM; + dev->vendor = vendor; dev->adev = adev; nmk_i2c_of_probe(np, dev); @@ -995,30 +994,27 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) dev->virtbase = devm_ioremap(&adev->dev, adev->res.start, resource_size(&adev->res)); - if (!dev->virtbase) { - ret = -ENOMEM; - goto err_no_mem; - } + if (!dev->virtbase) + return -ENOMEM; dev->irq = adev->irq[0]; ret = devm_request_irq(&adev->dev, dev->irq, i2c_irq_handler, 0, DRIVER_NAME, dev); if (ret) { dev_err(&adev->dev, "cannot claim the irq %d\n", dev->irq); - goto err_no_mem; + return ret; } dev->clk = devm_clk_get(&adev->dev, NULL); if (IS_ERR(dev->clk)) { dev_err(&adev->dev, "could not get i2c clock\n"); - ret = PTR_ERR(dev->clk); - goto err_no_mem; + return PTR_ERR(dev->clk); } ret = clk_prepare_enable(dev->clk); if (ret) { dev_err(&adev->dev, "can't prepare_enable clock\n"); - goto err_no_mem; + return ret; } init_hw(dev); @@ -1049,7 +1045,6 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) err_no_adap: clk_disable_unprepare(dev->clk); - err_no_mem: return ret; } From f07d8d345bd2d06ecdc613901c2a6d4c43d22508 Mon Sep 
17 00:00:00 2001 From: Andi Shyti Date: Sun, 11 Jun 2023 03:37:00 +0200 Subject: [PATCH 012/513] i2c: nomadik: Use devm_clk_get_enabled() [ Upstream commit 9c7174db4cdd111e10d19eed5c36fd978a14c8a2 ] Replace the pair of functions, devm_clk_get() and clk_prepare_enable(), with a single function devm_clk_get_enabled(). Signed-off-by: Andi Shyti Reviewed-by: Linus Walleij Signed-off-by: Wolfram Sang Stable-dep-of: 05f933d5f731 ("i2c: nomadik: Remove a useless call in the remove function") Signed-off-by: Sasha Levin --- drivers/i2c/busses/i2c-nomadik.c | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index 8b9577318388..2141ba05dfec 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -1005,18 +1005,12 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) return ret; } - dev->clk = devm_clk_get(&adev->dev, NULL); + dev->clk = devm_clk_get_enabled(&adev->dev, NULL); if (IS_ERR(dev->clk)) { - dev_err(&adev->dev, "could not get i2c clock\n"); + dev_err(&adev->dev, "could enable i2c clock\n"); return PTR_ERR(dev->clk); } - ret = clk_prepare_enable(dev->clk); - if (ret) { - dev_err(&adev->dev, "can't prepare_enable clock\n"); - return ret; - } - init_hw(dev); adap = &dev->adap; @@ -1037,16 +1031,11 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) ret = i2c_add_adapter(adap); if (ret) - goto err_no_adap; + return ret; pm_runtime_put(&adev->dev); return 0; - - err_no_adap: - clk_disable_unprepare(dev->clk); - - return ret; } static void nmk_i2c_remove(struct amba_device *adev) @@ -1060,7 +1049,6 @@ static void nmk_i2c_remove(struct amba_device *adev) clear_all_interrupts(dev); /* disable the controller */ i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE); - clk_disable_unprepare(dev->clk); release_mem_region(res->start, resource_size(res)); } From 8b9249d74ca5b790df28e8c9398930c575cc4e47 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Tue, 4 Jul 2023 21:50:28 +0200 Subject: [PATCH 013/513] i2c: nomadik: Remove a useless call in the remove function [ Upstream commit 05f933d5f7318b03ff2028c1704dc867ac16f2c7 ] Since commit 235602146ec9 ("i2c-nomadik: turn the platform driver to an amba driver"), there is no more request_mem_region() call in this driver. So remove the release_mem_region() call from the remove function which is likely a left over. 
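As a reminder of the rule the fix relies on (generic sketch, not taken from this driver): release_mem_region() is only the counterpart of an explicit request_mem_region() made in probe, e.g.

  if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME))   /* probe */
          return -EBUSY;
  ...
  release_mem_region(res->start, resource_size(res));                     /* remove */

Since this driver never claims the region itself, the release call has nothing to undo.
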
Fixes: 235602146ec9 ("i2c-nomadik: turn the platform driver to an amba driver") Cc: # v3.6+ Acked-by: Linus Walleij Reviewed-by: Andi Shyti Signed-off-by: Christophe JAILLET Signed-off-by: Wolfram Sang Signed-off-by: Sasha Levin --- drivers/i2c/busses/i2c-nomadik.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/i2c/busses/i2c-nomadik.c b/drivers/i2c/busses/i2c-nomadik.c index 2141ba05dfec..9c5d66bd6dc1 100644 --- a/drivers/i2c/busses/i2c-nomadik.c +++ b/drivers/i2c/busses/i2c-nomadik.c @@ -1040,7 +1040,6 @@ static int nmk_i2c_probe(struct amba_device *adev, const struct amba_id *id) static void nmk_i2c_remove(struct amba_device *adev) { - struct resource *res = &adev->res; struct nmk_i2c_dev *dev = amba_get_drvdata(adev); i2c_del_adapter(&dev->adap); @@ -1049,7 +1048,6 @@ static void nmk_i2c_remove(struct amba_device *adev) clear_all_interrupts(dev); /* disable the controller */ i2c_clr_bit(dev->virtbase + I2C_CR, I2C_CR_PE); - release_mem_region(res->start, resource_size(res)); } static struct i2c_vendor_data vendor_stn8815 = { From cf8c18150030970906ba771474972a8b8f83ca1d Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Tue, 20 Jun 2023 14:44:55 -0500 Subject: [PATCH 014/513] PCI/ASPM: Return 0 or -ETIMEDOUT from pcie_retrain_link() [ Upstream commit f5297a01ee805d7fa569d288ed65fc0f9ac9b03d ] "pcie_retrain_link" is not a question with a true/false answer, so "bool" isn't quite the right return type. Return 0 for success or -ETIMEDOUT if the retrain failed. No functional change intended. [bhelgaas: based on Ilpo's patch below] Link: https://lore.kernel.org/r/20230502083923.34562-1-ilpo.jarvinen@linux.intel.com Signed-off-by: Bjorn Helgaas Stable-dep-of: e7e39756363a ("PCI/ASPM: Avoid link retraining race") Signed-off-by: Sasha Levin --- drivers/pci/pcie/aspm.c | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index c58294f53fcd..e7f742ff31c3 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -192,7 +192,7 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) link->clkpm_disable = blacklist ? 1 : 0; } -static bool pcie_retrain_link(struct pcie_link_state *link) +static int pcie_retrain_link(struct pcie_link_state *link) { struct pci_dev *parent = link->pdev; unsigned long end_jiffies; @@ -219,7 +219,9 @@ static bool pcie_retrain_link(struct pcie_link_state *link) break; msleep(1); } while (time_before(jiffies, end_jiffies)); - return !(reg16 & PCI_EXP_LNKSTA_LT); + if (reg16 & PCI_EXP_LNKSTA_LT) + return -ETIMEDOUT; + return 0; } /* @@ -288,15 +290,15 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link) reg16 &= ~PCI_EXP_LNKCTL_CCC; pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); - if (pcie_retrain_link(link)) - return; + if (pcie_retrain_link(link)) { - /* Training failed. Restore common clock configurations */ - pci_err(parent, "ASPM: Could not configure common clock\n"); - list_for_each_entry(child, &linkbus->devices, bus_list) - pcie_capability_write_word(child, PCI_EXP_LNKCTL, + /* Training failed. 
Restore common clock configurations */ + pci_err(parent, "ASPM: Could not configure common clock\n"); + list_for_each_entry(child, &linkbus->devices, bus_list) + pcie_capability_write_word(child, PCI_EXP_LNKCTL, child_reg[PCI_FUNC(child->devfn)]); - pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg); + pcie_capability_write_word(parent, PCI_EXP_LNKCTL, parent_reg); + } } /* Convert L0s latency encoding to ns */ From 52d274956a8f6a4b72ab88637463fbfdbd8c9236 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?= Date: Tue, 20 Jun 2023 14:49:33 -0500 Subject: [PATCH 015/513] PCI/ASPM: Factor out pcie_wait_for_retrain() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 9c7f136433d26592cb4d9cd00b4e15c33d9797c6 ] Factor pcie_wait_for_retrain() out from pcie_retrain_link(). No functional change intended. [bhelgaas: split out from https: //lore.kernel.org/r/20230502083923.34562-1-ilpo.jarvinen@linux.intel.com] Signed-off-by: Ilpo Järvinen Signed-off-by: Bjorn Helgaas Stable-dep-of: e7e39756363a ("PCI/ASPM: Avoid link retraining race") Signed-off-by: Sasha Levin --- drivers/pci/pcie/aspm.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index e7f742ff31c3..5e09cbb56336 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -192,10 +192,26 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist) link->clkpm_disable = blacklist ? 1 : 0; } +static int pcie_wait_for_retrain(struct pci_dev *pdev) +{ + unsigned long end_jiffies; + u16 reg16; + + /* Wait for Link Training to be cleared by hardware */ + end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT; + do { + pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, ®16); + if (!(reg16 & PCI_EXP_LNKSTA_LT)) + return 0; + msleep(1); + } while (time_before(jiffies, end_jiffies)); + + return -ETIMEDOUT; +} + static int pcie_retrain_link(struct pcie_link_state *link) { struct pci_dev *parent = link->pdev; - unsigned long end_jiffies; u16 reg16; pcie_capability_read_word(parent, PCI_EXP_LNKCTL, ®16); @@ -211,17 +227,7 @@ static int pcie_retrain_link(struct pcie_link_state *link) pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); } - /* Wait for link training end. Break out after waiting for timeout */ - end_jiffies = jiffies + LINK_RETRAIN_TIMEOUT; - do { - pcie_capability_read_word(parent, PCI_EXP_LNKSTA, ®16); - if (!(reg16 & PCI_EXP_LNKSTA_LT)) - break; - msleep(1); - } while (time_before(jiffies, end_jiffies)); - if (reg16 & PCI_EXP_LNKSTA_LT) - return -ETIMEDOUT; - return 0; + return pcie_wait_for_retrain(parent); } /* From 05f13e85fbdd13d23dedae03d81767543e4c91ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?= Date: Tue, 2 May 2023 11:39:23 +0300 Subject: [PATCH 016/513] PCI/ASPM: Avoid link retraining race MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit e7e39756363ad5bd83ddeae1063193d0f13870fd ] PCIe r6.0.1, sec 7.5.3.7, recommends setting the link control parameters, then waiting for the Link Training bit to be clear before setting the Retrain Link bit. This avoids a race where the LTSSM may not use the updated parameters if it is already in the midst of link training because of other normal link activity. Wait for the Link Training bit to be clear before toggling the Retrain Link bit to ensure that the LTSSM uses the updated link control parameters. 
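Concretely, the recommended ordering is (a sketch; the real code is the patched pcie_retrain_link() below, which also handles a retrain-link erratum):

  rc = pcie_wait_for_retrain(parent);       /* 1. no training already in flight */
  if (rc)
          return rc;
  pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &reg16);
  reg16 |= PCI_EXP_LNKCTL_RL;               /* 2. start retraining */
  pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16);
  return pcie_wait_for_retrain(parent);     /* 3. wait for it to complete */
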
[bhelgaas: commit log, return 0 (success)/-ETIMEDOUT instead of bool for both pcie_wait_for_retrain() and the existing pcie_retrain_link()] Suggested-by: Lukas Wunner Fixes: 7d715a6c1ae5 ("PCI: add PCI Express ASPM support") Link: https://lore.kernel.org/r/20230502083923.34562-1-ilpo.jarvinen@linux.intel.com Signed-off-by: Ilpo Järvinen Signed-off-by: Bjorn Helgaas Reviewed-by: Lukas Wunner Cc: stable@vger.kernel.org Signed-off-by: Sasha Levin --- drivers/pci/pcie/aspm.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c index 5e09cbb56336..3078de668f91 100644 --- a/drivers/pci/pcie/aspm.c +++ b/drivers/pci/pcie/aspm.c @@ -212,8 +212,19 @@ static int pcie_wait_for_retrain(struct pci_dev *pdev) static int pcie_retrain_link(struct pcie_link_state *link) { struct pci_dev *parent = link->pdev; + int rc; u16 reg16; + /* + * Ensure the updated LNKCTL parameters are used during link + * training by checking that there is no ongoing link training to + * avoid LTSSM race as recommended in Implementation Note at the + * end of PCIe r6.0.1 sec 7.5.3.7. + */ + rc = pcie_wait_for_retrain(parent); + if (rc) + return rc; + pcie_capability_read_word(parent, PCI_EXP_LNKCTL, ®16); reg16 |= PCI_EXP_LNKCTL_RL; pcie_capability_write_word(parent, PCI_EXP_LNKCTL, reg16); From eb39c4c051dc2c0e5311b349a5f08e155e51e9d9 Mon Sep 17 00:00:00 2001 From: Rick Wertenbroek Date: Tue, 18 Apr 2023 09:46:48 +0200 Subject: [PATCH 017/513] PCI: rockchip: Remove writes to unused registers [ Upstream commit 92a9c57c325dd51682d428ba960d961fec3c8a08 ] Remove write accesses to registers that are marked "unused" (and therefore read-only) in the technical reference manual (TRM) (see RK3399 TRM 17.6.8.1) Link: https://lore.kernel.org/r/20230418074700.1083505-2-rick.wertenbroek@gmail.com Tested-by: Damien Le Moal Signed-off-by: Rick Wertenbroek Signed-off-by: Lorenzo Pieralisi Reviewed-by: Damien Le Moal Stable-dep-of: dc73ed0f1b8b ("PCI: rockchip: Fix window mapping and address translation for endpoint") Signed-off-by: Sasha Levin --- drivers/pci/controller/pcie-rockchip-ep.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index 827d91e73efa..9e17f3dba743 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -61,10 +61,6 @@ static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip, ROCKCHIP_PCIE_AT_OB_REGION_DESC0(region)); rockchip_pcie_write(rockchip, 0, ROCKCHIP_PCIE_AT_OB_REGION_DESC1(region)); - rockchip_pcie_write(rockchip, 0, - ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(region)); - rockchip_pcie_write(rockchip, 0, - ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(region)); } static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn, @@ -114,12 +110,6 @@ static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn, PCIE_CORE_OB_REGION_ADDR0_LO_ADDR); addr1 = upper_32_bits(cpu_addr); } - - /* CPU bus address region */ - rockchip_pcie_write(rockchip, addr0, - ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r)); - rockchip_pcie_write(rockchip, addr1, - ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r)); } static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn, From cbd1494e51fd11d6f5d87e2764d3e9ebfb411a1b Mon Sep 17 00:00:00 2001 From: Rick Wertenbroek Date: Tue, 18 Apr 2023 09:46:55 +0200 Subject: [PATCH 018/513] PCI: rockchip: Fix window mapping and address translation for endpoint [ 
Upstream commit dc73ed0f1b8bddd7f2bf70d123e68ffc99ad71ce ] The RK3399 PCI endpoint core has 33 windows for PCIe space, now in the driver up to 32 fixed size (1M) windows are used and pages are allocated and mapped accordingly. The driver first used a single window and allocated space inside which caused translation issues (between CPU space and PCI space) because a window can only have a single translation at a given time, which if multiple pages are allocated inside will cause conflicts. Now each window is a single region of 1M which will always guarantee that the translation is not in conflict. Set the translation register addresses for physical function. As documented in the technical reference manual (TRM) section 17.5.5 "PCIe Address Translation" and section 17.6.8 "Address Translation Registers Description" Link: https://lore.kernel.org/r/20230418074700.1083505-9-rick.wertenbroek@gmail.com Fixes: cf590b078391 ("PCI: rockchip: Add EP driver for Rockchip PCIe controller") Tested-by: Damien Le Moal Signed-off-by: Rick Wertenbroek Signed-off-by: Lorenzo Pieralisi Reviewed-by: Damien Le Moal Cc: stable@vger.kernel.org Signed-off-by: Sasha Levin --- drivers/pci/controller/pcie-rockchip-ep.c | 128 ++++++++++------------ drivers/pci/controller/pcie-rockchip.h | 35 +++--- 2 files changed, 75 insertions(+), 88 deletions(-) diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index 9e17f3dba743..3d6f828d29fc 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -64,52 +64,29 @@ static void rockchip_pcie_clear_ep_ob_atu(struct rockchip_pcie *rockchip, } static void rockchip_pcie_prog_ep_ob_atu(struct rockchip_pcie *rockchip, u8 fn, - u32 r, u32 type, u64 cpu_addr, - u64 pci_addr, size_t size) + u32 r, u64 cpu_addr, u64 pci_addr, + size_t size) { - u64 sz = 1ULL << fls64(size - 1); - int num_pass_bits = ilog2(sz); - u32 addr0, addr1, desc0, desc1; - bool is_nor_msg = (type == AXI_WRAPPER_NOR_MSG); + int num_pass_bits = fls64(size - 1); + u32 addr0, addr1, desc0; - /* The minimal region size is 1MB */ if (num_pass_bits < 8) num_pass_bits = 8; - cpu_addr -= rockchip->mem_res->start; - addr0 = ((is_nor_msg ? 0x10 : (num_pass_bits - 1)) & - PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) | - (lower_32_bits(cpu_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR); - addr1 = upper_32_bits(is_nor_msg ? 
cpu_addr : pci_addr); - desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | type; - desc1 = 0; - - if (is_nor_msg) { - rockchip_pcie_write(rockchip, 0, - ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r)); - rockchip_pcie_write(rockchip, 0, - ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r)); - rockchip_pcie_write(rockchip, desc0, - ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r)); - rockchip_pcie_write(rockchip, desc1, - ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r)); - } else { - /* PCI bus address region */ - rockchip_pcie_write(rockchip, addr0, - ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r)); - rockchip_pcie_write(rockchip, addr1, - ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r)); - rockchip_pcie_write(rockchip, desc0, - ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r)); - rockchip_pcie_write(rockchip, desc1, - ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r)); - - addr0 = - ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) | - (lower_32_bits(cpu_addr) & - PCIE_CORE_OB_REGION_ADDR0_LO_ADDR); - addr1 = upper_32_bits(cpu_addr); - } + addr0 = ((num_pass_bits - 1) & PCIE_CORE_OB_REGION_ADDR0_NUM_BITS) | + (lower_32_bits(pci_addr) & PCIE_CORE_OB_REGION_ADDR0_LO_ADDR); + addr1 = upper_32_bits(pci_addr); + desc0 = ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(fn) | AXI_WRAPPER_MEM_WRITE; + + /* PCI bus address region */ + rockchip_pcie_write(rockchip, addr0, + ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r)); + rockchip_pcie_write(rockchip, addr1, + ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r)); + rockchip_pcie_write(rockchip, desc0, + ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r)); + rockchip_pcie_write(rockchip, 0, + ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r)); } static int rockchip_pcie_ep_write_header(struct pci_epc *epc, u8 fn, u8 vfn, @@ -248,26 +225,20 @@ static void rockchip_pcie_ep_clear_bar(struct pci_epc *epc, u8 fn, u8 vfn, ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar)); } +static inline u32 rockchip_ob_region(phys_addr_t addr) +{ + return (addr >> ilog2(SZ_1M)) & 0x1f; +} + static int rockchip_pcie_ep_map_addr(struct pci_epc *epc, u8 fn, u8 vfn, phys_addr_t addr, u64 pci_addr, size_t size) { struct rockchip_pcie_ep *ep = epc_get_drvdata(epc); struct rockchip_pcie *pcie = &ep->rockchip; - u32 r; + u32 r = rockchip_ob_region(addr); - r = find_first_zero_bit(&ep->ob_region_map, BITS_PER_LONG); - /* - * Region 0 is reserved for configuration space and shouldn't - * be used elsewhere per TRM, so leave it out. - */ - if (r >= ep->max_regions - 1) { - dev_err(&epc->dev, "no free outbound region\n"); - return -EINVAL; - } - - rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, AXI_WRAPPER_MEM_WRITE, addr, - pci_addr, size); + rockchip_pcie_prog_ep_ob_atu(pcie, fn, r, addr, pci_addr, size); set_bit(r, &ep->ob_region_map); ep->ob_addr[r] = addr; @@ -282,15 +253,11 @@ static void rockchip_pcie_ep_unmap_addr(struct pci_epc *epc, u8 fn, u8 vfn, struct rockchip_pcie *rockchip = &ep->rockchip; u32 r; - for (r = 0; r < ep->max_regions - 1; r++) + for (r = 0; r < ep->max_regions; r++) if (ep->ob_addr[r] == addr) break; - /* - * Region 0 is reserved for configuration space and shouldn't - * be used elsewhere per TRM, so leave it out. 
- */ - if (r == ep->max_regions - 1) + if (r == ep->max_regions) return; rockchip_pcie_clear_ep_ob_atu(rockchip, r); @@ -387,7 +354,8 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn, struct rockchip_pcie *rockchip = &ep->rockchip; u32 flags, mme, data, data_mask; u8 msi_count; - u64 pci_addr, pci_addr_mask = 0xff; + u64 pci_addr; + u32 r; /* Check MSI enable bit */ flags = rockchip_pcie_read(&ep->rockchip, @@ -421,21 +389,20 @@ static int rockchip_pcie_ep_send_msi_irq(struct rockchip_pcie_ep *ep, u8 fn, ROCKCHIP_PCIE_EP_FUNC_BASE(fn) + ROCKCHIP_PCIE_EP_MSI_CTRL_REG + PCI_MSI_ADDRESS_LO); - pci_addr &= GENMASK_ULL(63, 2); /* Set the outbound region if needed. */ - if (unlikely(ep->irq_pci_addr != (pci_addr & ~pci_addr_mask) || + if (unlikely(ep->irq_pci_addr != (pci_addr & PCIE_ADDR_MASK) || ep->irq_pci_fn != fn)) { - rockchip_pcie_prog_ep_ob_atu(rockchip, fn, ep->max_regions - 1, - AXI_WRAPPER_MEM_WRITE, + r = rockchip_ob_region(ep->irq_phys_addr); + rockchip_pcie_prog_ep_ob_atu(rockchip, fn, r, ep->irq_phys_addr, - pci_addr & ~pci_addr_mask, - pci_addr_mask + 1); - ep->irq_pci_addr = (pci_addr & ~pci_addr_mask); + pci_addr & PCIE_ADDR_MASK, + ~PCIE_ADDR_MASK + 1); + ep->irq_pci_addr = (pci_addr & PCIE_ADDR_MASK); ep->irq_pci_fn = fn; } - writew(data, ep->irq_cpu_addr + (pci_addr & pci_addr_mask)); + writew(data, ep->irq_cpu_addr + (pci_addr & ~PCIE_ADDR_MASK)); return 0; } @@ -517,6 +484,8 @@ static int rockchip_pcie_parse_ep_dt(struct rockchip_pcie *rockchip, if (err < 0 || ep->max_regions > MAX_REGION_LIMIT) ep->max_regions = MAX_REGION_LIMIT; + ep->ob_region_map = 0; + err = of_property_read_u8(dev->of_node, "max-functions", &ep->epc->max_functions); if (err < 0) @@ -537,7 +506,8 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev) struct rockchip_pcie *rockchip; struct pci_epc *epc; size_t max_regions; - int err; + struct pci_epc_mem_window *windows = NULL; + int err, i; ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); if (!ep) @@ -584,15 +554,27 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev) /* Only enable function 0 by default */ rockchip_pcie_write(rockchip, BIT(0), PCIE_CORE_PHY_FUNC_CFG); - err = pci_epc_mem_init(epc, rockchip->mem_res->start, - resource_size(rockchip->mem_res), PAGE_SIZE); + windows = devm_kcalloc(dev, ep->max_regions, + sizeof(struct pci_epc_mem_window), GFP_KERNEL); + if (!windows) { + err = -ENOMEM; + goto err_uninit_port; + } + for (i = 0; i < ep->max_regions; i++) { + windows[i].phys_base = rockchip->mem_res->start + (SZ_1M * i); + windows[i].size = SZ_1M; + windows[i].page_size = SZ_1M; + } + err = pci_epc_multi_mem_init(epc, windows, ep->max_regions); + devm_kfree(dev, windows); + if (err < 0) { dev_err(dev, "failed to initialize the memory space\n"); goto err_uninit_port; } ep->irq_cpu_addr = pci_epc_mem_alloc_addr(epc, &ep->irq_phys_addr, - SZ_128K); + SZ_1M); if (!ep->irq_cpu_addr) { dev_err(dev, "failed to reserve memory space for MSI\n"); err = -ENOMEM; diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index cbd2fd25ba76..498a40251d0b 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ b/drivers/pci/controller/pcie-rockchip.h @@ -139,6 +139,7 @@ #define PCIE_RC_RP_ATS_BASE 0x400000 #define PCIE_RC_CONFIG_NORMAL_BASE 0x800000 +#define PCIE_EP_PF_CONFIG_REGS_BASE 0x800000 #define PCIE_RC_CONFIG_BASE 0xa00000 #define PCIE_EP_CONFIG_BASE 0xa00000 #define PCIE_EP_CONFIG_DID_VID (PCIE_EP_CONFIG_BASE + 0x00) @@ -158,10 +159,11 @@ #define 
PCIE_RC_CONFIG_THP_CAP (PCIE_RC_CONFIG_BASE + 0x274) #define PCIE_RC_CONFIG_THP_CAP_NEXT_MASK GENMASK(31, 20) +#define PCIE_ADDR_MASK 0xffffff00 #define PCIE_CORE_AXI_CONF_BASE 0xc00000 #define PCIE_CORE_OB_REGION_ADDR0 (PCIE_CORE_AXI_CONF_BASE + 0x0) #define PCIE_CORE_OB_REGION_ADDR0_NUM_BITS 0x3f -#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR 0xffffff00 +#define PCIE_CORE_OB_REGION_ADDR0_LO_ADDR PCIE_ADDR_MASK #define PCIE_CORE_OB_REGION_ADDR1 (PCIE_CORE_AXI_CONF_BASE + 0x4) #define PCIE_CORE_OB_REGION_DESC0 (PCIE_CORE_AXI_CONF_BASE + 0x8) #define PCIE_CORE_OB_REGION_DESC1 (PCIE_CORE_AXI_CONF_BASE + 0xc) @@ -169,7 +171,7 @@ #define PCIE_CORE_AXI_INBOUND_BASE 0xc00800 #define PCIE_RP_IB_ADDR0 (PCIE_CORE_AXI_INBOUND_BASE + 0x0) #define PCIE_CORE_IB_REGION_ADDR0_NUM_BITS 0x3f -#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR 0xffffff00 +#define PCIE_CORE_IB_REGION_ADDR0_LO_ADDR PCIE_ADDR_MASK #define PCIE_RP_IB_ADDR1 (PCIE_CORE_AXI_INBOUND_BASE + 0x4) /* Size of one AXI Region (not Region 0) */ @@ -234,13 +236,15 @@ #define ROCKCHIP_PCIE_EP_MSI_CTRL_ME BIT(16) #define ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP BIT(24) #define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1 -#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) (((fn) << 12) & GENMASK(19, 12)) +#define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3 +#define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) \ + (PCIE_EP_PF_CONFIG_REGS_BASE + (((fn) << 12) & GENMASK(19, 12))) +#define ROCKCHIP_PCIE_EP_VIRT_FUNC_BASE(fn) \ + (PCIE_EP_PF_CONFIG_REGS_BASE + 0x10000 + (((fn) << 12) & GENMASK(19, 12))) #define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR0(fn, bar) \ - (PCIE_RC_RP_ATS_BASE + 0x0840 + (fn) * 0x0040 + (bar) * 0x0008) + (PCIE_CORE_AXI_CONF_BASE + 0x0828 + (fn) * 0x0040 + (bar) * 0x0008) #define ROCKCHIP_PCIE_AT_IB_EP_FUNC_BAR_ADDR1(fn, bar) \ - (PCIE_RC_RP_ATS_BASE + 0x0844 + (fn) * 0x0040 + (bar) * 0x0008) -#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \ - (PCIE_RC_RP_ATS_BASE + 0x0000 + ((r) & 0x1f) * 0x0020) + (PCIE_CORE_AXI_CONF_BASE + 0x082c + (fn) * 0x0040 + (bar) * 0x0008) #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN_MASK GENMASK(19, 12) #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_DEVFN(devfn) \ (((devfn) << 12) & \ @@ -248,20 +252,21 @@ #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK GENMASK(27, 20) #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS(bus) \ (((bus) << 20) & ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0_BUS_MASK) +#define PCIE_RC_EP_ATR_OB_REGIONS_1_32 (PCIE_CORE_AXI_CONF_BASE + 0x0020) +#define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR0(r) \ + (PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0000 + ((r) & 0x1f) * 0x0020) #define ROCKCHIP_PCIE_AT_OB_REGION_PCI_ADDR1(r) \ - (PCIE_RC_RP_ATS_BASE + 0x0004 + ((r) & 0x1f) * 0x0020) + (PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0004 + ((r) & 0x1f) * 0x0020) #define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_HARDCODED_RID BIT(23) #define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK GENMASK(31, 24) #define ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN(devfn) \ (((devfn) << 24) & ROCKCHIP_PCIE_AT_OB_REGION_DESC0_DEVFN_MASK) #define ROCKCHIP_PCIE_AT_OB_REGION_DESC0(r) \ - (PCIE_RC_RP_ATS_BASE + 0x0008 + ((r) & 0x1f) * 0x0020) -#define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r) \ - (PCIE_RC_RP_ATS_BASE + 0x000c + ((r) & 0x1f) * 0x0020) -#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR0(r) \ - (PCIE_RC_RP_ATS_BASE + 0x0018 + ((r) & 0x1f) * 0x0020) -#define ROCKCHIP_PCIE_AT_OB_REGION_CPU_ADDR1(r) \ - (PCIE_RC_RP_ATS_BASE + 0x001c + ((r) & 0x1f) * 0x0020) + (PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0008 + ((r) & 0x1f) * 0x0020) +#define ROCKCHIP_PCIE_AT_OB_REGION_DESC1(r) \ + 
(PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x000c + ((r) & 0x1f) * 0x0020) +#define ROCKCHIP_PCIE_AT_OB_REGION_DESC2(r) \ + (PCIE_RC_EP_ATR_OB_REGIONS_1_32 + 0x0010 + ((r) & 0x1f) * 0x0020) #define ROCKCHIP_PCIE_CORE_EP_FUNC_BAR_CFG0(fn) \ (PCIE_CORE_CTRL_MGMT_BASE + 0x0240 + (fn) * 0x0008) From b409d8df9bea6d870b187046a30676ecb88d0662 Mon Sep 17 00:00:00 2001 From: Rick Wertenbroek Date: Tue, 18 Apr 2023 09:46:57 +0200 Subject: [PATCH 019/513] PCI: rockchip: Don't advertise MSI-X in PCIe capabilities [ Upstream commit a52587e0bee14cbeeadf48a24013828cb04b8df8 ] The RK3399 PCIe endpoint controller cannot generate MSI-X IRQs. This is documented in the RK3399 technical reference manual (TRM) section 17.5.9 "Interrupt Support". MSI-X capability should therefore not be advertised. Remove the MSI-X capability by editing the capability linked-list. The previous entry is the MSI capability, therefore get the next entry from the MSI-X capability entry and set it as next entry for the MSI capability. This in effect removes MSI-X from the list. Linked list before : MSI cap -> MSI-X cap -> PCIe Device cap -> ... Linked list now : MSI cap -> PCIe Device cap -> ... Link: https://lore.kernel.org/r/20230418074700.1083505-11-rick.wertenbroek@gmail.com Fixes: cf590b078391 ("PCI: rockchip: Add EP driver for Rockchip PCIe controller") Tested-by: Damien Le Moal Signed-off-by: Rick Wertenbroek Signed-off-by: Lorenzo Pieralisi Reviewed-by: Damien Le Moal Cc: stable@vger.kernel.org Signed-off-by: Sasha Levin --- drivers/pci/controller/pcie-rockchip-ep.c | 24 +++++++++++++++++++++++ drivers/pci/controller/pcie-rockchip.h | 5 +++++ 2 files changed, 29 insertions(+) diff --git a/drivers/pci/controller/pcie-rockchip-ep.c b/drivers/pci/controller/pcie-rockchip-ep.c index 3d6f828d29fc..0af0e965fb57 100644 --- a/drivers/pci/controller/pcie-rockchip-ep.c +++ b/drivers/pci/controller/pcie-rockchip-ep.c @@ -508,6 +508,7 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev) size_t max_regions; struct pci_epc_mem_window *windows = NULL; int err, i; + u32 cfg_msi, cfg_msix_cp; ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); if (!ep) @@ -583,6 +584,29 @@ static int rockchip_pcie_ep_probe(struct platform_device *pdev) ep->irq_pci_addr = ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR; + /* + * MSI-X is not supported but the controller still advertises the MSI-X + * capability by default, which can lead to the Root Complex side + * allocating MSI-X vectors which cannot be used. Avoid this by skipping + * the MSI-X capability entry in the PCIe capabilities linked-list: get + * the next pointer from the MSI-X entry and set that in the MSI + * capability entry (which is the previous entry). This way the MSI-X + * entry is skipped (left out of the linked-list) and not advertised. 
+ */ + cfg_msi = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE + + ROCKCHIP_PCIE_EP_MSI_CTRL_REG); + + cfg_msi &= ~ROCKCHIP_PCIE_EP_MSI_CP1_MASK; + + cfg_msix_cp = rockchip_pcie_read(rockchip, PCIE_EP_CONFIG_BASE + + ROCKCHIP_PCIE_EP_MSIX_CAP_REG) & + ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK; + + cfg_msi |= cfg_msix_cp; + + rockchip_pcie_write(rockchip, cfg_msi, + PCIE_EP_CONFIG_BASE + ROCKCHIP_PCIE_EP_MSI_CTRL_REG); + rockchip_pcie_write(rockchip, PCIE_CLIENT_CONF_ENABLE, PCIE_CLIENT_CONFIG); diff --git a/drivers/pci/controller/pcie-rockchip.h b/drivers/pci/controller/pcie-rockchip.h index 498a40251d0b..88e2bf65e433 100644 --- a/drivers/pci/controller/pcie-rockchip.h +++ b/drivers/pci/controller/pcie-rockchip.h @@ -228,6 +228,8 @@ #define ROCKCHIP_PCIE_EP_CMD_STATUS 0x4 #define ROCKCHIP_PCIE_EP_CMD_STATUS_IS BIT(19) #define ROCKCHIP_PCIE_EP_MSI_CTRL_REG 0x90 +#define ROCKCHIP_PCIE_EP_MSI_CP1_OFFSET 8 +#define ROCKCHIP_PCIE_EP_MSI_CP1_MASK GENMASK(15, 8) #define ROCKCHIP_PCIE_EP_MSI_FLAGS_OFFSET 16 #define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_OFFSET 17 #define ROCKCHIP_PCIE_EP_MSI_CTRL_MMC_MASK GENMASK(19, 17) @@ -235,6 +237,9 @@ #define ROCKCHIP_PCIE_EP_MSI_CTRL_MME_MASK GENMASK(22, 20) #define ROCKCHIP_PCIE_EP_MSI_CTRL_ME BIT(16) #define ROCKCHIP_PCIE_EP_MSI_CTRL_MASK_MSI_CAP BIT(24) +#define ROCKCHIP_PCIE_EP_MSIX_CAP_REG 0xb0 +#define ROCKCHIP_PCIE_EP_MSIX_CAP_CP_OFFSET 8 +#define ROCKCHIP_PCIE_EP_MSIX_CAP_CP_MASK GENMASK(15, 8) #define ROCKCHIP_PCIE_EP_DUMMY_IRQ_ADDR 0x1 #define ROCKCHIP_PCIE_EP_PCI_LEGACY_IRQ_ADDR 0x3 #define ROCKCHIP_PCIE_EP_FUNC_BASE(fn) \ From 75ce95abc65b341345b6c78de295df06802fdb3d Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 4 Apr 2022 16:06:32 -0400 Subject: [PATCH 020/513] dlm: cleanup plock_op vs plock_xop [ Upstream commit bcbb4ba6c9ba81e6975b642a2cade68044cd8a66 ] Lately the different casting between plock_op and plock_xop and list holders which was involved showed some issues which were hard to see. This patch removes the "plock_xop" structure and introduces a "struct plock_async_data". This structure will be set in "struct plock_op" in case of asynchronous lock handling as the original "plock_xop" was made for. There is no need anymore to cast pointers around for additional fields in case of asynchronous lock handling. As disadvantage another allocation was introduces but only needed in the asynchronous case which is currently only used in combination with nfs lockd. 
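The resulting shape, roughly (the exact fields are in the diff below): the base operation keeps a pointer that is only allocated on the asynchronous (lockd) path, so a non-NULL op->data also serves as the "handle asynchronously" flag:

  struct plock_op {
          struct list_head list;
          int done;
          struct dlm_plock_info info;
          /* if set indicates async handling */
          struct plock_async_data *data;
  };
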
Signed-off-by: Alexander Aring Signed-off-by: David Teigland Stable-dep-of: 59e45c758ca1 ("fs: dlm: interrupt posix locks only when process is killed") Signed-off-by: Sasha Levin --- fs/dlm/plock.c | 77 ++++++++++++++++++++++++++++++-------------------- 1 file changed, 46 insertions(+), 31 deletions(-) diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c index edce0b25cd90..e70e23eca03e 100644 --- a/fs/dlm/plock.c +++ b/fs/dlm/plock.c @@ -19,20 +19,20 @@ static struct list_head recv_list; static wait_queue_head_t send_wq; static wait_queue_head_t recv_wq; -struct plock_op { - struct list_head list; - int done; - struct dlm_plock_info info; - int (*callback)(struct file_lock *fl, int result); -}; - -struct plock_xop { - struct plock_op xop; +struct plock_async_data { void *fl; void *file; struct file_lock flc; + int (*callback)(struct file_lock *fl, int result); }; +struct plock_op { + struct list_head list; + int done; + struct dlm_plock_info info; + /* if set indicates async handling */ + struct plock_async_data *data; +}; static inline void set_version(struct dlm_plock_info *info) { @@ -58,6 +58,12 @@ static int check_version(struct dlm_plock_info *info) return 0; } +static void dlm_release_plock_op(struct plock_op *op) +{ + kfree(op->data); + kfree(op); +} + static void send_op(struct plock_op *op) { set_version(&op->info); @@ -101,22 +107,21 @@ static void do_unlock_close(struct dlm_ls *ls, u64 number, int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, int cmd, struct file_lock *fl) { + struct plock_async_data *op_data; struct dlm_ls *ls; struct plock_op *op; - struct plock_xop *xop; int rv; ls = dlm_find_lockspace_local(lockspace); if (!ls) return -EINVAL; - xop = kzalloc(sizeof(*xop), GFP_NOFS); - if (!xop) { + op = kzalloc(sizeof(*op), GFP_NOFS); + if (!op) { rv = -ENOMEM; goto out; } - op = &xop->xop; op->info.optype = DLM_PLOCK_OP_LOCK; op->info.pid = fl->fl_pid; op->info.ex = (fl->fl_type == F_WRLCK); @@ -125,22 +130,32 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, op->info.number = number; op->info.start = fl->fl_start; op->info.end = fl->fl_end; + /* async handling */ if (fl->fl_lmops && fl->fl_lmops->lm_grant) { + op_data = kzalloc(sizeof(*op_data), GFP_NOFS); + if (!op_data) { + dlm_release_plock_op(op); + rv = -ENOMEM; + goto out; + } + /* fl_owner is lockd which doesn't distinguish processes on the nfs client */ op->info.owner = (__u64) fl->fl_pid; - op->callback = fl->fl_lmops->lm_grant; - locks_init_lock(&xop->flc); - locks_copy_lock(&xop->flc, fl); - xop->fl = fl; - xop->file = file; + op_data->callback = fl->fl_lmops->lm_grant; + locks_init_lock(&op_data->flc); + locks_copy_lock(&op_data->flc, fl); + op_data->fl = fl; + op_data->file = file; + + op->data = op_data; } else { op->info.owner = (__u64)(long) fl->fl_owner; } send_op(op); - if (!op->callback) { + if (!op->data) { rv = wait_event_interruptible(recv_wq, (op->done != 0)); if (rv == -ERESTARTSYS) { log_debug(ls, "dlm_posix_lock: wait killed %llx", @@ -148,7 +163,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, spin_lock(&ops_lock); list_del(&op->list); spin_unlock(&ops_lock); - kfree(xop); + dlm_release_plock_op(op); do_unlock_close(ls, number, file, fl); goto out; } @@ -173,7 +188,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, (unsigned long long)number); } - kfree(xop); + dlm_release_plock_op(op); out: dlm_put_lockspace(ls); return rv; @@ -183,11 +198,11 @@ 
EXPORT_SYMBOL_GPL(dlm_posix_lock); /* Returns failure iff a successful lock operation should be canceled */ static int dlm_plock_callback(struct plock_op *op) { + struct plock_async_data *op_data = op->data; struct file *file; struct file_lock *fl; struct file_lock *flc; int (*notify)(struct file_lock *fl, int result) = NULL; - struct plock_xop *xop = (struct plock_xop *)op; int rv = 0; spin_lock(&ops_lock); @@ -199,10 +214,10 @@ static int dlm_plock_callback(struct plock_op *op) spin_unlock(&ops_lock); /* check if the following 2 are still valid or make a copy */ - file = xop->file; - flc = &xop->flc; - fl = xop->fl; - notify = op->callback; + file = op_data->file; + flc = &op_data->flc; + fl = op_data->fl; + notify = op_data->callback; if (op->info.rv) { notify(fl, op->info.rv); @@ -233,7 +248,7 @@ static int dlm_plock_callback(struct plock_op *op) } out: - kfree(xop); + dlm_release_plock_op(op); return rv; } @@ -303,7 +318,7 @@ int dlm_posix_unlock(dlm_lockspace_t *lockspace, u64 number, struct file *file, rv = 0; out_free: - kfree(op); + dlm_release_plock_op(op); out: dlm_put_lockspace(ls); fl->fl_flags = fl_flags; @@ -371,7 +386,7 @@ int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file, rv = 0; } - kfree(op); + dlm_release_plock_op(op); out: dlm_put_lockspace(ls); return rv; @@ -407,7 +422,7 @@ static ssize_t dev_read(struct file *file, char __user *u, size_t count, (the process did not make an unlock call). */ if (op->info.flags & DLM_PLOCK_FL_CLOSE) - kfree(op); + dlm_release_plock_op(op); if (copy_to_user(u, &info, sizeof(info))) return -EFAULT; @@ -439,7 +454,7 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count, op->info.owner == info.owner) { list_del_init(&op->list); memcpy(&op->info, &info, sizeof(info)); - if (op->callback) + if (op->data) do_callback = 1; else op->done = 1; From 97e7a0f8dea2088409adb46c4142e79197d10c81 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Mon, 4 Apr 2022 16:06:33 -0400 Subject: [PATCH 021/513] dlm: rearrange async condition return [ Upstream commit a800ba77fd285c6391a82819867ac64e9ab3af46 ] This patch moves the return of FILE_LOCK_DEFERRED a little bit earlier than checking afterwards again if the request was an asynchronous request. 
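Reduced to its control flow, and writing "async" as shorthand for the fl->fl_lmops && fl->fl_lmops->lm_grant test, the reshuffle looks like this sketch (not the literal function body):

    if (async) {
            op->data = op_data;
            send_op(op);
            rv = FILE_LOCK_DEFERRED;        /* result arrives via the callback */
            goto out;
    }

    /* synchronous request: send it and block for the result */
    send_op(op);
    rv = wait_event_interruptible(recv_wq, (op->done != 0));

The second test of op->data after send_op() disappears because the asynchronous case has already taken the early exit.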
Signed-off-by: Alexander Aring Signed-off-by: David Teigland Stable-dep-of: 59e45c758ca1 ("fs: dlm: interrupt posix locks only when process is killed") Signed-off-by: Sasha Levin --- fs/dlm/plock.c | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c index e70e23eca03e..01fb7d8c0bca 100644 --- a/fs/dlm/plock.c +++ b/fs/dlm/plock.c @@ -149,26 +149,25 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, op_data->file = file; op->data = op_data; + + send_op(op); + rv = FILE_LOCK_DEFERRED; + goto out; } else { op->info.owner = (__u64)(long) fl->fl_owner; } send_op(op); - if (!op->data) { - rv = wait_event_interruptible(recv_wq, (op->done != 0)); - if (rv == -ERESTARTSYS) { - log_debug(ls, "dlm_posix_lock: wait killed %llx", - (unsigned long long)number); - spin_lock(&ops_lock); - list_del(&op->list); - spin_unlock(&ops_lock); - dlm_release_plock_op(op); - do_unlock_close(ls, number, file, fl); - goto out; - } - } else { - rv = FILE_LOCK_DEFERRED; + rv = wait_event_interruptible(recv_wq, (op->done != 0)); + if (rv == -ERESTARTSYS) { + log_debug(ls, "%s: wait killed %llx", __func__, + (unsigned long long)number); + spin_lock(&ops_lock); + list_del(&op->list); + spin_unlock(&ops_lock); + dlm_release_plock_op(op); + do_unlock_close(ls, number, file, fl); goto out; } From 354cdda79a774c2242e80924e70532904e270609 Mon Sep 17 00:00:00 2001 From: Alexander Aring Date: Fri, 19 May 2023 11:21:26 -0400 Subject: [PATCH 022/513] fs: dlm: interrupt posix locks only when process is killed [ Upstream commit 59e45c758ca1b9893ac923dd63536da946ac333b ] If a posix lock request is waiting for a result from user space (dlm_controld), do not let it be interrupted unless the process is killed. This reverts commit a6b1533e9a57 ("dlm: make posix locks interruptible"). The problem with the interruptible change is that all locks were cleared on any signal interrupt. If a signal was received that did not terminate the process, the process could continue running after all its dlm posix locks had been cleared. A future patch will add cancelation to allow proper interruption. Cc: stable@vger.kernel.org Fixes: a6b1533e9a57 ("dlm: make posix locks interruptible") Signed-off-by: Alexander Aring Signed-off-by: David Teigland Signed-off-by: Sasha Levin --- fs/dlm/plock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c index 01fb7d8c0bca..f3482e936cc2 100644 --- a/fs/dlm/plock.c +++ b/fs/dlm/plock.c @@ -159,7 +159,7 @@ int dlm_posix_lock(dlm_lockspace_t *lockspace, u64 number, struct file *file, send_op(op); - rv = wait_event_interruptible(recv_wq, (op->done != 0)); + rv = wait_event_killable(recv_wq, (op->done != 0)); if (rv == -ERESTARTSYS) { log_debug(ls, "%s: wait killed %llx", __func__, (unsigned long long)number); From 4400b96587fd8fa526afd5dad4fc708546a08bba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= Date: Tue, 7 Mar 2023 15:46:19 +0100 Subject: [PATCH 023/513] drm/ttm: Don't print error message if eviction was interrupted MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 8ab3b0663e279ab550bc2c0b5d602960e8b94e02 ] Avoid printing an error message if eviction was interrupted by, for example, the user pressing CTRL-C. That may happen if eviction is waiting for something, like for example a free batch-buffer. 
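The rule being applied is small enough to state as a predicate; the helper name below is illustrative and does not exist in the tree:

    /* Only real failures are worth logging: -ERESTARTSYS and -EINTR just
     * mean the wait was interrupted (for instance by a signal) and the
     * caller will back out or retry on its own. */
    static bool eviction_failure_worth_logging(int ret)
    {
            return ret && ret != -ERESTARTSYS && ret != -EINTR;
    }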
Signed-off-by: Thomas Hellström Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20230307144621.10748-6-thomas.hellstrom@linux.intel.com Stable-dep-of: e8188c461ee0 ("drm/ttm: Don't leak a resource on eviction error") Signed-off-by: Sasha Levin --- drivers/gpu/drm/ttm/ttm_bo.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index d5a2b69489e7..cf390ab63697 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -557,7 +557,8 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, if (ret == -EMULTIHOP) { ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop); if (ret) { - pr_err("Buffer eviction failed\n"); + if (ret != -ERESTARTSYS && ret != -EINTR) + pr_err("Buffer eviction failed\n"); ttm_resource_free(bo, &evict_mem); goto out; } From 7738335d73d0686ec8995e0448e5d1b48cffb2a4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Thomas=20Hellstr=C3=B6m?= Date: Mon, 26 Jun 2023 11:14:49 +0200 Subject: [PATCH 024/513] drm/ttm: Don't leak a resource on eviction error MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit e8188c461ee015ba0b9ab2fc82dbd5ebca5a5532 ] On eviction errors other than -EMULTIHOP we were leaking a resource. Fix. v2: - Avoid yet another goto (Andi Shyti) Fixes: 403797925768 ("drm/ttm: Fix multihop assert on eviction.") Cc: Andrey Grodzovsky Cc: Christian König Cc: Christian Koenig Cc: Huang Rui Cc: dri-devel@lists.freedesktop.org Cc: # v5.15+ Signed-off-by: Thomas Hellström Reviewed-by: Nirmoy Das #v1 Reviewed-by: Andi Shyti Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20230626091450.14757-4-thomas.hellstrom@linux.intel.com Signed-off-by: Sasha Levin --- drivers/gpu/drm/ttm/ttm_bo.c | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cf390ab63697..6080f4b5c450 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -552,18 +552,18 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, goto out; } -bounce: - ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop); - if (ret == -EMULTIHOP) { + do { + ret = ttm_bo_handle_move_mem(bo, evict_mem, true, ctx, &hop); + if (ret != -EMULTIHOP) + break; + ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop); - if (ret) { - if (ret != -ERESTARTSYS && ret != -EINTR) - pr_err("Buffer eviction failed\n"); - ttm_resource_free(bo, &evict_mem); - goto out; - } - /* try and move to final place now. */ - goto bounce; + } while (!ret); + + if (ret) { + ttm_resource_free(bo, &evict_mem); + if (ret != -ERESTARTSYS && ret != -EINTR) + pr_err("Buffer eviction failed\n"); } out: return ret; From d262770b95c717353d5beb3e6cc1abce98d59f30 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?= Date: Fri, 11 Nov 2022 16:25:02 +0200 Subject: [PATCH 025/513] n_tty: Rename tail to old_tail in n_tty_read() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 947d66b68f3c4e7cf8f3f3500807b9d2a0de28ce ] The local tail variable in n_tty_read() is used for one purpose, it keeps the old tail. Thus, rename it appropriately to improve code readability. 
Signed-off-by: Ilpo Järvinen Reviewed-by: Jiri Slaby Link: https://lore.kernel.org/r/22b37499-ff9a-7fc1-f6e0-58411328d122@linux.intel.com Signed-off-by: Greg Kroah-Hartman Stable-dep-of: 4903fde8047a ("tty: fix hang on tty device with no_room set") Signed-off-by: Sasha Levin --- drivers/tty/n_tty.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 891036bd9f89..8f57448e1ce4 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -2100,7 +2100,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, ssize_t retval = 0; long timeout; bool packet; - size_t tail; + size_t old_tail; /* * Is this a continuation of a read started earler? @@ -2163,7 +2163,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, } packet = tty->ctrl.packet; - tail = ldata->read_tail; + old_tail = ldata->read_tail; add_wait_queue(&tty->read_wait, &wait); while (nr) { @@ -2252,7 +2252,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, if (time) timeout = time; } - if (tail != ldata->read_tail) + if (old_tail != ldata->read_tail) n_tty_kick_worker(tty); up_read(&tty->termios_rwsem); From c556573e4bb16bb6d152b521caa2b29ef65b88bb Mon Sep 17 00:00:00 2001 From: Hui Li Date: Thu, 6 Apr 2023 10:44:50 +0800 Subject: [PATCH 026/513] tty: fix hang on tty device with no_room set MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 4903fde8047a28299d1fc79c1a0dcc255e928f12 ] It is possible to hang pty devices in this case, the reader was blocking at epoll on master side, the writer was sleeping at wait_woken inside n_tty_write on slave side, and the write buffer on tty_port was full, we found that the reader and writer would never be woken again and blocked forever. The problem was caused by a race between reader and kworker: n_tty_read(reader): n_tty_receive_buf_common(kworker): copy_from_read_buf()| |room = N_TTY_BUF_SIZE - (ldata->read_head - tail) |room <= 0 n_tty_kick_worker() | |ldata->no_room = true After writing to slave device, writer wakes up kworker to flush data on tty_port to reader, and the kworker finds that reader has no room to store data so room <= 0 is met. At this moment, reader consumes all the data on reader buffer and calls n_tty_kick_worker to check ldata->no_room which is false and reader quits reading. Then kworker sets ldata->no_room=true and quits too. If write buffer is not full, writer will wake kworker to flush data again after following writes, but if write buffer is full and writer goes to sleep, kworker will never be woken again and tty device is blocked. This problem can be solved with a check for read buffer size inside n_tty_receive_buf_common, if read buffer is empty and ldata->no_room is true, a call to n_tty_kick_worker is necessary to keep flushing data to reader. 
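Taken together, the two added hunks form a store-then-load pairing: each side makes its own update visible (no_room on the flush-worker side, read_tail on the reader side) before inspecting the other side's state, so at least one of them notices the stall and restarts the worker. Condensed, with the surrounding code elided:

    /* flush worker, end of n_tty_receive_buf_common(): */
    if (unlikely(ldata->no_room)) {
            smp_mb();                       /* order no_room store vs. read_tail load */
            if (!chars_in_buffer(tty))      /* reader already drained the buffer, */
                    n_tty_kick_worker(tty); /* so it will not restart us itself */
    }

    /* reader, end of n_tty_read(): */
    if (old_tail != ldata->read_tail) {
            smp_mb();                       /* order read_tail store vs. no_room load */
            n_tty_kick_worker(tty);         /* re-checks no_room with READ_ONCE() */
    }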
Cc: Fixes: 42458f41d08f ("n_tty: Ensure reader restarts worker for next reader") Reviewed-by: Ilpo Järvinen Signed-off-by: Hui Li Message-ID: <1680749090-14106-1-git-send-email-caelli@tencent.com> Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/tty/n_tty.c | 25 +++++++++++++++++++++---- 1 file changed, 21 insertions(+), 4 deletions(-) diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 8f57448e1ce4..6259249b1167 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -202,8 +202,8 @@ static void n_tty_kick_worker(struct tty_struct *tty) struct n_tty_data *ldata = tty->disc_data; /* Did the input worker stop? Restart it */ - if (unlikely(ldata->no_room)) { - ldata->no_room = 0; + if (unlikely(READ_ONCE(ldata->no_room))) { + WRITE_ONCE(ldata->no_room, 0); WARN_RATELIMIT(tty->port->itty == NULL, "scheduling with invalid itty\n"); @@ -1661,7 +1661,7 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp, if (overflow && room < 0) ldata->read_head--; room = overflow; - ldata->no_room = flow && !room; + WRITE_ONCE(ldata->no_room, flow && !room); } else overflow = 0; @@ -1692,6 +1692,17 @@ n_tty_receive_buf_common(struct tty_struct *tty, const unsigned char *cp, } else n_tty_check_throttle(tty); + if (unlikely(ldata->no_room)) { + /* + * Barrier here is to ensure to read the latest read_tail in + * chars_in_buffer() and to make sure that read_tail is not loaded + * before ldata->no_room is set. + */ + smp_mb(); + if (!chars_in_buffer(tty)) + n_tty_kick_worker(tty); + } + up_read(&tty->termios_rwsem); return rcvd; @@ -2252,8 +2263,14 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file, if (time) timeout = time; } - if (old_tail != ldata->read_tail) + if (old_tail != ldata->read_tail) { + /* + * Make sure no_room is not read in n_tty_kick_worker() + * before setting ldata->read_tail in copy_from_read_buf(). + */ + smp_mb(); n_tty_kick_worker(tty); + } up_read(&tty->termios_rwsem); remove_wait_queue(&tty->read_wait, &wait); From da60170558b956c1b45dee1c4423da2425037426 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Christian=20K=C3=B6nig?= Date: Fri, 7 Jul 2023 11:25:00 +0200 Subject: [PATCH 027/513] drm/ttm: never consider pinned BOs for eviction&swap MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit a2848d08742c8e8494675892c02c0d22acbe3cf8 ] There is a small window where we have already incremented the pin count but not yet moved the bo from the lru to the pinned list. 
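Why a window exists at all is easiest to see as a timeline; the interleaving below is schematic, and the fix works because ttm_bo_evict_swapout_allowable() now looks at pin_count rather than at which list the BO happens to be on:

    /*
     * CPU A: pinning path                    CPU B: eviction / swapout walk
     * -------------------                    ------------------------------
     * ++bo->pin_count;
     *                                        LRU walk still finds bo
     *                                        ttm_bo_evict_swapout_allowable(bo, ...)
     * move bo from the LRU to the
     * pinned list
     */

    /* The added early test closes the window for good: */
    if (bo->pin_count) {
            *locked = false;
            *busy = false;
            return false;
    }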
Signed-off-by: Christian König Reported-by: Pelloux-Prayer, Pierre-Eric Tested-by: Pelloux-Prayer, Pierre-Eric Acked-by: Alex Deucher Cc: stable@vger.kernel.org Link: https://patchwork.freedesktop.org/patch/msgid/20230707120826.3701-1-christian.koenig@amd.com Signed-off-by: Sasha Levin --- drivers/gpu/drm/ttm/ttm_bo.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 6080f4b5c450..4d0ef5ab2531 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -604,6 +604,12 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo, { bool ret = false; + if (bo->pin_count) { + *locked = false; + *busy = false; + return false; + } + if (bo->base.resv == ctx->resv) { dma_resv_assert_held(bo->base.resv); if (ctx->allow_res_evict) From 5076cc8bc162b6c5a7f971ee7d587ef05122d49e Mon Sep 17 00:00:00 2001 From: Steve French Date: Sat, 13 Aug 2022 17:22:11 -0500 Subject: [PATCH 028/513] cifs: missing directory in MAINTAINERS file [ Upstream commit 5dd8ce24667a70bb9f7808f5eec0354bd37290c6 ] The include/uapi/linux/cifs directory (not just fs/cifs and fs/smbfs_common) should be included in cifs entry in the MAINTAINERS file. Reviewed-by: Paulo Alcantara (SUSE) Signed-off-by: Steve French Stable-dep-of: df9d70c18616 ("cifs: if deferred close is disabled then close files immediately") Signed-off-by: Sasha Levin --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 2bf1ad0fb2a6..e6b53e76651b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4666,6 +4666,7 @@ T: git git://git.samba.org/sfrench/cifs-2.6.git F: Documentation/admin-guide/cifs/ F: fs/cifs/ F: fs/smbfs_common/ +F: include/uapi/linux/cifs COMPACTPCI HOTPLUG CORE M: Scott Murray From c8117ac42303f7ae99bbe53e4952f7d147cca1fb Mon Sep 17 00:00:00 2001 From: Paulo Alcantara Date: Tue, 4 Oct 2022 18:41:20 -0300 Subject: [PATCH 029/513] cifs: use fs_context for automounts [ Upstream commit 9fd29a5bae6e8f94b410374099a6fddb253d2d5f ] Use filesystem context support to handle dfs links. Signed-off-by: Paulo Alcantara (SUSE) Signed-off-by: Steve French Stable-dep-of: df9d70c18616 ("cifs: if deferred close is disabled then close files immediately") Signed-off-by: Sasha Levin --- fs/cifs/cifs_dfs_ref.c | 100 +++++++++++++++++------------------------ 1 file changed, 40 insertions(+), 60 deletions(-) diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index b0864da9ef43..020e71fe1454 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -258,61 +258,23 @@ char *cifs_compose_mount_options(const char *sb_mountdata, goto compose_mount_options_out; } -/** - * cifs_dfs_do_mount - mounts specified path using DFS full path - * - * Always pass down @fullpath to smb3_do_mount() so we can use the root server - * to perform failover in case we failed to connect to the first target in the - * referral. 
- * - * @mntpt: directory entry for the path we are trying to automount - * @cifs_sb: parent/root superblock - * @fullpath: full path in UNC format - */ -static struct vfsmount *cifs_dfs_do_mount(struct dentry *mntpt, - struct cifs_sb_info *cifs_sb, - const char *fullpath) -{ - struct vfsmount *mnt; - char *mountdata; - char *devname; - - devname = kstrdup(fullpath, GFP_KERNEL); - if (!devname) - return ERR_PTR(-ENOMEM); - - convert_delimiter(devname, '/'); - - /* TODO: change to call fs_context_for_mount(), fill in context directly, call fc_mount */ - - /* See afs_mntpt_do_automount in fs/afs/mntpt.c for an example */ - - /* strip first '\' from fullpath */ - mountdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options, - fullpath + 1, NULL, NULL); - if (IS_ERR(mountdata)) { - kfree(devname); - return (struct vfsmount *)mountdata; - } - - mnt = vfs_submount(mntpt, &cifs_fs_type, devname, mountdata); - kfree(mountdata); - kfree(devname); - return mnt; -} - /* * Create a vfsmount that we can automount */ -static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) +static struct vfsmount *cifs_dfs_do_automount(struct path *path) { + int rc; + struct dentry *mntpt = path->dentry; + struct fs_context *fc; struct cifs_sb_info *cifs_sb; - void *page; + void *page = NULL; + struct smb3_fs_context *ctx, *cur_ctx; + struct smb3_fs_context tmp; char *full_path; struct vfsmount *mnt; - cifs_dbg(FYI, "in %s\n", __func__); - BUG_ON(IS_ROOT(mntpt)); + if (IS_ROOT(mntpt)) + return ERR_PTR(-ESTALE); /* * The MSDFS spec states that paths in DFS referral requests and @@ -321,29 +283,47 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) * gives us the latter, so we must adjust the result. */ cifs_sb = CIFS_SB(mntpt->d_sb); - if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) { - mnt = ERR_PTR(-EREMOTE); - goto cdda_exit; - } + if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) + return ERR_PTR(-EREMOTE); + + cur_ctx = cifs_sb->ctx; + + fc = fs_context_for_submount(path->mnt->mnt_sb->s_type, mntpt); + if (IS_ERR(fc)) + return ERR_CAST(fc); + + ctx = smb3_fc2context(fc); page = alloc_dentry_path(); /* always use tree name prefix */ full_path = build_path_from_dentry_optional_prefix(mntpt, page, true); if (IS_ERR(full_path)) { mnt = ERR_CAST(full_path); - goto free_full_path; + goto out; } - convert_delimiter(full_path, '\\'); + convert_delimiter(full_path, '/'); cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path); - mnt = cifs_dfs_do_mount(mntpt, cifs_sb, full_path); - cifs_dbg(FYI, "%s: cifs_dfs_do_mount:%s , mnt:%p\n", __func__, full_path + 1, mnt); + tmp = *cur_ctx; + tmp.source = full_path; + tmp.UNC = tmp.prepath = NULL; + + rc = smb3_fs_context_dup(ctx, &tmp); + if (rc) { + mnt = ERR_PTR(rc); + goto out; + } + + rc = smb3_parse_devname(full_path, ctx); + if (!rc) + mnt = fc_mount(fc); + else + mnt = ERR_PTR(rc); -free_full_path: +out: + put_fs_context(fc); free_dentry_path(page); -cdda_exit: - cifs_dbg(FYI, "leaving %s\n" , __func__); return mnt; } @@ -354,9 +334,9 @@ struct vfsmount *cifs_dfs_d_automount(struct path *path) { struct vfsmount *newmnt; - cifs_dbg(FYI, "in %s\n", __func__); + cifs_dbg(FYI, "%s: %pd\n", __func__, path->dentry); - newmnt = cifs_dfs_do_automount(path->dentry); + newmnt = cifs_dfs_do_automount(path); if (IS_ERR(newmnt)) { cifs_dbg(FYI, "leaving %s [automount failed]\n" , __func__); return newmnt; From c600e23fbc405e0ad24d161ca05d61a725a72d6f Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Thu, 16 Mar 2023 07:34:33 +0900 Subject: [PATCH 
030/513] ksmbd: remove internal.h include [ Upstream commit 211db0ac9e3dc6c46f2dd53395b34d76af929faf ] Since vfs_path_lookup is exported, It should not be internal. Move vfs_path_lookup prototype in internal.h to linux/namei.h. Suggested-by: Al Viro Reviewed-by: Christian Brauner Signed-off-by: Namjae Jeon Signed-off-by: Al Viro Stable-dep-of: df9d70c18616 ("cifs: if deferred close is disabled then close files immediately") Signed-off-by: Sasha Levin --- fs/internal.h | 2 -- fs/ksmbd/vfs.c | 2 -- include/linux/namei.h | 2 ++ 3 files changed, 2 insertions(+), 4 deletions(-) diff --git a/fs/internal.h b/fs/internal.h index ceb154583a3c..1ff8cfc94467 100644 --- a/fs/internal.h +++ b/fs/internal.h @@ -58,8 +58,6 @@ extern int finish_clean_context(struct fs_context *fc); */ extern int filename_lookup(int dfd, struct filename *name, unsigned flags, struct path *path, struct path *root); -extern int vfs_path_lookup(struct dentry *, struct vfsmount *, - const char *, unsigned int, struct path *); int do_rmdir(int dfd, struct filename *name); int do_unlinkat(int dfd, struct filename *name); int may_linkat(struct user_namespace *mnt_userns, struct path *link); diff --git a/fs/ksmbd/vfs.c b/fs/ksmbd/vfs.c index 52cc6a9627ed..f76acd83c294 100644 --- a/fs/ksmbd/vfs.c +++ b/fs/ksmbd/vfs.c @@ -19,8 +19,6 @@ #include #include -#include "../internal.h" /* for vfs_path_lookup */ - #include "glob.h" #include "oplock.h" #include "connection.h" diff --git a/include/linux/namei.h b/include/linux/namei.h index caeb08a98536..40c693525f79 100644 --- a/include/linux/namei.h +++ b/include/linux/namei.h @@ -63,6 +63,8 @@ extern struct dentry *kern_path_create(int, const char *, struct path *, unsigne extern struct dentry *user_path_create(int, const char __user *, struct path *, unsigned int); extern void done_path_create(struct path *, struct dentry *); extern struct dentry *kern_path_locked(const char *, struct path *); +int vfs_path_lookup(struct dentry *, struct vfsmount *, const char *, + unsigned int, struct path *); extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int); extern struct dentry *lookup_one_len(const char *, struct dentry *, int); From 5cb0349cfcdebb0090add17a6345eaa5c43f4844 Mon Sep 17 00:00:00 2001 From: Bharath SM Date: Fri, 7 Jul 2023 15:29:01 +0000 Subject: [PATCH 031/513] cifs: if deferred close is disabled then close files immediately [ Upstream commit df9d70c18616760c6504b97fec66b6379c172dbb ] If defer close timeout value is set to 0, then there is no need to include files in the deferred close list and utilize the delayed worker for closing. Instead, we can close them immediately. 
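With closetimeo taken into account, the decision reduces to a predicate along these lines; the helper is an illustrative sketch, not a function in fs/cifs, and it omits the unrelated parts of the real condition:

    static bool cifs_should_defer_close(struct cifs_sb_info *cifs_sb,
                                        struct cifsInodeInfo *cinode)
    {
            return cifs_sb->ctx->closetimeo &&              /* deferred close enabled */
                   cinode->oplock == CIFS_CACHE_RHW_FLG &&  /* read/write caching lease */
                   cinode->lease_granted;
    }

When closetimeo is 0 the first term fails, so the handle is closed immediately instead of being parked on the deferred-close list.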
Signed-off-by: Bharath SM Reviewed-by: Shyam Prasad N Cc: stable@vger.kernel.org Signed-off-by: Steve French Signed-off-by: Sasha Levin --- fs/cifs/file.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 4e4f73a90574..e65fbae9e804 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -880,8 +880,8 @@ int cifs_close(struct inode *inode, struct file *file) cfile = file->private_data; file->private_data = NULL; dclose = kmalloc(sizeof(struct cifs_deferred_close), GFP_KERNEL); - if ((cinode->oplock == CIFS_CACHE_RHW_FLG) && - cinode->lease_granted && + if ((cifs_sb->ctx->closetimeo && cinode->oplock == CIFS_CACHE_RHW_FLG) + && cinode->lease_granted && !test_bit(CIFS_INO_CLOSE_ON_LOCK, &cinode->flags) && dclose) { if (test_and_clear_bit(CIFS_INO_MODIFIED_ATTR, &cinode->flags)) { From bae3c43a9d256232f8a0a0126080e9d2913dba54 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Uwe=20Kleine-K=C3=B6nig?= Date: Mon, 8 Nov 2021 14:46:26 +0100 Subject: [PATCH 032/513] pwm: meson: Simplify duplicated per-channel tracking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 5f97f18feac9bd5a8163b108aee52d783114b36f ] The driver tracks per-channel data via struct pwm_device::chip_data and struct meson_pwm::channels[]. The latter holds the actual data, the former is only a pointer to the latter. So simplify by using struct meson_pwm::channels[] consistently. Signed-off-by: Uwe Kleine-König Reviewed-by: Martin Blumenstingl Signed-off-by: Thierry Reding Stable-dep-of: 87a2cbf02d77 ("pwm: meson: fix handling of period/duty if greater than UINT_MAX") Signed-off-by: Sasha Levin --- drivers/pwm/pwm-meson.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c index 76f702c43cbc..37bbad88a3ee 100644 --- a/drivers/pwm/pwm-meson.c +++ b/drivers/pwm/pwm-meson.c @@ -147,12 +147,13 @@ static int meson_pwm_request(struct pwm_chip *chip, struct pwm_device *pwm) return err; } - return pwm_set_chip_data(pwm, channel); + return 0; } static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) { - struct meson_pwm_channel *channel = pwm_get_chip_data(pwm); + struct meson_pwm *meson = to_meson_pwm(chip); + struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm]; if (channel) clk_disable_unprepare(channel->clk); @@ -161,7 +162,7 @@ static void meson_pwm_free(struct pwm_chip *chip, struct pwm_device *pwm) static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm, const struct pwm_state *state) { - struct meson_pwm_channel *channel = pwm_get_chip_data(pwm); + struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm]; unsigned int duty, period, pre_div, cnt, duty_cnt; unsigned long fin_freq; @@ -230,7 +231,7 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm, static void meson_pwm_enable(struct meson_pwm *meson, struct pwm_device *pwm) { - struct meson_pwm_channel *channel = pwm_get_chip_data(pwm); + struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm]; struct meson_pwm_channel_data *channel_data; unsigned long flags; u32 value; @@ -273,8 +274,8 @@ static void meson_pwm_disable(struct meson_pwm *meson, struct pwm_device *pwm) static int meson_pwm_apply(struct pwm_chip *chip, struct pwm_device *pwm, const struct pwm_state *state) { - struct meson_pwm_channel *channel = pwm_get_chip_data(pwm); struct meson_pwm *meson = to_meson_pwm(chip); + struct meson_pwm_channel *channel = 
&meson->channels[pwm->hwpwm]; int err = 0; if (!state) From 7ac170d93becbbe88cf90f95129c99f69482343c Mon Sep 17 00:00:00 2001 From: Heiner Kallweit Date: Wed, 24 May 2023 21:48:36 +0200 Subject: [PATCH 033/513] pwm: meson: fix handling of period/duty if greater than UINT_MAX MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 87a2cbf02d7701255f9fcca7e5bd864a7bb397cf ] state->period/duty are of type u64, and if their value is greater than UINT_MAX, then the cast to uint will cause problems. Fix this by changing the type of the respective local variables to u64. Fixes: b79c3670e120 ("pwm: meson: Don't duplicate the polarity internally") Cc: stable@vger.kernel.org Suggested-by: Uwe Kleine-König Reviewed-by: Martin Blumenstingl Signed-off-by: Heiner Kallweit Signed-off-by: Thierry Reding Signed-off-by: Sasha Levin --- drivers/pwm/pwm-meson.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/pwm/pwm-meson.c b/drivers/pwm/pwm-meson.c index 37bbad88a3ee..ec6a544d6f52 100644 --- a/drivers/pwm/pwm-meson.c +++ b/drivers/pwm/pwm-meson.c @@ -163,8 +163,9 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm, const struct pwm_state *state) { struct meson_pwm_channel *channel = &meson->channels[pwm->hwpwm]; - unsigned int duty, period, pre_div, cnt, duty_cnt; + unsigned int pre_div, cnt, duty_cnt; unsigned long fin_freq; + u64 duty, period; duty = state->duty_cycle; period = state->period; @@ -186,19 +187,19 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm, dev_dbg(meson->chip.dev, "fin_freq: %lu Hz\n", fin_freq); - pre_div = div64_u64(fin_freq * (u64)period, NSEC_PER_SEC * 0xffffLL); + pre_div = div64_u64(fin_freq * period, NSEC_PER_SEC * 0xffffLL); if (pre_div > MISC_CLK_DIV_MASK) { dev_err(meson->chip.dev, "unable to get period pre_div\n"); return -EINVAL; } - cnt = div64_u64(fin_freq * (u64)period, NSEC_PER_SEC * (pre_div + 1)); + cnt = div64_u64(fin_freq * period, NSEC_PER_SEC * (pre_div + 1)); if (cnt > 0xffff) { dev_err(meson->chip.dev, "unable to get period cnt\n"); return -EINVAL; } - dev_dbg(meson->chip.dev, "period=%u pre_div=%u cnt=%u\n", period, + dev_dbg(meson->chip.dev, "period=%llu pre_div=%u cnt=%u\n", period, pre_div, cnt); if (duty == period) { @@ -211,14 +212,13 @@ static int meson_pwm_calc(struct meson_pwm *meson, struct pwm_device *pwm, channel->lo = cnt; } else { /* Then check is we can have the duty with the same pre_div */ - duty_cnt = div64_u64(fin_freq * (u64)duty, - NSEC_PER_SEC * (pre_div + 1)); + duty_cnt = div64_u64(fin_freq * duty, NSEC_PER_SEC * (pre_div + 1)); if (duty_cnt > 0xffff) { dev_err(meson->chip.dev, "unable to get duty cycle\n"); return -EINVAL; } - dev_dbg(meson->chip.dev, "duty=%u pre_div=%u duty_cnt=%u\n", + dev_dbg(meson->chip.dev, "duty=%llu pre_div=%u duty_cnt=%u\n", duty, pre_div, duty_cnt); channel->pre_div = pre_div; From 3a1a229712efa8e72adaa44252c54b5e5e3f3dd7 Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Mon, 14 Nov 2022 13:47:56 +0900 Subject: [PATCH 034/513] tracing/probes: Add symstr type for dynamic events [ Upstream commit b26a124cbfa80f42bfc4e63e1d5643ca98159d66 ] Add 'symstr' type for storing the kernel symbol as a string data instead of the symbol address. This allows us to filter the events by wildcard symbol name. e.g. 
# echo 'e:wqfunc workqueue.workqueue_execute_start symname=$function:symstr' >> dynamic_events # cat events/eprobes/wqfunc/format name: wqfunc ID: 2110 format: field:unsigned short common_type; offset:0; size:2; signed:0; field:unsigned char common_flags; offset:2; size:1; signed:0; field:unsigned char common_preempt_count; offset:3; size:1; signed:0; field:int common_pid; offset:4; size:4; signed:1; field:__data_loc char[] symname; offset:8; size:4; signed:1; print fmt: " symname=\"%s\"", __get_str(symname) Note that there is already 'symbol' type which just change the print format (so it still stores the symbol address in the tracing ring buffer.) On the other hand, 'symstr' type stores the actual "symbol+offset/size" data as a string. Link: https://lore.kernel.org/all/166679930847.1528100.4124308529180235965.stgit@devnote3/ Signed-off-by: Masami Hiramatsu (Google) Stable-dep-of: 66bcf65d6cf0 ("tracing/probes: Fix to avoid double count of the string length on the array") Signed-off-by: Sasha Levin --- Documentation/trace/kprobetrace.rst | 8 +++-- kernel/trace/trace.c | 2 +- kernel/trace/trace_probe.c | 44 ++++++++++++++++++--------- kernel/trace/trace_probe.h | 16 +++++++--- kernel/trace/trace_probe_tmpl.h | 47 +++++++++++++++++++++++++++-- 5 files changed, 91 insertions(+), 26 deletions(-) diff --git a/Documentation/trace/kprobetrace.rst b/Documentation/trace/kprobetrace.rst index b175d88f31eb..15e4bfa2bd83 100644 --- a/Documentation/trace/kprobetrace.rst +++ b/Documentation/trace/kprobetrace.rst @@ -58,8 +58,8 @@ Synopsis of kprobe_events NAME=FETCHARG : Set NAME as the argument name of FETCHARG. FETCHARG:TYPE : Set TYPE as the type of FETCHARG. Currently, basic types (u8/u16/u32/u64/s8/s16/s32/s64), hexadecimal types - (x8/x16/x32/x64), "string", "ustring" and bitfield - are supported. + (x8/x16/x32/x64), "string", "ustring", "symbol", "symstr" + and bitfield are supported. (\*1) only for the probe on function entry (offs == 0). (\*2) only for return probe. @@ -96,6 +96,10 @@ offset, and container-size (usually 32). The syntax is:: Symbol type('symbol') is an alias of u32 or u64 type (depends on BITS_PER_LONG) which shows given pointer in "symbol+offset" style. +On the other hand, symbol-string type ('symstr') converts the given address to +"symbol+offset/symbolsize" style and stores it as a null-terminated string. +With 'symstr' type, you can filter the event with wildcard pattern of the +symbols, and you don't need to solve symbol name by yourself. For $comm, the default type is "string"; any other type is invalid. .. 
_user_mem_access: diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 1dda36c7e5eb..ae7005af78c3 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -5609,7 +5609,7 @@ static const char readme_msg[] = "\t +|-[u](), \\imm-value, \\\"imm-string\"\n" "\t type: s8/16/32/64, u8/16/32/64, x8/16/32/64, string, symbol,\n" "\t b@/, ustring,\n" - "\t \\[\\]\n" + "\t symstr, \\[\\]\n" #ifdef CONFIG_HIST_TRIGGERS "\t field: ;\n" "\t stype: u8/u16/u32/u64, s8/s16/s32/s64, pid_t,\n" diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index cb8f9fe5669a..90bae188d092 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -76,9 +76,11 @@ const char PRINT_TYPE_FMT_NAME(string)[] = "\\\"%s\\\""; /* Fetch type information table */ static const struct fetch_type probe_fetch_types[] = { /* Special types */ - __ASSIGN_FETCH_TYPE("string", string, string, sizeof(u32), 1, + __ASSIGN_FETCH_TYPE("string", string, string, sizeof(u32), 1, 1, "__data_loc char[]"), - __ASSIGN_FETCH_TYPE("ustring", string, string, sizeof(u32), 1, + __ASSIGN_FETCH_TYPE("ustring", string, string, sizeof(u32), 1, 1, + "__data_loc char[]"), + __ASSIGN_FETCH_TYPE("symstr", string, string, sizeof(u32), 1, 1, "__data_loc char[]"), /* Basic types */ ASSIGN_FETCH_TYPE(u8, u8, 0), @@ -658,16 +660,26 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size, ret = -EINVAL; /* Store operation */ - if (!strcmp(parg->type->name, "string") || - !strcmp(parg->type->name, "ustring")) { - if (code->op != FETCH_OP_DEREF && code->op != FETCH_OP_UDEREF && - code->op != FETCH_OP_IMM && code->op != FETCH_OP_COMM && - code->op != FETCH_OP_DATA && code->op != FETCH_OP_TP_ARG) { - trace_probe_log_err(offset + (t ? (t - arg) : 0), - BAD_STRING); - goto fail; + if (parg->type->is_string) { + if (!strcmp(parg->type->name, "symstr")) { + if (code->op != FETCH_OP_REG && code->op != FETCH_OP_STACK && + code->op != FETCH_OP_RETVAL && code->op != FETCH_OP_ARG && + code->op != FETCH_OP_DEREF && code->op != FETCH_OP_TP_ARG) { + trace_probe_log_err(offset + (t ? (t - arg) : 0), + BAD_SYMSTRING); + goto fail; + } + } else { + if (code->op != FETCH_OP_DEREF && code->op != FETCH_OP_UDEREF && + code->op != FETCH_OP_IMM && code->op != FETCH_OP_COMM && + code->op != FETCH_OP_DATA && code->op != FETCH_OP_TP_ARG) { + trace_probe_log_err(offset + (t ? (t - arg) : 0), + BAD_STRING); + goto fail; + } } - if ((code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM || + if (!strcmp(parg->type->name, "symstr") || + (code->op == FETCH_OP_IMM || code->op == FETCH_OP_COMM || code->op == FETCH_OP_DATA) || code->op == FETCH_OP_TP_ARG || parg->count) { /* @@ -675,6 +687,8 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size, * must be kept, and if parg->count != 0, this is an * array of string pointers instead of string address * itself. + * For the symstr, it doesn't need to dereference, thus + * it just get the value. 
*/ code++; if (code->op != FETCH_OP_NOP) { @@ -686,6 +700,8 @@ static int traceprobe_parse_probe_arg_body(const char *argv, ssize_t *size, if (!strcmp(parg->type->name, "ustring") || code->op == FETCH_OP_UDEREF) code->op = FETCH_OP_ST_USTRING; + else if (!strcmp(parg->type->name, "symstr")) + code->op = FETCH_OP_ST_SYMSTR; else code->op = FETCH_OP_ST_STRING; code->size = parg->type->size; @@ -915,8 +931,7 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len, for (i = 0; i < tp->nr_args; i++) { parg = tp->args + i; if (parg->count) { - if ((strcmp(parg->type->name, "string") == 0) || - (strcmp(parg->type->name, "ustring") == 0)) + if (parg->type->is_string) fmt = ", __get_str(%s[%d])"; else fmt = ", REC->%s[%d]"; @@ -924,8 +939,7 @@ static int __set_print_fmt(struct trace_probe *tp, char *buf, int len, pos += snprintf(buf + pos, LEN_OR_ZERO, fmt, parg->name, j); } else { - if ((strcmp(parg->type->name, "string") == 0) || - (strcmp(parg->type->name, "ustring") == 0)) + if (parg->type->is_string) fmt = ", __get_str(%s)"; else fmt = ", REC->%s"; diff --git a/kernel/trace/trace_probe.h b/kernel/trace/trace_probe.h index 84d495cbd876..0f0e5005b97a 100644 --- a/kernel/trace/trace_probe.h +++ b/kernel/trace/trace_probe.h @@ -99,6 +99,7 @@ enum fetch_op { FETCH_OP_ST_UMEM, /* Mem: .offset, .size */ FETCH_OP_ST_STRING, /* String: .offset, .size */ FETCH_OP_ST_USTRING, /* User String: .offset, .size */ + FETCH_OP_ST_SYMSTR, /* Kernel Symbol String: .offset, .size */ // Stage 4 (modify) op FETCH_OP_MOD_BF, /* Bitfield: .basesize, .lshift, .rshift */ // Stage 5 (loop) op @@ -134,7 +135,8 @@ struct fetch_insn { struct fetch_type { const char *name; /* Name of type */ size_t size; /* Byte size of type */ - int is_signed; /* Signed flag */ + bool is_signed; /* Signed flag */ + bool is_string; /* String flag */ print_type_func_t print; /* Print functions */ const char *fmt; /* Format string */ const char *fmttype; /* Name in format file */ @@ -178,16 +180,19 @@ DECLARE_BASIC_PRINT_TYPE_FUNC(symbol); #define _ADDR_FETCH_TYPE(t) __ADDR_FETCH_TYPE(t) #define ADDR_FETCH_TYPE _ADDR_FETCH_TYPE(BITS_PER_LONG) -#define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \ - {.name = _name, \ +#define __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, str, _fmttype) \ + {.name = _name, \ .size = _size, \ - .is_signed = sign, \ + .is_signed = (bool)sign, \ + .is_string = (bool)str, \ .print = PRINT_TYPE_FUNC_NAME(ptype), \ .fmt = PRINT_TYPE_FMT_NAME(ptype), \ .fmttype = _fmttype, \ } + +/* Non string types can use these macros */ #define _ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, _fmttype) \ - __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, #_fmttype) + __ASSIGN_FETCH_TYPE(_name, ptype, ftype, _size, sign, 0, #_fmttype) #define ASSIGN_FETCH_TYPE(ptype, ftype, sign) \ _ASSIGN_FETCH_TYPE(#ptype, ptype, ftype, sizeof(ftype), sign, ptype) @@ -432,6 +437,7 @@ extern int traceprobe_define_arg_fields(struct trace_event_call *event_call, C(ARRAY_TOO_BIG, "Array number is too big"), \ C(BAD_TYPE, "Unknown type is specified"), \ C(BAD_STRING, "String accepts only memory argument"), \ + C(BAD_SYMSTRING, "Symbol String doesn't accept data/userdata"), \ C(BAD_BITFIELD, "Invalid bitfield"), \ C(ARG_NAME_TOO_LONG, "Argument name is too long"), \ C(NO_ARG_NAME, "Argument name is not specified"), \ diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h index c293a607d536..21799fa813ca 100644 --- a/kernel/trace/trace_probe_tmpl.h +++ 
b/kernel/trace/trace_probe_tmpl.h @@ -67,6 +67,37 @@ probe_mem_read(void *dest, void *src, size_t size); static nokprobe_inline int probe_mem_read_user(void *dest, void *src, size_t size); +static nokprobe_inline int +fetch_store_symstrlen(unsigned long addr) +{ + char namebuf[KSYM_SYMBOL_LEN]; + int ret; + + ret = sprint_symbol(namebuf, addr); + if (ret < 0) + return 0; + + return ret + 1; +} + +/* + * Fetch a null-terminated symbol string + offset. Caller MUST set *(u32 *)buf + * with max length and relative data location. + */ +static nokprobe_inline int +fetch_store_symstring(unsigned long addr, void *dest, void *base) +{ + int maxlen = get_loc_len(*(u32 *)dest); + void *__dest; + + if (unlikely(!maxlen)) + return -ENOMEM; + + __dest = get_loc_data(dest, base); + + return sprint_symbol(__dest, addr); +} + /* From the 2nd stage, routine is same */ static nokprobe_inline int process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val, @@ -99,16 +130,22 @@ process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val, stage3: /* 3rd stage: store value to buffer */ if (unlikely(!dest)) { - if (code->op == FETCH_OP_ST_STRING) { + switch (code->op) { + case FETCH_OP_ST_STRING: ret = fetch_store_strlen(val + code->offset); code++; goto array; - } else if (code->op == FETCH_OP_ST_USTRING) { + case FETCH_OP_ST_USTRING: ret += fetch_store_strlen_user(val + code->offset); code++; goto array; - } else + case FETCH_OP_ST_SYMSTR: + ret += fetch_store_symstrlen(val + code->offset); + code++; + goto array; + default: return -EILSEQ; + } } switch (code->op) { @@ -129,6 +166,10 @@ process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val, loc = *(u32 *)dest; ret = fetch_store_string_user(val + code->offset, dest, base); break; + case FETCH_OP_ST_SYMSTR: + loc = *(u32 *)dest; + ret = fetch_store_symstring(val + code->offset, dest, base); + break; default: return -EILSEQ; } From e7b4d24fa090cfe4dc665fc067cd4d62b8e5ec7c Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Tue, 11 Jul 2023 23:15:29 +0900 Subject: [PATCH 035/513] tracing/probes: Fix to avoid double count of the string length on the array [ Upstream commit 66bcf65d6cf0ca6540e2341e88ee7ef02dbdda08 ] If an array is specified with the ustring or symstr, the length of the strings are accumlated on both of 'ret' and 'total', which means the length is double counted. Just set the length to the 'ret' value for avoiding double counting. 
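The mistake is the classic accumulate-twice pattern: a per-element length is added into a running value that is itself added into the total on every iteration. In miniature, with element_len() as a stand-in stub for the per-element strlen fetch:

    static int element_len(int i) { return i + 1; }         /* placeholder stub */

    static int total_len_buggy(int n)
    {
            int i, ret = 0, total = 0;

            for (i = 0; i < n; i++) {
                    ret += element_len(i);  /* BUG: ret still holds earlier lengths */
                    total += ret;           /* so they are counted again here */
            }
            return total;
    }

    static int total_len_fixed(int n)
    {
            int i, ret, total = 0;

            for (i = 0; i < n; i++) {
                    ret = element_len(i);   /* only the current element's length */
                    total += ret;
            }
            return total;
    }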
Link: https://lore.kernel.org/all/168908492917.123124.15076463491122036025.stgit@devnote2/ Reported-by: Dan Carpenter Closes: https://lore.kernel.org/all/8819b154-2ba1-43c3-98a2-cbde20892023@moroto.mountain/ Fixes: 88903c464321 ("tracing/probe: Add ustring type for user-space string") Cc: stable@vger.kernel.org Signed-off-by: Masami Hiramatsu (Google) Reviewed-by: Steven Rostedt (Google) Signed-off-by: Sasha Levin --- kernel/trace/trace_probe_tmpl.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h index 21799fa813ca..98ac09052fea 100644 --- a/kernel/trace/trace_probe_tmpl.h +++ b/kernel/trace/trace_probe_tmpl.h @@ -136,11 +136,11 @@ process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val, code++; goto array; case FETCH_OP_ST_USTRING: - ret += fetch_store_strlen_user(val + code->offset); + ret = fetch_store_strlen_user(val + code->offset); code++; goto array; case FETCH_OP_ST_SYMSTR: - ret += fetch_store_symstrlen(val + code->offset); + ret = fetch_store_symstrlen(val + code->offset); code++; goto array; default: From 5f52389bdd9eafb63b3a2f804e02aeb17b6a5f55 Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Tue, 17 Jan 2023 10:21:28 -0500 Subject: [PATCH 036/513] tracing: Allow synthetic events to pass around stacktraces [ Upstream commit 00cf3d672a9dd409418647e9f98784c339c3ff63 ] Allow a stacktrace from one event to be displayed by the end event of a synthetic event. This is very useful when looking for the longest latency of a sleep or something blocked on I/O. # cd /sys/kernel/tracing/ # echo 's:block_lat pid_t pid; u64 delta; unsigned long[] stack;' > dynamic_events # echo 'hist:keys=next_pid:ts=common_timestamp.usecs,st=stacktrace if prev_state == 1||prev_state == 2' > events/sched/sched_switch/trigger # echo 'hist:keys=prev_pid:delta=common_timestamp.usecs-$ts,s=$st:onmax($delta).trace(block_lat,prev_pid,$delta,$s)' >> events/sched/sched_switch/trigger The above creates a "block_lat" synthetic event that take the stacktrace of when a task schedules out in either the interruptible or uninterruptible states, and on a new per process max $delta (the time it was scheduled out), will print the process id and the stacktrace. # echo 1 > events/synthetic/block_lat/enable # cat trace # TASK-PID CPU# ||||| TIMESTAMP FUNCTION # | | | ||||| | | kworker/u16:0-767 [006] d..4. 560.645045: block_lat: pid=767 delta=66 stack=STACK: => __schedule => schedule => pipe_read => vfs_read => ksys_read => do_syscall_64 => 0x966000aa -0 [003] d..4. 561.132117: block_lat: pid=0 delta=413787 stack=STACK: => __schedule => schedule => schedule_hrtimeout_range_clock => do_sys_poll => __x64_sys_poll => do_syscall_64 => 0x966000aa <...>-153 [006] d..4. 
562.068407: block_lat: pid=153 delta=54 stack=STACK: => __schedule => schedule => io_schedule => rq_qos_wait => wbt_wait => __rq_qos_throttle => blk_mq_submit_bio => submit_bio_noacct_nocheck => ext4_bio_write_page => mpage_submit_page => mpage_process_page_bufs => mpage_prepare_extent_to_map => ext4_do_writepages => ext4_writepages => do_writepages => __writeback_single_inode Link: https://lkml.kernel.org/r/20230117152236.010941267@goodmis.org Cc: Masami Hiramatsu Cc: Andrew Morton Cc: Tom Zanussi Cc: Ross Zwisler Cc: Ching-lin Yu Signed-off-by: Steven Rostedt (Google) Stable-dep-of: 797311bce5c2 ("tracing/probes: Fix to record 0-length data_loc in fetch_store_string*() if fails") Signed-off-by: Sasha Levin --- kernel/trace/trace.h | 4 ++ kernel/trace/trace_events_hist.c | 7 ++- kernel/trace/trace_events_synth.c | 80 ++++++++++++++++++++++++++++++- kernel/trace/trace_synth.h | 1 + 4 files changed, 87 insertions(+), 5 deletions(-) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 2c3d9b6ce148..33c55c8826a7 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -113,6 +113,10 @@ enum trace_type { #define MEM_FAIL(condition, fmt, ...) \ DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__) +#define HIST_STACKTRACE_DEPTH 16 +#define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long)) +#define HIST_STACKTRACE_SKIP 5 + /* * syscalls are special, and need special handling, this is why * they are not included in trace_entries.h diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 1b70fc4c703f..c32a53f08922 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -315,10 +315,6 @@ DEFINE_HIST_FIELD_FN(u8); #define for_each_hist_key_field(i, hist_data) \ for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++) -#define HIST_STACKTRACE_DEPTH 16 -#define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long)) -#define HIST_STACKTRACE_SKIP 5 - #define HITCOUNT_IDX 0 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE) @@ -3431,6 +3427,9 @@ static int check_synth_field(struct synth_event *event, && field->is_dynamic) return 0; + if (strstr(hist_field->type, "long[") && field->is_stack) + return 0; + if (strcmp(field->type, hist_field->type) != 0) { if (field->size != hist_field->size || (!field->is_string && field->is_signed != hist_field->is_signed)) diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index 08c7df42ade7..ce8fad956b38 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -165,6 +165,14 @@ static int synth_field_is_string(char *type) return false; } +static int synth_field_is_stack(char *type) +{ + if (strstr(type, "long[") != NULL) + return true; + + return false; +} + static int synth_field_string_size(char *type) { char buf[4], *end, *start; @@ -240,6 +248,8 @@ static int synth_field_size(char *type) size = sizeof(gfp_t); else if (synth_field_is_string(type)) size = synth_field_string_size(type); + else if (synth_field_is_stack(type)) + size = 0; return size; } @@ -284,6 +294,8 @@ static const char *synth_field_fmt(char *type) fmt = "%x"; else if (synth_field_is_string(type)) fmt = "%.*s"; + else if (synth_field_is_stack(type)) + fmt = "%s"; return fmt; } @@ -363,6 +375,23 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter, i == se->n_fields - 1 ? 
"" : " "); n_u64 += STR_VAR_LEN_MAX / sizeof(u64); } + } else if (se->fields[i]->is_stack) { + u32 offset, data_offset, len; + unsigned long *p, *end; + + offset = (u32)entry->fields[n_u64]; + data_offset = offset & 0xffff; + len = offset >> 16; + + p = (void *)entry + data_offset; + end = (void *)p + len - (sizeof(long) - 1); + + trace_seq_printf(s, "%s=STACK:\n", se->fields[i]->name); + + for (; *p && p < end; p++) + trace_seq_printf(s, "=> %pS\n", (void *)*p); + n_u64++; + } else { struct trace_print_flags __flags[] = { __def_gfpflag_names, {-1, NULL} }; @@ -439,6 +468,43 @@ static unsigned int trace_string(struct synth_trace_event *entry, return len; } +static unsigned int trace_stack(struct synth_trace_event *entry, + struct synth_event *event, + long *stack, + unsigned int data_size, + unsigned int *n_u64) +{ + unsigned int len; + u32 data_offset; + void *data_loc; + + data_offset = struct_size(entry, fields, event->n_u64); + data_offset += data_size; + + for (len = 0; len < HIST_STACKTRACE_DEPTH; len++) { + if (!stack[len]) + break; + } + + /* Include the zero'd element if it fits */ + if (len < HIST_STACKTRACE_DEPTH) + len++; + + len *= sizeof(long); + + /* Find the dynamic section to copy the stack into. */ + data_loc = (void *)entry + data_offset; + memcpy(data_loc, stack, len); + + /* Fill in the field that holds the offset/len combo */ + data_offset |= len << 16; + *(u32 *)&entry->fields[*n_u64] = data_offset; + + (*n_u64)++; + + return len; +} + static notrace void trace_event_raw_event_synth(void *__data, u64 *var_ref_vals, unsigned int *var_ref_idx) @@ -491,6 +557,12 @@ static notrace void trace_event_raw_event_synth(void *__data, event->fields[i]->is_dynamic, data_size, &n_u64); data_size += len; /* only dynamic string increments */ + } if (event->fields[i]->is_stack) { + long *stack = (long *)(long)var_ref_vals[val_idx]; + + len = trace_stack(entry, event, stack, + data_size, &n_u64); + data_size += len; } else { struct synth_field *field = event->fields[i]; u64 val = var_ref_vals[val_idx]; @@ -553,6 +625,9 @@ static int __set_synth_event_print_fmt(struct synth_event *event, event->fields[i]->is_dynamic) pos += snprintf(buf + pos, LEN_OR_ZERO, ", __get_str(%s)", event->fields[i]->name); + else if (event->fields[i]->is_stack) + pos += snprintf(buf + pos, LEN_OR_ZERO, + ", __get_stacktrace(%s)", event->fields[i]->name); else pos += snprintf(buf + pos, LEN_OR_ZERO, ", REC->%s", event->fields[i]->name); @@ -689,7 +764,8 @@ static struct synth_field *parse_synth_field(int argc, char **argv, ret = -EINVAL; goto free; } else if (size == 0) { - if (synth_field_is_string(field->type)) { + if (synth_field_is_string(field->type) || + synth_field_is_stack(field->type)) { char *type; len = sizeof("__data_loc ") + strlen(field->type) + 1; @@ -720,6 +796,8 @@ static struct synth_field *parse_synth_field(int argc, char **argv, if (synth_field_is_string(field->type)) field->is_string = true; + else if (synth_field_is_stack(field->type)) + field->is_stack = true; field->is_signed = synth_field_signed(field->type); out: diff --git a/kernel/trace/trace_synth.h b/kernel/trace/trace_synth.h index b29595fe3ac5..43f6fb6078db 100644 --- a/kernel/trace/trace_synth.h +++ b/kernel/trace/trace_synth.h @@ -18,6 +18,7 @@ struct synth_field { bool is_signed; bool is_string; bool is_dynamic; + bool is_stack; }; struct synth_event { From 30c8ba1da3737cf16a12a08fa6796034a419fd4f Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Tue, 11 Jul 2023 23:15:57 +0900 Subject: [PATCH 037/513] Revert 
"tracing: Add "(fault)" name injection to kernel probes" [ Upstream commit 4ed8f337dee32df71435689c19d22e4ee846e15a ] This reverts commit 2e9906f84fc7c99388bb7123ade167250d50f1c0. It was turned out that commit 2e9906f84fc7 ("tracing: Add "(fault)" name injection to kernel probes") did not work correctly and probe events still show just '(fault)' (instead of '"(fault)"'). Also, current '(fault)' is more explicit that it faulted. This also moves FAULT_STRING macro to trace.h so that synthetic event can keep using it, and uses it in trace_probe.c too. Link: https://lore.kernel.org/all/168908495772.123124.1250788051922100079.stgit@devnote2/ Link: https://lore.kernel.org/all/20230706230642.3793a593@rorschach.local.home/ Cc: stable@vger.kernel.org Cc: Andrew Morton Cc: Tom Zanussi Signed-off-by: Masami Hiramatsu (Google) Reviewed-by: Steven Rostedt (Google) Stable-dep-of: 797311bce5c2 ("tracing/probes: Fix to record 0-length data_loc in fetch_store_string*() if fails") Signed-off-by: Sasha Levin --- kernel/trace/trace.h | 2 ++ kernel/trace/trace_probe.c | 2 +- kernel/trace/trace_probe_kernel.h | 31 ++++++------------------------- 3 files changed, 9 insertions(+), 26 deletions(-) diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index 33c55c8826a7..43058077a4de 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h @@ -113,6 +113,8 @@ enum trace_type { #define MEM_FAIL(condition, fmt, ...) \ DO_ONCE_LITE_IF(condition, pr_err, "ERROR: " fmt, ##__VA_ARGS__) +#define FAULT_STRING "(fault)" + #define HIST_STACKTRACE_DEPTH 16 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long)) #define HIST_STACKTRACE_SKIP 5 diff --git a/kernel/trace/trace_probe.c b/kernel/trace/trace_probe.c index 90bae188d092..0888f0644d25 100644 --- a/kernel/trace/trace_probe.c +++ b/kernel/trace/trace_probe.c @@ -64,7 +64,7 @@ int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, void *data, void *ent) int len = *(u32 *)data >> 16; if (!len) - trace_seq_puts(s, "(fault)"); + trace_seq_puts(s, FAULT_STRING); else trace_seq_printf(s, "\"%s\"", (const char *)get_loc_data(data, ent)); diff --git a/kernel/trace/trace_probe_kernel.h b/kernel/trace/trace_probe_kernel.h index 77dbd9ff9782..1d43df29a1f8 100644 --- a/kernel/trace/trace_probe_kernel.h +++ b/kernel/trace/trace_probe_kernel.h @@ -2,8 +2,6 @@ #ifndef __TRACE_PROBE_KERNEL_H_ #define __TRACE_PROBE_KERNEL_H_ -#define FAULT_STRING "(fault)" - /* * This depends on trace_probe.h, but can not include it due to * the way trace_probe_tmpl.h is used by trace_kprobe.c and trace_eprobe.c. @@ -15,16 +13,8 @@ static nokprobe_inline int kern_fetch_store_strlen_user(unsigned long addr) { const void __user *uaddr = (__force const void __user *)addr; - int ret; - ret = strnlen_user_nofault(uaddr, MAX_STRING_SIZE); - /* - * strnlen_user_nofault returns zero on fault, insert the - * FAULT_STRING when that occurs. - */ - if (ret <= 0) - return strlen(FAULT_STRING) + 1; - return ret; + return strnlen_user_nofault(uaddr, MAX_STRING_SIZE); } /* Return the length of string -- including null terminal byte */ @@ -44,18 +34,7 @@ kern_fetch_store_strlen(unsigned long addr) len++; } while (c && ret == 0 && len < MAX_STRING_SIZE); - /* For faults, return enough to hold the FAULT_STRING */ - return (ret < 0) ? 
strlen(FAULT_STRING) + 1 : len; -} - -static nokprobe_inline void set_data_loc(int ret, void *dest, void *__dest, void *base, int len) -{ - if (ret >= 0) { - *(u32 *)dest = make_data_loc(ret, __dest - base); - } else { - strscpy(__dest, FAULT_STRING, len); - ret = strlen(__dest) + 1; - } + return (ret < 0) ? ret : len; } /* @@ -76,7 +55,8 @@ kern_fetch_store_string_user(unsigned long addr, void *dest, void *base) __dest = get_loc_data(dest, base); ret = strncpy_from_user_nofault(__dest, uaddr, maxlen); - set_data_loc(ret, dest, __dest, base, maxlen); + if (ret >= 0) + *(u32 *)dest = make_data_loc(ret, __dest - base); return ret; } @@ -107,7 +87,8 @@ kern_fetch_store_string(unsigned long addr, void *dest, void *base) * probing. */ ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen); - set_data_loc(ret, dest, __dest, base, maxlen); + if (ret >= 0) + *(u32 *)dest = make_data_loc(ret, __dest - base); return ret; } From ace6bed424643d759767da9fc5b978ae6b143769 Mon Sep 17 00:00:00 2001 From: "Masami Hiramatsu (Google)" Date: Tue, 11 Jul 2023 23:16:07 +0900 Subject: [PATCH 038/513] tracing/probes: Fix to record 0-length data_loc in fetch_store_string*() if fails [ Upstream commit 797311bce5c2ac90b8d65e357603cfd410d36ebb ] Fix to record 0-length data to data_loc in fetch_store_string*() if it fails to get the string data. Currently those expect that the data_loc is updated by store_trace_args() if it returns the error code. However, that does not work correctly if the argument is an array of strings. In that case, store_trace_args() only clears the first entry of the array (which may have no error) and leaves other entries. So it should be cleared by fetch_store_string*() itself. Also, 'dyndata' and 'maxlen' in store_trace_args() should be updated only if it is used (ret > 0 and argument is a dynamic data.) Link: https://lore.kernel.org/all/168908496683.123124.4761206188794205601.stgit@devnote2/ Fixes: 40b53b771806 ("tracing: probeevent: Add array type support") Cc: stable@vger.kernel.org Reviewed-by: Steven Rostedt (Google) Signed-off-by: Masami Hiramatsu (Google) Signed-off-by: Sasha Levin --- kernel/trace/trace_probe_kernel.h | 13 +++++++++---- kernel/trace/trace_probe_tmpl.h | 10 +++------- kernel/trace/trace_uprobe.c | 3 ++- 3 files changed, 14 insertions(+), 12 deletions(-) diff --git a/kernel/trace/trace_probe_kernel.h b/kernel/trace/trace_probe_kernel.h index 1d43df29a1f8..2da70be83831 100644 --- a/kernel/trace/trace_probe_kernel.h +++ b/kernel/trace/trace_probe_kernel.h @@ -37,6 +37,13 @@ kern_fetch_store_strlen(unsigned long addr) return (ret < 0) ? ret : len; } +static nokprobe_inline void set_data_loc(int ret, void *dest, void *__dest, void *base) +{ + if (ret < 0) + ret = 0; + *(u32 *)dest = make_data_loc(ret, __dest - base); +} + /* * Fetch a null-terminated string from user. Caller MUST set *(u32 *)buf * with max length and relative data location. @@ -55,8 +62,7 @@ kern_fetch_store_string_user(unsigned long addr, void *dest, void *base) __dest = get_loc_data(dest, base); ret = strncpy_from_user_nofault(__dest, uaddr, maxlen); - if (ret >= 0) - *(u32 *)dest = make_data_loc(ret, __dest - base); + set_data_loc(ret, dest, __dest, base); return ret; } @@ -87,8 +93,7 @@ kern_fetch_store_string(unsigned long addr, void *dest, void *base) * probing. 
*/ ret = strncpy_from_kernel_nofault(__dest, (void *)addr, maxlen); - if (ret >= 0) - *(u32 *)dest = make_data_loc(ret, __dest - base); + set_data_loc(ret, dest, __dest, base); return ret; } diff --git a/kernel/trace/trace_probe_tmpl.h b/kernel/trace/trace_probe_tmpl.h index 98ac09052fea..3e2f5a43b974 100644 --- a/kernel/trace/trace_probe_tmpl.h +++ b/kernel/trace/trace_probe_tmpl.h @@ -247,13 +247,9 @@ store_trace_args(void *data, struct trace_probe *tp, void *rec, if (unlikely(arg->dynamic)) *dl = make_data_loc(maxlen, dyndata - base); ret = process_fetch_insn(arg->code, rec, dl, base); - if (arg->dynamic) { - if (unlikely(ret < 0)) { - *dl = make_data_loc(0, dyndata - base); - } else { - dyndata += ret; - maxlen -= ret; - } + if (arg->dynamic && likely(ret > 0)) { + dyndata += ret; + maxlen -= ret; } } } diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c index 78ec1c16ccf4..debc65101548 100644 --- a/kernel/trace/trace_uprobe.c +++ b/kernel/trace/trace_uprobe.c @@ -168,7 +168,8 @@ fetch_store_string(unsigned long addr, void *dest, void *base) */ ret++; *(u32 *)dest = make_data_loc(ret, (void *)dst - base); - } + } else + *(u32 *)dest = make_data_loc(0, (void *)dst - base); return ret; } From 2e18fd3f61bea53d4e47f28de5e5f8b5287b0bcc Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Tue, 13 Sep 2022 10:37:21 +0800 Subject: [PATCH 039/513] scsi: qla2xxx: Remove unused declarations for qla2xxx [ Upstream commit 1b80addaae099dc33e683d971aba90eeeaf887a3 ] qla2x00_get_fw_version_str() has been removed since commit abbd8870b9cb ("[SCSI] qla2xxx: Factor-out ISP specific functions to method-based call tables."). qla2x00_release_nvram_protection() has been removed since commit 459c537807bd ("[SCSI] qla2xxx: Add ISP24xx flash-manipulation routines."). qla82xx_rdmem() and qla82xx_wrmem() have been removed since commit 3711333dfbee ("[SCSI] qla2xxx: Updates for ISP82xx."). qla25xx_rd_req_reg(), qla24xx_rd_req_reg(), qla25xx_wrt_rsp_reg(), qla24xx_wrt_rsp_reg(), qla25xx_wrt_req_reg() and qla24xx_wrt_req_reg() have been removed since commit 08029990b25b ("[SCSI] qla2xxx: Refactor request/response-queue register handling."). qla2x00_async_login_done() has been removed since commit 726b85487067 ("qla2xxx: Add framework for async fabric discovery"). qlt_24xx_process_response_error() has been removed since commit c5419e2618b9 ("scsi: qla2xxx: Combine Active command arrays."). Remove the declarations for them from header file. Link: https://lore.kernel.org/r/20220913023722.547249-2-cuigaosheng1@huawei.com Signed-off-by: Gaosheng Cui Signed-off-by: Martin K. 
Petersen Stable-dep-of: 6a87679626b5 ("scsi: qla2xxx: Fix task management cmd fail due to unavailable resource") Signed-off-by: Sasha Levin --- drivers/scsi/qla2xxx/qla_gbl.h | 12 ------------ drivers/scsi/qla2xxx/qla_target.h | 2 -- 2 files changed, 14 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index f82e4a348330..4ed0777cd2a6 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -70,8 +70,6 @@ extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *); extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *, uint16_t *); extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t); -extern void qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *, - uint16_t *); struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *, enum qla_work_type); extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *); @@ -278,7 +276,6 @@ extern int qla24xx_vport_create_req_sanity_check(struct fc_vport *); extern scsi_qla_host_t *qla24xx_create_vhost(struct fc_vport *); extern void qla2x00_sp_free_dma(srb_t *sp); -extern char *qla2x00_get_fw_version_str(struct scsi_qla_host *, char *); extern void qla2x00_mark_device_lost(scsi_qla_host_t *, fc_port_t *, int); extern void qla2x00_mark_all_devices_lost(scsi_qla_host_t *); @@ -611,7 +608,6 @@ void __qla_consume_iocb(struct scsi_qla_host *vha, void **pkt, struct rsp_que ** /* * Global Function Prototypes in qla_sup.c source file. */ -extern void qla2x00_release_nvram_protection(scsi_qla_host_t *); extern int qla24xx_read_flash_data(scsi_qla_host_t *, uint32_t *, uint32_t, uint32_t); extern uint8_t *qla2x00_read_nvram_data(scsi_qla_host_t *, void *, uint32_t, @@ -781,12 +777,6 @@ extern void qla2x00_init_response_q_entries(struct rsp_que *); extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *); extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *); extern int qla25xx_delete_queues(struct scsi_qla_host *); -extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t); -extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t); -extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); -extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t); -extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); -extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t); /* qlafx00 related functions */ extern int qlafx00_pci_config(struct scsi_qla_host *); @@ -871,8 +861,6 @@ extern void qla82xx_init_flags(struct qla_hw_data *); extern void qla82xx_set_drv_active(scsi_qla_host_t *); extern int qla82xx_wr_32(struct qla_hw_data *, ulong, u32); extern int qla82xx_rd_32(struct qla_hw_data *, ulong); -extern int qla82xx_rdmem(struct qla_hw_data *, u64, void *, int); -extern int qla82xx_wrmem(struct qla_hw_data *, u64, void *, int); /* ISP 8021 IDC */ extern void qla82xx_clear_drv_active(struct qla_hw_data *); diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h index 156b950ca7e7..aa8343444837 100644 --- a/drivers/scsi/qla2xxx/qla_target.h +++ b/drivers/scsi/qla2xxx/qla_target.h @@ -1080,8 +1080,6 @@ extern void qlt_81xx_config_nvram_stage2(struct scsi_qla_host *, struct init_cb_81xx *); extern void qlt_81xx_config_nvram_stage1(struct scsi_qla_host *, struct nvram_81xx *); -extern int qlt_24xx_process_response_error(struct scsi_qla_host *, - struct sts_entry_24xx *); extern void 
qlt_modify_vp_config(struct scsi_qla_host *, struct vp_config_entry_24xx *); extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *); From 25cea82ea25db447c1868e8fc71947df8021e59e Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 28 Apr 2023 00:53:33 -0700 Subject: [PATCH 040/513] scsi: qla2xxx: Multi-que support for TMF [ Upstream commit d90171dd0da50212f5950cc708240831e82f2f91 ] Add queue flush for task management command, before placing it on the wire. Do IO flush for all Request Q's. Reported-by: kernel test robot Link: https://lore.kernel.org/oe-kbuild-all/202304271702.GpIL391S-lkp@intel.com/ Cc: stable@vger.kernel.org Signed-off-by: Quinn Tran Signed-off-by: Nilesh Javali Link: https://lore.kernel.org/r/20230428075339.32551-2-njavali@marvell.com Reviewed-by: Himanshu Madhani > Signed-off-by: Martin K. Petersen Stable-dep-of: 6a87679626b5 ("scsi: qla2xxx: Fix task management cmd fail due to unavailable resource") Signed-off-by: Sasha Levin --- drivers/scsi/qla2xxx/qla_def.h | 8 ++++ drivers/scsi/qla2xxx/qla_gbl.h | 2 +- drivers/scsi/qla2xxx/qla_init.c | 69 ++++++++++++++++++++++++++------- drivers/scsi/qla2xxx/qla_iocb.c | 5 ++- 4 files changed, 66 insertions(+), 18 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index e674d3fb5926..d4b8d9dd8106 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -452,6 +452,14 @@ static inline be_id_t port_id_to_be_id(port_id_t port_id) return res; } +struct tmf_arg { + struct qla_qpair *qpair; + struct fc_port *fcport; + struct scsi_qla_host *vha; + u64 lun; + u32 flags; +}; + struct els_logo_payload { uint8_t opcode; uint8_t rsvd[3]; diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h index 4ed0777cd2a6..9e467262c0f1 100644 --- a/drivers/scsi/qla2xxx/qla_gbl.h +++ b/drivers/scsi/qla2xxx/qla_gbl.h @@ -69,7 +69,7 @@ extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *); extern int qla2x00_async_prlo(struct scsi_qla_host *, fc_port_t *); extern int qla2x00_async_adisc(struct scsi_qla_host *, fc_port_t *, uint16_t *); -extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint32_t, uint32_t); +extern int qla2x00_async_tm_cmd(fc_port_t *, uint32_t, uint64_t, uint32_t); struct qla_work_evt *qla2x00_alloc_work(struct scsi_qla_host *, enum qla_work_type); extern int qla24xx_async_gnl(struct scsi_qla_host *, fc_port_t *); diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 37cb469dc925..f64c238e3a45 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2022,17 +2022,19 @@ static void qla2x00_tmf_sp_done(srb_t *sp, int res) complete(&tmf->u.tmf.comp); } -int -qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, - uint32_t tag) +static int +__qla2x00_async_tm_cmd(struct tmf_arg *arg) { - struct scsi_qla_host *vha = fcport->vha; + struct scsi_qla_host *vha = arg->vha; struct srb_iocb *tm_iocb; srb_t *sp; + unsigned long flags; int rval = QLA_FUNCTION_FAILED; + fc_port_t *fcport = arg->fcport; + /* ref: INIT */ - sp = qla2x00_get_sp(vha, fcport, GFP_KERNEL); + sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL); if (!sp) goto done; @@ -2045,15 +2047,15 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, tm_iocb = &sp->u.iocb_cmd; init_completion(&tm_iocb->u.tmf.comp); - tm_iocb->u.tmf.flags = flags; - tm_iocb->u.tmf.lun = lun; + tm_iocb->u.tmf.flags = arg->flags; + tm_iocb->u.tmf.lun = arg->lun; + rval = 
qla2x00_start_sp(sp); ql_dbg(ql_dbg_taskm, vha, 0x802f, - "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x.\n", + "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x ctrl=%x.\n", sp->handle, fcport->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa); + fcport->d_id.b.area, fcport->d_id.b.al_pa, arg->flags); - rval = qla2x00_start_sp(sp); if (rval != QLA_SUCCESS) goto done_free_sp; wait_for_completion(&tm_iocb->u.tmf.comp); @@ -2067,12 +2069,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) { flags = tm_iocb->u.tmf.flags; - lun = (uint16_t)tm_iocb->u.tmf.lun; + if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET| + TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) + flags = MK_SYNC_ID_LUN; + else + flags = MK_SYNC_ID; - /* Issue Marker IOCB */ - qla2x00_marker(vha, vha->hw->base_qpair, - fcport->loop_id, lun, - flags == TCF_LUN_RESET ? MK_SYNC_ID_LUN : MK_SYNC_ID); + qla2x00_marker(vha, sp->qpair, + sp->fcport->loop_id, arg->lun, flags); } done_free_sp: @@ -2082,6 +2086,41 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint32_t lun, return rval; } +int +qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun, + uint32_t tag) +{ + struct scsi_qla_host *vha = fcport->vha; + struct qla_qpair *qpair; + struct tmf_arg a; + struct completion comp; + int i, rval; + + init_completion(&comp); + a.vha = fcport->vha; + a.fcport = fcport; + a.lun = lun; + + if (vha->hw->mqenable) { + for (i = 0; i < vha->hw->num_qpairs; i++) { + qpair = vha->hw->queue_pair_map[i]; + if (!qpair) + continue; + a.qpair = qpair; + a.flags = flags|TCF_NOTMCMD_TO_TARGET; + rval = __qla2x00_async_tm_cmd(&a); + if (rval) + break; + } + } + + a.qpair = vha->hw->base_qpair; + a.flags = flags; + rval = __qla2x00_async_tm_cmd(&a); + + return rval; +} + int qla24xx_async_abort_command(srb_t *sp) { diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 7a4298388ef1..ac82392c22ee 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -2542,7 +2542,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) scsi_qla_host_t *vha = fcport->vha; struct qla_hw_data *ha = vha->hw; struct srb_iocb *iocb = &sp->u.iocb_cmd; - struct req_que *req = vha->req; + struct req_que *req = sp->qpair->req; flags = iocb->u.tmf.flags; lun = iocb->u.tmf.lun; @@ -2558,7 +2558,8 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk) tsk->port_id[2] = fcport->d_id.b.domain; tsk->vp_index = fcport->vha->vp_idx; - if (flags == TCF_LUN_RESET) { + if (flags & (TCF_LUN_RESET | TCF_ABORT_TASK_SET| + TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) { int_to_scsilun(lun, &tsk->lun); host_to_fcp_swap((uint8_t *)&tsk->lun, sizeof(tsk->lun)); From 01366f0b656aedb7f9265abf5b3974480c571fb7 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 28 Apr 2023 00:53:34 -0700 Subject: [PATCH 041/513] scsi: qla2xxx: Fix task management cmd failure [ Upstream commit 9803fb5d27597ea98f2e05b0b6cfc48ae808458e ] Task management cmd failed with status 30h which means FW is not able to finish processing one task management before another task management for the same lun. Hence add wait for completion of marker to space it out. 
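To illustrate the approach, here is a rough sketch of the "space TMFs out with a synchronizing marker" idea; it is not the actual qla2xxx code, and struct tmf_ctx, send_tmf() and send_marker_iocb() are made-up names standing in for the driver's real helpers:

#include <linux/completion.h>

struct tmf_ctx {
	struct completion marker_done;	/* completed from the marker's done callback */
};

/* hypothetical helpers that queue the IOCBs to firmware */
int send_tmf(struct tmf_ctx *ctx);
int send_marker_iocb(struct tmf_ctx *ctx);

static int issue_tmf_spaced(struct tmf_ctx *ctx)
{
	int rc;

	rc = send_tmf(ctx);
	if (rc)
		return rc;

	/*
	 * Queue a marker IOCB and wait for firmware to complete it before
	 * any further TMF is sent for the same LUN, so the previous TMF is
	 * guaranteed to be fully processed first.
	 */
	rc = send_marker_iocb(ctx);
	if (rc)
		return rc;

	wait_for_completion(&ctx->marker_done);
	return 0;
}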
Reported-by: kernel test robot Link: https://lore.kernel.org/oe-kbuild-all/202304271802.uCZfwQC1-lkp@intel.com/ Cc: stable@vger.kernel.org Signed-off-by: Quinn Tran Signed-off-by: Nilesh Javali Link: https://lore.kernel.org/r/20230428075339.32551-3-njavali@marvell.com Reviewed-by: Himanshu Madhani > Signed-off-by: Martin K. Petersen Stable-dep-of: 6a87679626b5 ("scsi: qla2xxx: Fix task management cmd fail due to unavailable resource") Signed-off-by: Sasha Levin --- drivers/scsi/qla2xxx/qla_def.h | 6 ++ drivers/scsi/qla2xxx/qla_init.c | 102 +++++++++++++++++++++++++++----- drivers/scsi/qla2xxx/qla_iocb.c | 28 +++++++-- drivers/scsi/qla2xxx/qla_isr.c | 26 +++++++- 4 files changed, 139 insertions(+), 23 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index d4b8d9dd8106..ca63aa8f33b5 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -458,6 +458,7 @@ struct tmf_arg { struct scsi_qla_host *vha; u64 lun; u32 flags; + uint8_t modifier; }; struct els_logo_payload { @@ -539,6 +540,10 @@ struct srb_iocb { uint32_t data; struct completion comp; __le16 comp_status; + + uint8_t modifier; + uint8_t vp_index; + uint16_t loop_id; } tmf; struct { #define SRB_FXDISC_REQ_DMA_VALID BIT_0 @@ -642,6 +647,7 @@ struct srb_iocb { #define SRB_SA_UPDATE 25 #define SRB_ELS_CMD_HST_NOLOGIN 26 #define SRB_SA_REPLACE 27 +#define SRB_MARKER 28 struct qla_els_pt_arg { u8 els_opcode; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index f64c238e3a45..9d9b16f7f34a 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2015,6 +2015,80 @@ qla2x00_tmf_iocb_timeout(void *data) } } +static void qla_marker_sp_done(srb_t *sp, int res) +{ + struct srb_iocb *tmf = &sp->u.iocb_cmd; + + if (res != QLA_SUCCESS) + ql_dbg(ql_dbg_taskm, sp->vha, 0x8004, + "Async-marker fail hdl=%x portid=%06x ctrl=%x lun=%lld qp=%d.\n", + sp->handle, sp->fcport->d_id.b24, sp->u.iocb_cmd.u.tmf.flags, + sp->u.iocb_cmd.u.tmf.lun, sp->qpair->id); + + complete(&tmf->u.tmf.comp); +} + +#define START_SP_W_RETRIES(_sp, _rval) \ +{\ + int cnt = 5; \ + do { \ + _rval = qla2x00_start_sp(_sp); \ + if (_rval == EAGAIN) \ + msleep(1); \ + else \ + break; \ + cnt--; \ + } while (cnt); \ +} + +static int +qla26xx_marker(struct tmf_arg *arg) +{ + struct scsi_qla_host *vha = arg->vha; + struct srb_iocb *tm_iocb; + srb_t *sp; + int rval = QLA_FUNCTION_FAILED; + fc_port_t *fcport = arg->fcport; + + /* ref: INIT */ + sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL); + if (!sp) + goto done; + + sp->type = SRB_MARKER; + sp->name = "marker"; + qla2x00_init_async_sp(sp, qla2x00_get_async_timeout(vha), qla_marker_sp_done); + sp->u.iocb_cmd.timeout = qla2x00_tmf_iocb_timeout; + + tm_iocb = &sp->u.iocb_cmd; + init_completion(&tm_iocb->u.tmf.comp); + tm_iocb->u.tmf.modifier = arg->modifier; + tm_iocb->u.tmf.lun = arg->lun; + tm_iocb->u.tmf.loop_id = fcport->loop_id; + tm_iocb->u.tmf.vp_index = vha->vp_idx; + + START_SP_W_RETRIES(sp, rval); + + ql_dbg(ql_dbg_taskm, vha, 0x8006, + "Async-marker hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n", + sp->handle, fcport->loop_id, fcport->d_id.b24, + arg->modifier, arg->lun, sp->qpair->id, rval); + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x8031, + "Marker IOCB failed (%x).\n", rval); + goto done_free_sp; + } + + wait_for_completion(&tm_iocb->u.tmf.comp); + +done_free_sp: + /* ref: INIT */ + kref_put(&sp->cmd_kref, qla2x00_sp_release); +done: + return rval; 
+} + static void qla2x00_tmf_sp_done(srb_t *sp, int res) { struct srb_iocb *tmf = &sp->u.iocb_cmd; @@ -2028,7 +2102,6 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg) struct scsi_qla_host *vha = arg->vha; struct srb_iocb *tm_iocb; srb_t *sp; - unsigned long flags; int rval = QLA_FUNCTION_FAILED; fc_port_t *fcport = arg->fcport; @@ -2050,11 +2123,12 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg) tm_iocb->u.tmf.flags = arg->flags; tm_iocb->u.tmf.lun = arg->lun; - rval = qla2x00_start_sp(sp); + START_SP_W_RETRIES(sp, rval); + ql_dbg(ql_dbg_taskm, vha, 0x802f, - "Async-tmf hdl=%x loop-id=%x portid=%02x%02x%02x ctrl=%x.\n", - sp->handle, fcport->loop_id, fcport->d_id.b.domain, - fcport->d_id.b.area, fcport->d_id.b.al_pa, arg->flags); + "Async-tmf hdl=%x loop-id=%x portid=%06x ctrl=%x lun=%lld qp=%d rval=%x.\n", + sp->handle, fcport->loop_id, fcport->d_id.b24, + arg->flags, arg->lun, sp->qpair->id, rval); if (rval != QLA_SUCCESS) goto done_free_sp; @@ -2067,17 +2141,8 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg) "TM IOCB failed (%x).\n", rval); } - if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) { - flags = tm_iocb->u.tmf.flags; - if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET| - TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) - flags = MK_SYNC_ID_LUN; - else - flags = MK_SYNC_ID; - - qla2x00_marker(vha, sp->qpair, - sp->fcport->loop_id, arg->lun, flags); - } + if (!test_bit(UNLOADING, &vha->dpc_flags) && !IS_QLAFX00(vha->hw)) + rval = qla26xx_marker(arg); done_free_sp: /* ref: INIT */ @@ -2101,6 +2166,11 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun, a.fcport = fcport; a.lun = lun; + if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) + a.modifier = MK_SYNC_ID_LUN; + else + a.modifier = MK_SYNC_ID; + if (vha->hw->mqenable) { for (i = 0; i < vha->hw->num_qpairs; i++) { qpair = vha->hw->queue_pair_map[i]; diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index ac82392c22ee..c9a686f06d29 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -522,21 +522,25 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct qla_qpair *qpair, return (QLA_FUNCTION_FAILED); } + mrk24 = (struct mrk_entry_24xx *)mrk; + mrk->entry_type = MARKER_TYPE; mrk->modifier = type; if (type != MK_SYNC_ALL) { if (IS_FWI2_CAPABLE(ha)) { - mrk24 = (struct mrk_entry_24xx *) mrk; mrk24->nport_handle = cpu_to_le16(loop_id); int_to_scsilun(lun, (struct scsi_lun *)&mrk24->lun); host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun)); mrk24->vp_index = vha->vp_idx; - mrk24->handle = make_handle(req->id, mrk24->handle); } else { SET_TARGET_ID(ha, mrk->target, loop_id); mrk->lun = cpu_to_le16((uint16_t)lun); } } + + if (IS_FWI2_CAPABLE(ha)) + mrk24->handle = QLA_SKIP_HANDLE; + wmb(); qla2x00_start_iocbs(vha, req); @@ -3860,9 +3864,9 @@ int qla_get_iocbs_resource(struct srb *sp) case SRB_NACK_LOGO: case SRB_LOGOUT_CMD: case SRB_CTRL_VP: - push_it_through = true; - fallthrough; + case SRB_MARKER: default: + push_it_through = true; get_exch = false; } @@ -3878,6 +3882,19 @@ int qla_get_iocbs_resource(struct srb *sp) return qla_get_fw_resources(sp->qpair, &sp->iores); } +static void +qla_marker_iocb(srb_t *sp, struct mrk_entry_24xx *mrk) +{ + mrk->entry_type = MARKER_TYPE; + mrk->modifier = sp->u.iocb_cmd.u.tmf.modifier; + if (sp->u.iocb_cmd.u.tmf.modifier != MK_SYNC_ALL) { + mrk->nport_handle = cpu_to_le16(sp->u.iocb_cmd.u.tmf.loop_id); + int_to_scsilun(sp->u.iocb_cmd.u.tmf.lun, (struct scsi_lun *)&mrk->lun); + 
host_to_fcp_swap(mrk->lun, sizeof(mrk->lun)); + mrk->vp_index = sp->u.iocb_cmd.u.tmf.vp_index; + } +} + int qla2x00_start_sp(srb_t *sp) { @@ -3981,6 +3998,9 @@ qla2x00_start_sp(srb_t *sp) case SRB_SA_REPLACE: qla24xx_sa_replace_iocb(sp, pkt); break; + case SRB_MARKER: + qla_marker_iocb(sp, pkt); + break; default: break; } diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c index 665959938e5e..08d43f43995e 100644 --- a/drivers/scsi/qla2xxx/qla_isr.c +++ b/drivers/scsi/qla2xxx/qla_isr.c @@ -3737,6 +3737,28 @@ static int qla_chk_cont_iocb_avail(struct scsi_qla_host *vha, return rc; } +static void qla_marker_iocb_entry(scsi_qla_host_t *vha, struct req_que *req, + struct mrk_entry_24xx *pkt) +{ + const char func[] = "MRK-IOCB"; + srb_t *sp; + int res = QLA_SUCCESS; + + if (!IS_FWI2_CAPABLE(vha->hw)) + return; + + sp = qla2x00_get_sp_from_handle(vha, func, req, pkt); + if (!sp) + return; + + if (pkt->entry_status) { + ql_dbg(ql_dbg_taskm, vha, 0x8025, "marker failure.\n"); + res = QLA_COMMAND_ERROR; + } + sp->u.iocb_cmd.u.tmf.data = res; + sp->done(sp, res); +} + /** * qla24xx_process_response_queue() - Process response queue entries. * @vha: SCSI driver HA context @@ -3858,9 +3880,7 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha, (struct nack_to_isp *)pkt); break; case MARKER_TYPE: - /* Do nothing in this case, this check is to prevent it - * from falling into default case - */ + qla_marker_iocb_entry(vha, rsp->req, (struct mrk_entry_24xx *)pkt); break; case ABORT_IOCB_TYPE: qla24xx_abort_iocb_entry(vha, rsp->req, From f90d44e5bbbee58167ad2d6e0ca4f905aee43ee3 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 28 Apr 2023 00:53:35 -0700 Subject: [PATCH 042/513] scsi: qla2xxx: Fix task management cmd fail due to unavailable resource [ Upstream commit 6a87679626b51b53fbb6be417ad8eb083030b617 ] Task management command failed with status 2Ch which is a result of too many task management commands sent to the same target. Hence limit task management commands to 8 per target. Reported-by: kernel test robot Link: https://lore.kernel.org/oe-kbuild-all/202304271952.NKNmoFzv-lkp@intel.com/ Cc: stable@vger.kernel.org Signed-off-by: Quinn Tran Signed-off-by: Nilesh Javali Link: https://lore.kernel.org/r/20230428075339.32551-4-njavali@marvell.com Reviewed-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/qla2xxx/qla_def.h | 3 ++ drivers/scsi/qla2xxx/qla_init.c | 63 ++++++++++++++++++++++++++++++--- 2 files changed, 61 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index ca63aa8f33b5..bbeb116c16cc 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -2523,6 +2523,7 @@ enum rscn_addr_format { typedef struct fc_port { struct list_head list; struct scsi_qla_host *vha; + struct list_head tmf_pending; unsigned int conf_compl_supported:1; unsigned int deleted:2; @@ -2543,6 +2544,8 @@ typedef struct fc_port { unsigned int do_prli_nvme:1; uint8_t nvme_flag; + uint8_t active_tmf; +#define MAX_ACTIVE_TMF 8 uint8_t node_name[WWN_SIZE]; uint8_t port_name[WWN_SIZE]; diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 9d9b16f7f34a..2c42ecb2a64a 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -2151,6 +2151,54 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg) return rval; } +static void qla_put_tmf(fc_port_t *fcport) +{ + struct scsi_qla_host *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + fcport->active_tmf--; + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); +} + +static +int qla_get_tmf(fc_port_t *fcport) +{ + struct scsi_qla_host *vha = fcport->vha; + struct qla_hw_data *ha = vha->hw; + unsigned long flags; + int rc = 0; + LIST_HEAD(tmf_elem); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + list_add_tail(&tmf_elem, &fcport->tmf_pending); + + while (fcport->active_tmf >= MAX_ACTIVE_TMF) { + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + msleep(1); + + spin_lock_irqsave(&ha->tgt.sess_lock, flags); + if (fcport->deleted) { + rc = EIO; + break; + } + if (fcport->active_tmf < MAX_ACTIVE_TMF && + list_is_first(&tmf_elem, &fcport->tmf_pending)) + break; + } + + list_del(&tmf_elem); + + if (!rc) + fcport->active_tmf++; + + spin_unlock_irqrestore(&ha->tgt.sess_lock, flags); + + return rc; +} + int qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun, uint32_t tag) @@ -2158,18 +2206,19 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun, struct scsi_qla_host *vha = fcport->vha; struct qla_qpair *qpair; struct tmf_arg a; - struct completion comp; int i, rval; - init_completion(&comp); a.vha = fcport->vha; a.fcport = fcport; a.lun = lun; - - if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) + if (flags & (TCF_LUN_RESET|TCF_ABORT_TASK_SET|TCF_CLEAR_TASK_SET|TCF_CLEAR_ACA)) { a.modifier = MK_SYNC_ID_LUN; - else + + if (qla_get_tmf(fcport)) + return QLA_FUNCTION_FAILED; + } else { a.modifier = MK_SYNC_ID; + } if (vha->hw->mqenable) { for (i = 0; i < vha->hw->num_qpairs; i++) { @@ -2188,6 +2237,9 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun, a.flags = flags; rval = __qla2x00_async_tm_cmd(&a); + if (a.modifier == MK_SYNC_ID_LUN) + qla_put_tmf(fcport); + return rval; } @@ -5423,6 +5475,7 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags) INIT_WORK(&fcport->reg_work, qla_register_fcport_fn); INIT_LIST_HEAD(&fcport->gnl_entry); INIT_LIST_HEAD(&fcport->list); + INIT_LIST_HEAD(&fcport->tmf_pending); INIT_LIST_HEAD(&fcport->sess_cmd_list); spin_lock_init(&fcport->sess_cmd_lock); From 58daf4e8709d375e680320aa587604942fbaf54e Mon Sep 17 00:00:00 2001 From: Arun Easi Date: Wed, 15 Jun 2022 22:35:05 -0700 Subject: [PATCH 
043/513] scsi: qla2xxx: Add debug prints in the device remove path [ Upstream commit f12d2d130efc49464ef0666789bfeb9073162743 ] Add a debug print in the devloss callback. Link: https://lore.kernel.org/r/20220616053508.27186-9-njavali@marvell.com Signed-off-by: Arun Easi Signed-off-by: Nilesh Javali Signed-off-by: Martin K. Petersen Stable-dep-of: 9ae615c5bfd3 ("scsi: qla2xxx: Fix hang in task management") Signed-off-by: Sasha Levin --- drivers/scsi/qla2xxx/qla_attr.c | 3 +++ drivers/scsi/qla2xxx/qla_def.h | 6 ++++++ 2 files changed, 9 insertions(+) diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c index de57d45ffc5c..4a5df867057b 100644 --- a/drivers/scsi/qla2xxx/qla_attr.c +++ b/drivers/scsi/qla2xxx/qla_attr.c @@ -2705,6 +2705,9 @@ qla2x00_dev_loss_tmo_callbk(struct fc_rport *rport) if (!fcport) return; + ql_dbg(ql_dbg_async, fcport->vha, 0x5101, + DBG_FCPORT_PRFMT(fcport, "dev_loss_tmo expiry, rport_state=%d", + rport->port_state)); /* * Now that the rport has been deleted, set the fcport state to diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index bbeb116c16cc..2a2bca2fb57e 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -5475,4 +5475,10 @@ struct ql_vnd_tgt_stats_resp { #define IS_SESSION_DELETED(_fcport) (_fcport->disc_state == DSC_DELETE_PEND || \ _fcport->disc_state == DSC_DELETED) +#define DBG_FCPORT_PRFMT(_fp, _fmt, _args...) \ + "%s: %8phC: " _fmt " (state=%d disc_state=%d scan_state=%d loopid=0x%x deleted=%d flags=0x%x)\n", \ + __func__, _fp->port_name, ##_args, atomic_read(&_fp->state), \ + _fp->disc_state, _fp->scan_state, _fp->loop_id, _fp->deleted, \ + _fp->flags + #endif From aa56bcff46a1f31e02e85966d98b66a5e9753d25 Mon Sep 17 00:00:00 2001 From: Quinn Tran Date: Fri, 28 Apr 2023 00:53:36 -0700 Subject: [PATCH 044/513] scsi: qla2xxx: Fix hang in task management [ Upstream commit 9ae615c5bfd37bd091772969b1153de5335ea986 ] Task management command hangs where a side band chip reset failed to nudge the TMF from it's current send path. Add additional error check to block TMF from entering during chip reset and along the TMF path to cause it to bail out, skip over abort of marker. Cc: stable@vger.kernel.org Signed-off-by: Quinn Tran Signed-off-by: Nilesh Javali Link: https://lore.kernel.org/r/20230428075339.32551-5-njavali@marvell.com Reviewed-by: Himanshu Madhani Signed-off-by: Martin K. 
Petersen Signed-off-by: Sasha Levin --- drivers/scsi/qla2xxx/qla_def.h | 4 +++ drivers/scsi/qla2xxx/qla_init.c | 60 +++++++++++++++++++++++++++++++-- 2 files changed, 61 insertions(+), 3 deletions(-) diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h index 2a2bca2fb57e..83228ce822af 100644 --- a/drivers/scsi/qla2xxx/qla_def.h +++ b/drivers/scsi/qla2xxx/qla_def.h @@ -5481,4 +5481,8 @@ struct ql_vnd_tgt_stats_resp { _fp->disc_state, _fp->scan_state, _fp->loop_id, _fp->deleted, \ _fp->flags +#define TMF_NOT_READY(_fcport) \ + (!_fcport || IS_SESSION_DELETED(_fcport) || atomic_read(&_fcport->state) != FCS_ONLINE || \ + !_fcport->vha->hw->flags.fw_started) + #endif diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c index 2c42ecb2a64a..a97872b6350c 100644 --- a/drivers/scsi/qla2xxx/qla_init.c +++ b/drivers/scsi/qla2xxx/qla_init.c @@ -1998,6 +1998,11 @@ qla2x00_tmf_iocb_timeout(void *data) int rc, h; unsigned long flags; + if (sp->type == SRB_MARKER) { + complete(&tmf->u.tmf.comp); + return; + } + rc = qla24xx_async_abort_cmd(sp, false); if (rc) { spin_lock_irqsave(sp->qpair->qp_lock_ptr, flags); @@ -2025,6 +2030,7 @@ static void qla_marker_sp_done(srb_t *sp, int res) sp->handle, sp->fcport->d_id.b24, sp->u.iocb_cmd.u.tmf.flags, sp->u.iocb_cmd.u.tmf.lun, sp->qpair->id); + sp->u.iocb_cmd.u.tmf.data = res; complete(&tmf->u.tmf.comp); } @@ -2041,6 +2047,11 @@ static void qla_marker_sp_done(srb_t *sp, int res) } while (cnt); \ } +/** + * qla26xx_marker: send marker IOCB and wait for the completion of it. + * @arg: pointer to argument list. + * It is assume caller will provide an fcport pointer and modifier + */ static int qla26xx_marker(struct tmf_arg *arg) { @@ -2050,6 +2061,14 @@ qla26xx_marker(struct tmf_arg *arg) int rval = QLA_FUNCTION_FAILED; fc_port_t *fcport = arg->fcport; + if (TMF_NOT_READY(arg->fcport)) { + ql_dbg(ql_dbg_taskm, vha, 0x8039, + "FC port not ready for marker loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n", + fcport->loop_id, fcport->d_id.b24, + arg->modifier, arg->lun, arg->qpair->id); + return QLA_SUSPENDED; + } + /* ref: INIT */ sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL); if (!sp) @@ -2076,11 +2095,19 @@ qla26xx_marker(struct tmf_arg *arg) if (rval != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x8031, - "Marker IOCB failed (%x).\n", rval); + "Marker IOCB send failure (%x).\n", rval); goto done_free_sp; } wait_for_completion(&tm_iocb->u.tmf.comp); + rval = tm_iocb->u.tmf.data; + + if (rval != QLA_SUCCESS) { + ql_log(ql_log_warn, vha, 0x8019, + "Marker failed hdl=%x loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d rval %d.\n", + sp->handle, fcport->loop_id, fcport->d_id.b24, + arg->modifier, arg->lun, sp->qpair->id, rval); + } done_free_sp: /* ref: INIT */ @@ -2093,6 +2120,8 @@ static void qla2x00_tmf_sp_done(srb_t *sp, int res) { struct srb_iocb *tmf = &sp->u.iocb_cmd; + if (res) + tmf->u.tmf.data = res; complete(&tmf->u.tmf.comp); } @@ -2106,6 +2135,14 @@ __qla2x00_async_tm_cmd(struct tmf_arg *arg) fc_port_t *fcport = arg->fcport; + if (TMF_NOT_READY(arg->fcport)) { + ql_dbg(ql_dbg_taskm, vha, 0x8032, + "FC port not ready for TM command loop-id=%x portid=%06x modifier=%x lun=%lld qp=%d.\n", + fcport->loop_id, fcport->d_id.b24, + arg->modifier, arg->lun, arg->qpair->id); + return QLA_SUSPENDED; + } + /* ref: INIT */ sp = qla2xxx_get_qpair_sp(vha, arg->qpair, fcport, GFP_KERNEL); if (!sp) @@ -2180,7 +2217,9 @@ int qla_get_tmf(fc_port_t *fcport) msleep(1); spin_lock_irqsave(&ha->tgt.sess_lock, 
flags); - if (fcport->deleted) { + if (TMF_NOT_READY(fcport)) { + ql_log(ql_log_warn, vha, 0x802c, + "Unable to acquire TM resource due to disruption.\n"); rc = EIO; break; } @@ -2206,7 +2245,10 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun, struct scsi_qla_host *vha = fcport->vha; struct qla_qpair *qpair; struct tmf_arg a; - int i, rval; + int i, rval = QLA_SUCCESS; + + if (TMF_NOT_READY(fcport)) + return QLA_SUSPENDED; a.vha = fcport->vha; a.fcport = fcport; @@ -2225,6 +2267,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun, qpair = vha->hw->queue_pair_map[i]; if (!qpair) continue; + + if (TMF_NOT_READY(fcport)) { + ql_log(ql_log_warn, vha, 0x8026, + "Unable to send TM due to disruption.\n"); + rval = QLA_SUSPENDED; + break; + } + a.qpair = qpair; a.flags = flags|TCF_NOTMCMD_TO_TARGET; rval = __qla2x00_async_tm_cmd(&a); @@ -2233,10 +2283,14 @@ qla2x00_async_tm_cmd(fc_port_t *fcport, uint32_t flags, uint64_t lun, } } + if (rval) + goto bailout; + a.qpair = vha->hw->base_qpair; a.flags = flags; rval = __qla2x00_async_tm_cmd(&a); +bailout: if (a.modifier == MK_SYNC_ID_LUN) qla_put_tmf(fcport); From fc399b0fdf2db754b6a50126498d91c525589eb0 Mon Sep 17 00:00:00 2001 From: Flora Cui Date: Wed, 24 Nov 2021 10:34:57 +0800 Subject: [PATCH 045/513] drm/amdgpu: fix vkms crtc settings [ Upstream commit deefd07eedb7baa25956c8365373e6a58c81565a ] otherwise adev->mode_info.crtcs[] is NULL Signed-off-by: Flora Cui Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Stable-dep-of: b42ae87a7b38 ("drm/amdgpu/vkms: relax timer deactivation by hrtimer_try_to_cancel") Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 42 ++++++++++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h | 5 ++- 2 files changed, 30 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index 7d58bf410be0..24251cdf9507 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -16,6 +16,8 @@ #include "ivsrcid/ivsrcid_vislands30.h" #include "amdgpu_vkms.h" #include "amdgpu_display.h" +#include "atom.h" +#include "amdgpu_irq.h" /** * DOC: amdgpu_vkms @@ -41,14 +43,13 @@ static const u32 amdgpu_vkms_formats[] = { static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer) { - struct amdgpu_vkms_output *output = container_of(timer, - struct amdgpu_vkms_output, - vblank_hrtimer); - struct drm_crtc *crtc = &output->crtc; + struct amdgpu_crtc *amdgpu_crtc = container_of(timer, struct amdgpu_crtc, vblank_timer); + struct drm_crtc *crtc = &amdgpu_crtc->base; + struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc); u64 ret_overrun; bool ret; - ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer, + ret_overrun = hrtimer_forward_now(&amdgpu_crtc->vblank_timer, output->period_ns); WARN_ON(ret_overrun != 1); @@ -65,22 +66,21 @@ static int amdgpu_vkms_enable_vblank(struct drm_crtc *crtc) unsigned int pipe = drm_crtc_index(crtc); struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc); + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); drm_calc_timestamping_constants(crtc, &crtc->mode); - hrtimer_init(&out->vblank_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - out->vblank_hrtimer.function = &amdgpu_vkms_vblank_simulate; out->period_ns = ktime_set(0, vblank->framedur_ns); - hrtimer_start(&out->vblank_hrtimer, out->period_ns, 
HRTIMER_MODE_REL); + hrtimer_start(&amdgpu_crtc->vblank_timer, out->period_ns, HRTIMER_MODE_REL); return 0; } static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc) { - struct amdgpu_vkms_output *out = drm_crtc_to_amdgpu_vkms_output(crtc); + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - hrtimer_cancel(&out->vblank_hrtimer); + hrtimer_cancel(&amdgpu_crtc->vblank_timer); } static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc, @@ -92,13 +92,14 @@ static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc, unsigned int pipe = crtc->index; struct amdgpu_vkms_output *output = drm_crtc_to_amdgpu_vkms_output(crtc); struct drm_vblank_crtc *vblank = &dev->vblank[pipe]; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); if (!READ_ONCE(vblank->enabled)) { *vblank_time = ktime_get(); return true; } - *vblank_time = READ_ONCE(output->vblank_hrtimer.node.expires); + *vblank_time = READ_ONCE(amdgpu_crtc->vblank_timer.node.expires); if (WARN_ON(*vblank_time == vblank->time)) return true; @@ -166,6 +167,8 @@ static const struct drm_crtc_helper_funcs amdgpu_vkms_crtc_helper_funcs = { static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, struct drm_plane *primary, struct drm_plane *cursor) { + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); int ret; ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor, @@ -177,6 +180,17 @@ static int amdgpu_vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc, drm_crtc_helper_add(crtc, &amdgpu_vkms_crtc_helper_funcs); + amdgpu_crtc->crtc_id = drm_crtc_index(crtc); + adev->mode_info.crtcs[drm_crtc_index(crtc)] = amdgpu_crtc; + + amdgpu_crtc->pll_id = ATOM_PPLL_INVALID; + amdgpu_crtc->encoder = NULL; + amdgpu_crtc->connector = NULL; + amdgpu_crtc->vsync_timer_enabled = AMDGPU_IRQ_STATE_DISABLE; + + hrtimer_init(&amdgpu_crtc->vblank_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + amdgpu_crtc->vblank_timer.function = &amdgpu_vkms_vblank_simulate; + return ret; } @@ -402,7 +416,7 @@ int amdgpu_vkms_output_init(struct drm_device *dev, { struct drm_connector *connector = &output->connector; struct drm_encoder *encoder = &output->encoder; - struct drm_crtc *crtc = &output->crtc; + struct drm_crtc *crtc = &output->crtc.base; struct drm_plane *primary, *cursor = NULL; int ret; @@ -505,8 +519,8 @@ static int amdgpu_vkms_sw_fini(void *handle) int i = 0; for (i = 0; i < adev->mode_info.num_crtc; i++) - if (adev->amdgpu_vkms_output[i].vblank_hrtimer.function) - hrtimer_cancel(&adev->amdgpu_vkms_output[i].vblank_hrtimer); + if (adev->mode_info.crtcs[i]) + hrtimer_cancel(&adev->mode_info.crtcs[i]->vblank_timer); kfree(adev->mode_info.bios_hardcoded_edid); kfree(adev->amdgpu_vkms_output); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h index 97f1b79c0724..4f8722ff37c2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.h @@ -10,15 +10,14 @@ #define YRES_MAX 16384 #define drm_crtc_to_amdgpu_vkms_output(target) \ - container_of(target, struct amdgpu_vkms_output, crtc) + container_of(target, struct amdgpu_vkms_output, crtc.base) extern const struct amdgpu_ip_block_version amdgpu_vkms_ip_block; struct amdgpu_vkms_output { - struct drm_crtc crtc; + struct amdgpu_crtc crtc; struct drm_encoder encoder; struct drm_connector connector; - struct hrtimer vblank_hrtimer; ktime_t period_ns; struct drm_pending_vblank_event *event; }; From 
d6f92582816ce075983363f6a8f1be59273074ac Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Thu, 6 Jul 2023 15:57:21 +0800 Subject: [PATCH 046/513] drm/amdgpu/vkms: relax timer deactivation by hrtimer_try_to_cancel MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit b42ae87a7b3878afaf4c3852ca66c025a5b996e0 ] In below thousands of screen rotation loop tests with virtual display enabled, a CPU hard lockup issue may happen, leading system to unresponsive and crash. do { xrandr --output Virtual --rotate inverted xrandr --output Virtual --rotate right xrandr --output Virtual --rotate left xrandr --output Virtual --rotate normal } while (1); NMI watchdog: Watchdog detected hard LOCKUP on cpu 1 ? hrtimer_run_softirq+0x140/0x140 ? store_vblank+0xe0/0xe0 [drm] hrtimer_cancel+0x15/0x30 amdgpu_vkms_disable_vblank+0x15/0x30 [amdgpu] drm_vblank_disable_and_save+0x185/0x1f0 [drm] drm_crtc_vblank_off+0x159/0x4c0 [drm] ? record_print_text.cold+0x11/0x11 ? wait_for_completion_timeout+0x232/0x280 ? drm_crtc_wait_one_vblank+0x40/0x40 [drm] ? bit_wait_io_timeout+0xe0/0xe0 ? wait_for_completion_interruptible+0x1d7/0x320 ? mutex_unlock+0x81/0xd0 amdgpu_vkms_crtc_atomic_disable It's caused by a stuck in lock dependency in such scenario on different CPUs. CPU1 CPU2 drm_crtc_vblank_off hrtimer_interrupt grab event_lock (irq disabled) __hrtimer_run_queues grab vbl_lock/vblank_time_block amdgpu_vkms_vblank_simulate amdgpu_vkms_disable_vblank drm_handle_vblank hrtimer_cancel grab dev->event_lock So CPU1 stucks in hrtimer_cancel as timer callback is running endless on current clock base, as that timer queue on CPU2 has no chance to finish it because of failing to hold the lock. So NMI watchdog will throw the errors after its threshold, and all later CPUs are impacted/blocked. So use hrtimer_try_to_cancel to fix this, as disable_vblank callback does not need to wait the handler to finish. And also it's not necessary to check the return value of hrtimer_try_to_cancel, because even if it's -1 which means current timer callback is running, it will be reprogrammed in hrtimer_start with calling enable_vblank to make it works. v2: only re-arm timer when vblank is enabled (Christian) and add a Fixes tag as well v3: drop warn printing (Christian) v4: drop superfluous check of blank->enabled in timer function, as it's guaranteed in drm_handle_vblank (Christian) Fixes: 84ec374bd580 ("drm/amdgpu: create amdgpu_vkms (v4)") Cc: stable@vger.kernel.org Suggested-by: Christian König Signed-off-by: Guchun Chen Reviewed-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c index 24251cdf9507..4e8274de8fc0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vkms.c @@ -54,8 +54,9 @@ static enum hrtimer_restart amdgpu_vkms_vblank_simulate(struct hrtimer *timer) WARN_ON(ret_overrun != 1); ret = drm_crtc_handle_vblank(crtc); + /* Don't queue timer again when vblank is disabled. 
*/ if (!ret) - DRM_ERROR("amdgpu_vkms failure on handling vblank"); + return HRTIMER_NORESTART; return HRTIMER_RESTART; } @@ -80,7 +81,7 @@ static void amdgpu_vkms_disable_vblank(struct drm_crtc *crtc) { struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); - hrtimer_cancel(&amdgpu_crtc->vblank_timer); + hrtimer_try_to_cancel(&amdgpu_crtc->vblank_timer); } static bool amdgpu_vkms_get_vblank_timestamp(struct drm_crtc *crtc, From 450ef59bef9a427d85ca92c3909cfc8dff92a0af Mon Sep 17 00:00:00 2001 From: Yuan Can Date: Thu, 22 Sep 2022 11:12:28 +0000 Subject: [PATCH 047/513] phy: qcom-snps: Use dev_err_probe() to simplify code [ Upstream commit 668dc8afce43d4bc01feb3e929d6d5ffcb14f899 ] In the probe path, dev_err() can be replaced with dev_err_probe() which will check if error code is -EPROBE_DEFER and prints the error name. It also sets the defer probe reason which can be checked later through debugfs. Signed-off-by: Yuan Can Reviewed-by: Dmitry Baryshkov Reviewed-by: Andrew Halaney Link: https://lore.kernel.org/r/20220922111228.36355-8-yuancan@huawei.com Signed-off-by: Vinod Koul Stable-dep-of: 8a0eb8f9b9a0 ("phy: qcom-snps-femto-v2: properly enable ref clock") Signed-off-by: Sasha Levin --- drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c index 7e61202aa234..54846259405a 100644 --- a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c +++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c @@ -304,12 +304,9 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev) return PTR_ERR(hsphy->base); hsphy->ref_clk = devm_clk_get(dev, "ref"); - if (IS_ERR(hsphy->ref_clk)) { - ret = PTR_ERR(hsphy->ref_clk); - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to get ref clk, %d\n", ret); - return ret; - } + if (IS_ERR(hsphy->ref_clk)) + return dev_err_probe(dev, PTR_ERR(hsphy->ref_clk), + "failed to get ref clk\n"); hsphy->phy_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(hsphy->phy_reset)) { @@ -322,12 +319,9 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev) hsphy->vregs[i].supply = qcom_snps_hsphy_vreg_names[i]; ret = devm_regulator_bulk_get(dev, num, hsphy->vregs); - if (ret) { - if (ret != -EPROBE_DEFER) - dev_err(dev, "failed to get regulator supplies: %d\n", - ret); - return ret; - } + if (ret) + return dev_err_probe(dev, ret, + "failed to get regulator supplies\n"); pm_runtime_set_active(dev); pm_runtime_enable(dev); From e7c0c5af517fb458de8bb1fbca66fa7a747bff8d Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Sun, 7 May 2023 16:48:18 +0200 Subject: [PATCH 048/513] phy: qcom-snps: correct struct qcom_snps_hsphy kerneldoc [ Upstream commit 2a881183dc5ab2474ef602e48fe7af34db460d95 ] Update kerneldoc of struct qcom_snps_hsphy to fix: drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c:135: warning: Function parameter or member 'update_seq_cfg' not described in 'qcom_snps_hsphy' Signed-off-by: Krzysztof Kozlowski Reviewed-by: Konrad Dybcio Link: https://lore.kernel.org/r/20230507144818.193039-1-krzysztof.kozlowski@linaro.org Signed-off-by: Vinod Koul Stable-dep-of: 8a0eb8f9b9a0 ("phy: qcom-snps-femto-v2: properly enable ref clock") Signed-off-by: Sasha Levin --- drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c index 
54846259405a..136b45903c79 100644 --- a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c +++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c @@ -73,11 +73,11 @@ static const char * const qcom_snps_hsphy_vreg_names[] = { * * @cfg_ahb_clk: AHB2PHY interface clock * @ref_clk: phy reference clock - * @iface_clk: phy interface clock * @phy_reset: phy reset control * @vregs: regulator supplies bulk data * @phy_initialized: if PHY has been initialized correctly * @mode: contains the current mode the PHY is in + * @update_seq_cfg: tuning parameters for phy init */ struct qcom_snps_hsphy { struct phy *phy; From ac3fe4c2a7086e75bacc3adeff9cfd11ba98c25d Mon Sep 17 00:00:00 2001 From: Adrien Thierry Date: Thu, 29 Jun 2023 10:45:38 -0400 Subject: [PATCH 049/513] phy: qcom-snps-femto-v2: keep cfg_ahb_clk enabled during runtime suspend [ Upstream commit 45d89a344eb46db9dce851c28e14f5e3c635c251 ] In the dwc3 core, both system and runtime suspend end up calling dwc3_suspend_common(). From there, what happens for the PHYs depends on the USB mode and whether the controller is entering system or runtime suspend. HOST mode: (1) system suspend on a non-wakeup-capable controller The [1] if branch is taken. dwc3_core_exit() is called, which ends up calling phy_power_off() and phy_exit(). Those two functions decrease the PM runtime count at some point, so they will trigger the PHY runtime sleep (assuming the count is right). (2) runtime suspend / system suspend on a wakeup-capable controller The [1] branch is not taken. dwc3_suspend_common() calls phy_pm_runtime_put_sync(). Assuming the ref count is right, the PHY runtime suspend op is called. DEVICE mode: dwc3_core_exit() is called on both runtime and system sleep unless the controller is already runtime suspended. OTG mode: (1) system suspend : dwc3_core_exit() is called (2) runtime suspend : do nothing In host mode, the code seems to make a distinction between 1) runtime sleep / system sleep for wakeup-capable controller, and 2) system sleep for non-wakeup-capable controller, where phy_power_off() and phy_exit() are only called for the latter. This suggests the PHY is not supposed to be in a fully powered-off state for runtime sleep and system sleep for wakeup-capable controller. Moreover, downstream, cfg_ahb_clk only gets disabled for system suspend. The clocks are disabled by phy->set_suspend() [2] which is only called in the system sleep path through dwc3_core_exit() [3]. With that in mind, don't disable the clocks during the femto PHY runtime suspend callback. The clocks will only be disabled during system suspend for non-wakeup-capable controllers, through dwc3_core_exit(). 
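As a rough illustration of that split (a hypothetical example_phy driver, not the real femto-v2 code): the runtime-suspend callback only enters the low-power state and leaves the AHB interface clock running, while the clock is stopped on the full power-off path that dwc3_core_exit() reaches during system suspend of a non-wakeup-capable controller:

#include <linux/clk.h>
#include <linux/phy/phy.h>

struct example_phy {
	struct clk *cfg_ahb_clk;
};

/* runtime suspend: enter the PHY's low-power state, do not touch cfg_ahb_clk */
static int example_phy_runtime_suspend(struct example_phy *p)
{
	return 0;
}

/* phy_exit(), reached via dwc3_core_exit() on system suspend: full power-off */
static int example_phy_exit(struct phy *phy)
{
	struct example_phy *p = phy_get_drvdata(phy);

	/* only here is the interface clock stopped */
	clk_disable_unprepare(p->cfg_ahb_clk);
	return 0;
}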
[1] https://elixir.bootlin.com/linux/v6.4/source/drivers/usb/dwc3/core.c#L1988 [2] https://git.codelinaro.org/clo/la/kernel/msm-5.4/-/blob/LV.AU.1.2.1.r2-05300-gen3meta.0/drivers/usb/phy/phy-msm-snps-hs.c#L524 [3] https://git.codelinaro.org/clo/la/kernel/msm-5.4/-/blob/LV.AU.1.2.1.r2-05300-gen3meta.0/drivers/usb/dwc3/core.c#L1915 Signed-off-by: Adrien Thierry Link: https://lore.kernel.org/r/20230629144542.14906-2-athierry@redhat.com Signed-off-by: Vinod Koul Stable-dep-of: 8a0eb8f9b9a0 ("phy: qcom-snps-femto-v2: properly enable ref clock") Signed-off-by: Sasha Levin --- drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c | 9 --------- 1 file changed, 9 deletions(-) diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c index 136b45903c79..dfe5f0944910 100644 --- a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c +++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c @@ -122,22 +122,13 @@ static int qcom_snps_hsphy_suspend(struct qcom_snps_hsphy *hsphy) 0, USB2_AUTO_RESUME); } - clk_disable_unprepare(hsphy->cfg_ahb_clk); return 0; } static int qcom_snps_hsphy_resume(struct qcom_snps_hsphy *hsphy) { - int ret; - dev_dbg(&hsphy->phy->dev, "Resume QCOM SNPS PHY, mode\n"); - ret = clk_prepare_enable(hsphy->cfg_ahb_clk); - if (ret) { - dev_err(&hsphy->phy->dev, "failed to enable cfg ahb clock\n"); - return ret; - } - return 0; } From 3f28ec4a40029a85d22ff00af966f49c81843ae1 Mon Sep 17 00:00:00 2001 From: Adrien Thierry Date: Thu, 29 Jun 2023 10:45:39 -0400 Subject: [PATCH 050/513] phy: qcom-snps-femto-v2: properly enable ref clock [ Upstream commit 8a0eb8f9b9a002291a3934acfd913660b905249e ] The driver is not enabling the ref clock, which thus gets disabled by the clk_disable_unused() initcall. This leads to the dwc3 controller failing to initialize if probed after clk_disable_unused() is called, for instance when the driver is built as a module. To fix this, switch to the clk_bulk API to handle both cfg_ahb and ref clocks at the proper places. Note that the cfg_ahb clock is currently not used by any device tree instantiation of the PHY. Work needs to be done separately to fix this. 
Link: https://lore.kernel.org/linux-arm-msm/ZEqvy+khHeTkC2hf@fedora/ Fixes: 51e8114f80d0 ("phy: qcom-snps: Add SNPS USB PHY driver for QCOM based SOCs") Signed-off-by: Adrien Thierry Link: https://lore.kernel.org/r/20230629144542.14906-3-athierry@redhat.com Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c | 63 ++++++++++++++----- 1 file changed, 48 insertions(+), 15 deletions(-) diff --git a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c index dfe5f0944910..abb926456933 100644 --- a/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c +++ b/drivers/phy/qualcomm/phy-qcom-snps-femto-v2.c @@ -68,11 +68,13 @@ static const char * const qcom_snps_hsphy_vreg_names[] = { /** * struct qcom_snps_hsphy - snps hs phy attributes * + * @dev: device structure + * * @phy: generic phy * @base: iomapped memory space for snps hs phy * - * @cfg_ahb_clk: AHB2PHY interface clock - * @ref_clk: phy reference clock + * @num_clks: number of clocks + * @clks: array of clocks * @phy_reset: phy reset control * @vregs: regulator supplies bulk data * @phy_initialized: if PHY has been initialized correctly @@ -80,11 +82,13 @@ static const char * const qcom_snps_hsphy_vreg_names[] = { * @update_seq_cfg: tuning parameters for phy init */ struct qcom_snps_hsphy { + struct device *dev; + struct phy *phy; void __iomem *base; - struct clk *cfg_ahb_clk; - struct clk *ref_clk; + int num_clks; + struct clk_bulk_data *clks; struct reset_control *phy_reset; struct regulator_bulk_data vregs[SNPS_HS_NUM_VREGS]; @@ -92,6 +96,34 @@ struct qcom_snps_hsphy { enum phy_mode mode; }; +static int qcom_snps_hsphy_clk_init(struct qcom_snps_hsphy *hsphy) +{ + struct device *dev = hsphy->dev; + + hsphy->num_clks = 2; + hsphy->clks = devm_kcalloc(dev, hsphy->num_clks, sizeof(*hsphy->clks), GFP_KERNEL); + if (!hsphy->clks) + return -ENOMEM; + + /* + * TODO: Currently no device tree instantiation of the PHY is using the clock. + * This needs to be fixed in order for this code to be able to use devm_clk_bulk_get(). 
+ */ + hsphy->clks[0].id = "cfg_ahb"; + hsphy->clks[0].clk = devm_clk_get_optional(dev, "cfg_ahb"); + if (IS_ERR(hsphy->clks[0].clk)) + return dev_err_probe(dev, PTR_ERR(hsphy->clks[0].clk), + "failed to get cfg_ahb clk\n"); + + hsphy->clks[1].id = "ref"; + hsphy->clks[1].clk = devm_clk_get(dev, "ref"); + if (IS_ERR(hsphy->clks[1].clk)) + return dev_err_probe(dev, PTR_ERR(hsphy->clks[1].clk), + "failed to get ref clk\n"); + + return 0; +} + static inline void qcom_snps_hsphy_write_mask(void __iomem *base, u32 offset, u32 mask, u32 val) { @@ -174,16 +206,16 @@ static int qcom_snps_hsphy_init(struct phy *phy) if (ret) return ret; - ret = clk_prepare_enable(hsphy->cfg_ahb_clk); + ret = clk_bulk_prepare_enable(hsphy->num_clks, hsphy->clks); if (ret) { - dev_err(&phy->dev, "failed to enable cfg ahb clock, %d\n", ret); + dev_err(&phy->dev, "failed to enable clocks, %d\n", ret); goto poweroff_phy; } ret = reset_control_assert(hsphy->phy_reset); if (ret) { dev_err(&phy->dev, "failed to assert phy_reset, %d\n", ret); - goto disable_ahb_clk; + goto disable_clks; } usleep_range(100, 150); @@ -191,7 +223,7 @@ static int qcom_snps_hsphy_init(struct phy *phy) ret = reset_control_deassert(hsphy->phy_reset); if (ret) { dev_err(&phy->dev, "failed to de-assert phy_reset, %d\n", ret); - goto disable_ahb_clk; + goto disable_clks; } qcom_snps_hsphy_write_mask(hsphy->base, USB2_PHY_USB_PHY_CFG0, @@ -237,8 +269,8 @@ static int qcom_snps_hsphy_init(struct phy *phy) return 0; -disable_ahb_clk: - clk_disable_unprepare(hsphy->cfg_ahb_clk); +disable_clks: + clk_bulk_disable_unprepare(hsphy->num_clks, hsphy->clks); poweroff_phy: regulator_bulk_disable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs); @@ -250,7 +282,7 @@ static int qcom_snps_hsphy_exit(struct phy *phy) struct qcom_snps_hsphy *hsphy = phy_get_drvdata(phy); reset_control_assert(hsphy->phy_reset); - clk_disable_unprepare(hsphy->cfg_ahb_clk); + clk_bulk_disable_unprepare(hsphy->num_clks, hsphy->clks); regulator_bulk_disable(ARRAY_SIZE(hsphy->vregs), hsphy->vregs); hsphy->phy_initialized = false; @@ -290,14 +322,15 @@ static int qcom_snps_hsphy_probe(struct platform_device *pdev) if (!hsphy) return -ENOMEM; + hsphy->dev = dev; + hsphy->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(hsphy->base)) return PTR_ERR(hsphy->base); - hsphy->ref_clk = devm_clk_get(dev, "ref"); - if (IS_ERR(hsphy->ref_clk)) - return dev_err_probe(dev, PTR_ERR(hsphy->ref_clk), - "failed to get ref clk\n"); + ret = qcom_snps_hsphy_clk_init(hsphy); + if (ret) + return dev_err_probe(dev, ret, "failed to initialize clocks\n"); hsphy->phy_reset = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(hsphy->phy_reset)) { From 6aa7cb3bb5c96a1e344a5111e164c46bf2ee8717 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Thu, 25 May 2023 14:38:09 +0100 Subject: [PATCH 051/513] soundwire: qcom: update status correctly with mask [ Upstream commit f84d41b2a083b990cbdf70f3b24b6b108b9678ad ] SoundWire device status can be incorrectly updated without proper mask, fix this by adding a mask before updating the status. 
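The bug class here is the classic "shift but forget to mask" mistake when a packed status register holds one small field per device; a generic sketch follows (the field width and names are hypothetical, not the actual SWRM register layout):

#include <linux/bits.h>
#include <linux/types.h>

#define DEV_STATUS_SZ	2		/* bits per device slot */
#define DEV_STATUS_MASK	GENMASK(1, 0)

static u8 device_status(u32 status_reg, unsigned int dev_num)
{
	/* shift the field down, then mask off the neighbouring devices' bits */
	return (status_reg >> (dev_num * DEV_STATUS_SZ)) & DEV_STATUS_MASK;
}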
Fixes: c7d49c76d1d5 ("soundwire: qcom: add support to new interrupts") Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20230525133812.30841-2-srinivas.kandagatla@linaro.org Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/soundwire/qcom.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soundwire/qcom.c b/drivers/soundwire/qcom.c index 2045bcdfce1a..e3b52d5aa411 100644 --- a/drivers/soundwire/qcom.c +++ b/drivers/soundwire/qcom.c @@ -405,7 +405,7 @@ static int qcom_swrm_get_alert_slave_dev_num(struct qcom_swrm_ctrl *ctrl) status = (val >> (dev_num * SWRM_MCP_SLV_STATUS_SZ)); if ((status & SWRM_MCP_SLV_STATUS_MASK) == SDW_SLAVE_ALERT) { - ctrl->status[dev_num] = status; + ctrl->status[dev_num] = status & SWRM_MCP_SLV_STATUS_MASK; return dev_num; } } From c9b936984d89560203dad9339987130580893554 Mon Sep 17 00:00:00 2001 From: Sakari Ailus Date: Tue, 13 Jun 2023 18:47:53 +0200 Subject: [PATCH 052/513] media: staging: atomisp: select V4L2_FWNODE [ Upstream commit bf4c985707d3168ebb7d87d15830de66949d979c ] Select V4L2_FWNODE as the driver depends on it. Reported-by: Andy Shevchenko Fixes: aa31f6514047 ("media: atomisp: allow building the driver again") Signed-off-by: Sakari Ailus Tested-by: Andy Shevchenko Reviewed-by: Hans de Goede Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/staging/media/atomisp/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/staging/media/atomisp/Kconfig b/drivers/staging/media/atomisp/Kconfig index aeed5803dfb1..0031d76356c1 100644 --- a/drivers/staging/media/atomisp/Kconfig +++ b/drivers/staging/media/atomisp/Kconfig @@ -13,6 +13,7 @@ config VIDEO_ATOMISP tristate "Intel Atom Image Signal Processor Driver" depends on VIDEO_V4L2 && INTEL_ATOMISP depends on PMIC_OPREGION + select V4L2_FWNODE select IOSF_MBI select VIDEOBUF_VMALLOC select VIDEO_V4L2_SUBDEV_API From 5a4048355725e439e42cbae4f6e27ed091ba047f Mon Sep 17 00:00:00 2001 From: Wang Ming Date: Thu, 13 Jul 2023 09:42:39 +0800 Subject: [PATCH 053/513] i40e: Fix an NULL vs IS_ERR() bug for debugfs_create_dir() [ Upstream commit 043b1f185fb0f3939b7427f634787706f45411c4 ] The debugfs_create_dir() function returns error pointers. It never returns NULL. Most incorrect error checks were fixed, but the one in i40e_dbg_init() was forgotten. Fix the remaining error check. 
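For reference, the correct calling pattern in a generic driver (illustrative only, not the i40e code itself): debugfs_create_dir() reports failure with an ERR_PTR(), never NULL, so the result has to be tested with IS_ERR():

#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/printk.h>

static struct dentry *example_dbg_root;

static void example_dbg_init(void)
{
	example_dbg_root = debugfs_create_dir("example", NULL);
	if (IS_ERR(example_dbg_root))	/* not: if (!example_dbg_root) */
		pr_info("init of debugfs failed\n");
}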
Fixes: 02e9c290814c ("i40e: debugfs interface") Signed-off-by: Wang Ming Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/i40e/i40e_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index c057343165a5..7c5f874ef335 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -1839,7 +1839,7 @@ void i40e_dbg_pf_exit(struct i40e_pf *pf) void i40e_dbg_init(void) { i40e_dbg_root = debugfs_create_dir(i40e_driver_name, NULL); - if (!i40e_dbg_root) + if (IS_ERR(i40e_dbg_root)) pr_info("init of debugfs failed\n"); } From 1542e399a12ae572ffce413c001344c7734f2bc2 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2023 13:41:27 -0700 Subject: [PATCH 054/513] iavf: fix potential deadlock on allocation failure [ Upstream commit a2f054c10bef0b54600ec9cb776508443e941343 ] In iavf_adminq_task(), if kzalloc() fails to allocate the event.msg_buf, the function will exit without releasing the adapter->crit_lock. This is unlikely, but if it happens, the next access to that mutex will deadlock. Fix this by moving the unlock to the end of the function, and adding a new label to allow jumping to the unlock portion of the function exit flow. Fixes: fc2e6b3b132a ("iavf: Rework mutexes for better synchronisation") Signed-off-by: Jacob Keller Tested-by: Rafal Romanowski Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/iavf/iavf_main.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index bcceb2ddfea6..1e349a90d21a 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -2546,7 +2546,7 @@ static void iavf_adminq_task(struct work_struct *work) event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) - goto out; + goto unlock; do { ret = iavf_clean_arq_element(hw, &event, &pending); @@ -2561,7 +2561,6 @@ static void iavf_adminq_task(struct work_struct *work) if (pending != 0) memset(event.msg_buf, 0, IAVF_MAX_AQ_BUF_SIZE); } while (pending); - mutex_unlock(&adapter->crit_lock); if ((adapter->flags & IAVF_FLAG_SETUP_NETDEV_FEATURES)) { if (adapter->netdev_registered || @@ -2619,6 +2618,8 @@ static void iavf_adminq_task(struct work_struct *work) freedom: kfree(event.msg_buf); +unlock: + mutex_unlock(&adapter->crit_lock); out: /* re-enable Admin queue interrupt cause */ iavf_misc_irq_enable(adapter); From 57743a86cce10ec674273d1db89c015a0ec1ec14 Mon Sep 17 00:00:00 2001 From: Jacob Keller Date: Mon, 10 Jul 2023 13:41:28 -0700 Subject: [PATCH 055/513] iavf: check for removal state before IAVF_FLAG_PF_COMMS_FAILED [ Upstream commit 91896c8acce23d33ed078cffd46a9534b1f82be5 ] In iavf_adminq_task(), if the function can't acquire the adapter->crit_lock, it checks if the driver is removing. If so, it simply exits without re-enabling the interrupt. This is done to ensure that the task stops processing as soon as possible once the driver is being removed. However, if the IAVF_FLAG_PF_COMMS_FAILED is set, the function checks this before attempting to acquire the lock. In this case, the function exits early and re-enables the interrupt. This will happen even if the driver is already removing. 
Avoid this, by moving the check to after the adapter->crit_lock is acquired. This way, if the driver is removing, we will not re-enable the interrupt. Fixes: fc2e6b3b132a ("iavf: Rework mutexes for better synchronisation") Signed-off-by: Jacob Keller Tested-by: Rafal Romanowski Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/iavf/iavf_main.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c index 1e349a90d21a..a87f4f1ae684 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_main.c +++ b/drivers/net/ethernet/intel/iavf/iavf_main.c @@ -2532,9 +2532,6 @@ static void iavf_adminq_task(struct work_struct *work) u32 val, oldval; u16 pending; - if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) - goto out; - if (!mutex_trylock(&adapter->crit_lock)) { if (adapter->state == __IAVF_REMOVE) return; @@ -2543,6 +2540,9 @@ static void iavf_adminq_task(struct work_struct *work) goto out; } + if (adapter->flags & IAVF_FLAG_PF_COMMS_FAILED) + goto unlock; + event.buf_len = IAVF_MAX_AQ_BUF_SIZE; event.msg_buf = kzalloc(event.buf_len, GFP_KERNEL); if (!event.msg_buf) From 01460ac6ff95d483a6ec76c96b90a3ee330b4d53 Mon Sep 17 00:00:00 2001 From: Jiawen Wu Date: Wed, 19 Jul 2023 17:22:33 +0800 Subject: [PATCH 056/513] net: phy: marvell10g: fix 88x3310 power up [ Upstream commit c7b75bea853daeb64fc831dbf39a6bbabcc402ac ] Clear MV_V2_PORT_CTRL_PWRDOWN bit to set power up for 88x3310 PHY, it sometimes does not take effect immediately. And a read of this register causes the bit not to clear. This will cause mv3310_reset() to time out, which will fail the config initialization. So add a delay before the next access. Fixes: c9cc1c815d36 ("net: phy: marvell10g: place in powersave mode at probe") Signed-off-by: Jiawen Wu Reviewed-by: Russell King (Oracle) Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/phy/marvell10g.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/net/phy/marvell10g.c b/drivers/net/phy/marvell10g.c index df33637c5269..1caa6d943a7b 100644 --- a/drivers/net/phy/marvell10g.c +++ b/drivers/net/phy/marvell10g.c @@ -307,6 +307,13 @@ static int mv3310_power_up(struct phy_device *phydev) ret = phy_clear_bits_mmd(phydev, MDIO_MMD_VEND2, MV_V2_PORT_CTRL, MV_V2_PORT_CTRL_PWRDOWN); + /* Sometimes, the power down bit doesn't clear immediately, and + * a read of this register causes the bit not to clear. Delay + * 100us to allow the PHY to come out of power down mode before + * the next access. + */ + udelay(100); + if (phydev->drv->phy_id != MARVELL_PHY_ID_88X3310 || priv->firmware_ver < 0x00030000) return ret; From 9755714d238c622a70f1e5fbbb642b091dfadd47 Mon Sep 17 00:00:00 2001 From: Jijie Shao Date: Thu, 20 Jul 2023 10:05:09 +0800 Subject: [PATCH 057/513] net: hns3: fix wrong tc bandwidth weight data issue [ Upstream commit 116d9f732eef634abbd871f2c6f613a5b4677742 ] Currently, the weight saved by the driver is used as the query result, which may be different from the actual weight in the register. Therefore, the register value read from the firmware is used as the query result Fixes: 0e32038dc856 ("net: hns3: refactor dump tc of debugfs") Signed-off-by: Jijie Shao Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c index 9cda8b3562b8..dd8b73aebe6a 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_debugfs.c @@ -677,8 +677,7 @@ static int hclge_dbg_dump_tc(struct hclge_dev *hdev, char *buf, int len) for (i = 0; i < HNAE3_MAX_TC; i++) { sch_mode_str = ets_weight->tc_weight[i] ? "dwrr" : "sp"; pos += scnprintf(buf + pos, len - pos, "%u %4s %3u\n", - i, sch_mode_str, - hdev->tm_info.pg_info[0].tc_dwrr[i]); + i, sch_mode_str, ets_weight->tc_weight[i]); } return 0; From 96dbc68b7f86b453be6ab11fee728f2704f24e61 Mon Sep 17 00:00:00 2001 From: Jijie Shao Date: Thu, 20 Jul 2023 10:05:10 +0800 Subject: [PATCH 058/513] net: hns3: fix wrong bw weight of disabled tc issue [ Upstream commit 882481b1c55fc44861d7e2d54b4e0936b1b39f2c ] In dwrr mode, the default bandwidth weight of disabled tc is set to 0. If the bandwidth weight is 0, the mode will change to sp. Therefore, disabled tc default bandwidth weight need changed to 1, and 0 is returned when query the bandwidth weight of disabled tc. In addition, driver need stop configure bandwidth weight if tc is disabled. Fixes: 848440544b41 ("net: hns3: Add support of TX Scheduler & Shaper to HNS3 driver") Signed-off-by: Jie Wang Signed-off-by: Jijie Shao Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- .../ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c | 17 ++++++++++++++--- .../ethernet/hisilicon/hns3/hns3pf/hclge_tm.c | 3 ++- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c index 375ebf105a9a..87640a2e1794 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_dcb.c @@ -52,7 +52,10 @@ static void hclge_tm_info_to_ieee_ets(struct hclge_dev *hdev, for (i = 0; i < HNAE3_MAX_TC; i++) { ets->prio_tc[i] = hdev->tm_info.prio_tc[i]; - ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i]; + if (i < hdev->tm_info.num_tc) + ets->tc_tx_bw[i] = hdev->tm_info.pg_info[0].tc_dwrr[i]; + else + ets->tc_tx_bw[i] = 0; if (hdev->tm_info.tc_info[i].tc_sch_mode == HCLGE_SCH_MODE_SP) @@ -123,7 +126,8 @@ static u8 hclge_ets_tc_changed(struct hclge_dev *hdev, struct ieee_ets *ets, } static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev, - struct ieee_ets *ets, bool *changed) + struct ieee_ets *ets, bool *changed, + u8 tc_num) { bool has_ets_tc = false; u32 total_ets_bw = 0; @@ -137,6 +141,13 @@ static int hclge_ets_sch_mode_validate(struct hclge_dev *hdev, *changed = true; break; case IEEE_8021QAZ_TSA_ETS: + if (i >= tc_num) { + dev_err(&hdev->pdev->dev, + "tc%u is disabled, cannot set ets bw\n", + i); + return -EINVAL; + } + /* The hardware will switch to sp mode if bandwidth is * 0, so limit ets bandwidth must be greater than 0. 
*/ @@ -176,7 +187,7 @@ static int hclge_ets_validate(struct hclge_dev *hdev, struct ieee_ets *ets, if (ret) return ret; - ret = hclge_ets_sch_mode_validate(hdev, ets, changed); + ret = hclge_ets_sch_mode_validate(hdev, ets, changed, tc_num); if (ret) return ret; diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c index 97a6864f60ef..e7cb6a81e5b6 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c @@ -732,6 +732,7 @@ static void hclge_tm_tc_info_init(struct hclge_dev *hdev) static void hclge_tm_pg_info_init(struct hclge_dev *hdev) { #define BW_PERCENT 100 +#define DEFAULT_BW_WEIGHT 1 u8 i; @@ -753,7 +754,7 @@ static void hclge_tm_pg_info_init(struct hclge_dev *hdev) for (k = 0; k < hdev->tm_info.num_tc; k++) hdev->tm_info.pg_info[i].tc_dwrr[k] = BW_PERCENT; for (; k < HNAE3_MAX_TC; k++) - hdev->tm_info.pg_info[i].tc_dwrr[k] = 0; + hdev->tm_info.pg_info[i].tc_dwrr[k] = DEFAULT_BW_WEIGHT; } } From 77396fa9096abdbfbb87d63e73ad44d5621cf103 Mon Sep 17 00:00:00 2001 From: Roopa Prabhu Date: Tue, 1 Mar 2022 05:04:28 +0000 Subject: [PATCH 059/513] vxlan: move to its own directory [ Upstream commit 6765393614ea8e2c0a7b953063513823f87c9115 ] vxlan.c has grown too long. This patch moves it to its own directory. subsequent patches add new functionality in new files. Signed-off-by: Roopa Prabhu Signed-off-by: David S. Miller Stable-dep-of: 94d166c5318c ("vxlan: calculate correct header length for GPE") Signed-off-by: Sasha Levin --- drivers/net/Makefile | 2 +- drivers/net/vxlan/Makefile | 7 +++++++ drivers/net/{vxlan.c => vxlan/vxlan_core.c} | 0 3 files changed, 8 insertions(+), 1 deletion(-) create mode 100644 drivers/net/vxlan/Makefile rename drivers/net/{vxlan.c => vxlan/vxlan_core.c} (100%) diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 739838623cf6..50e60852f128 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -30,7 +30,7 @@ obj-$(CONFIG_TUN) += tun.o obj-$(CONFIG_TAP) += tap.o obj-$(CONFIG_VETH) += veth.o obj-$(CONFIG_VIRTIO_NET) += virtio_net.o -obj-$(CONFIG_VXLAN) += vxlan.o +obj-$(CONFIG_VXLAN) += vxlan/ obj-$(CONFIG_GENEVE) += geneve.o obj-$(CONFIG_BAREUDP) += bareudp.o obj-$(CONFIG_GTP) += gtp.o diff --git a/drivers/net/vxlan/Makefile b/drivers/net/vxlan/Makefile new file mode 100644 index 000000000000..567266133593 --- /dev/null +++ b/drivers/net/vxlan/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for the vxlan driver +# + +obj-$(CONFIG_VXLAN) += vxlan.o + +vxlan-objs := vxlan_core.o diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan/vxlan_core.c similarity index 100% rename from drivers/net/vxlan.c rename to drivers/net/vxlan/vxlan_core.c From 49f5b3c9499bd8593d29646435c41bfa4e6b3a26 Mon Sep 17 00:00:00 2001 From: Jiri Benc Date: Thu, 20 Jul 2023 11:05:56 +0200 Subject: [PATCH 060/513] vxlan: calculate correct header length for GPE [ Upstream commit 94d166c5318c6edd1e079df8552233443e909c33 ] VXLAN-GPE does not add an extra inner Ethernet header. Take that into account when calculating header length. This causes problems in skb_tunnel_check_pmtu, where incorrect PMTU is cached. In the collect_md mode (which is the only mode that VXLAN-GPE supports), there's no magic auto-setting of the tunnel interface MTU. It can't be, since the destination and thus the underlying interface may be different for each packet. So, the administrator is responsible for setting the correct tunnel interface MTU. 
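To put numbers on the difference (they follow directly from the header sizes used by the new vxlan_headroom() helper below): over IPv4, VXLAN-GPE encapsulation adds 20 (IP) + 8 (UDP) + 8 (VXLAN) = 36 bytes, while classic VXLAN adds a further 14-byte inner Ethernet header for a total of 50; over IPv6 the corresponding overheads are 56 and 70 bytes.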
Apparently, the administrators are capable enough to calculate that the maximum MTU for VXLAN-GPE is (their_lower_MTU - 36). They set the tunnel interface MTU to 1464. If you run a TCP stream over such interface, it's then segmented according to the MTU 1464, i.e. producing 1514 bytes frames. Which is okay, this still fits the lower MTU. However, skb_tunnel_check_pmtu (called from vxlan_xmit_one) uses 50 as the header size and thus incorrectly calculates the frame size to be 1528. This leads to ICMP too big message being generated (locally), PMTU of 1450 to be cached and the TCP stream to be resegmented. The fix is to use the correct actual header size, especially for skb_tunnel_check_pmtu calculation. Fixes: e1e5314de08ba ("vxlan: implement GPE") Signed-off-by: Jiri Benc Reviewed-by: Simon Horman Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 2 +- drivers/net/vxlan/vxlan_core.c | 23 ++++++++----------- include/net/vxlan.h | 13 +++++++---- 3 files changed, 20 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 6fb9c18297bc..af824370a2f6 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8398,7 +8398,7 @@ static void ixgbe_atr(struct ixgbe_ring *ring, struct ixgbe_adapter *adapter = q_vector->adapter; if (unlikely(skb_tail_pointer(skb) < hdr.network + - VXLAN_HEADROOM)) + vxlan_headroom(0))) return; /* verify the port is recognized as VXLAN */ diff --git a/drivers/net/vxlan/vxlan_core.c b/drivers/net/vxlan/vxlan_core.c index 129e270e9a7c..106b66570e04 100644 --- a/drivers/net/vxlan/vxlan_core.c +++ b/drivers/net/vxlan/vxlan_core.c @@ -2721,7 +2721,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, } ndst = &rt->dst; - err = skb_tunnel_check_pmtu(skb, ndst, VXLAN_HEADROOM, + err = skb_tunnel_check_pmtu(skb, ndst, vxlan_headroom(flags & VXLAN_F_GPE), netif_is_any_bridge_port(dev)); if (err < 0) { goto tx_error; @@ -2782,7 +2782,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev, goto out_unlock; } - err = skb_tunnel_check_pmtu(skb, ndst, VXLAN6_HEADROOM, + err = skb_tunnel_check_pmtu(skb, ndst, + vxlan_headroom((flags & VXLAN_F_GPE) | VXLAN_F_IPV6), netif_is_any_bridge_port(dev)); if (err < 0) { goto tx_error; @@ -3159,14 +3160,12 @@ static int vxlan_change_mtu(struct net_device *dev, int new_mtu) struct vxlan_rdst *dst = &vxlan->default_dst; struct net_device *lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex); - bool use_ipv6 = !!(vxlan->cfg.flags & VXLAN_F_IPV6); /* This check is different than dev->max_mtu, because it looks at * the lowerdev->mtu, rather than the static dev->max_mtu */ if (lowerdev) { - int max_mtu = lowerdev->mtu - - (use_ipv6 ? 
VXLAN6_HEADROOM : VXLAN_HEADROOM); + int max_mtu = lowerdev->mtu - vxlan_headroom(vxlan->cfg.flags); if (new_mtu > max_mtu) return -EINVAL; } @@ -3788,11 +3787,11 @@ static void vxlan_config_apply(struct net_device *dev, struct vxlan_dev *vxlan = netdev_priv(dev); struct vxlan_rdst *dst = &vxlan->default_dst; unsigned short needed_headroom = ETH_HLEN; - bool use_ipv6 = !!(conf->flags & VXLAN_F_IPV6); int max_mtu = ETH_MAX_MTU; + u32 flags = conf->flags; if (!changelink) { - if (conf->flags & VXLAN_F_GPE) + if (flags & VXLAN_F_GPE) vxlan_raw_setup(dev); else vxlan_ether_setup(dev); @@ -3818,8 +3817,7 @@ static void vxlan_config_apply(struct net_device *dev, dev->needed_tailroom = lowerdev->needed_tailroom; - max_mtu = lowerdev->mtu - (use_ipv6 ? VXLAN6_HEADROOM : - VXLAN_HEADROOM); + max_mtu = lowerdev->mtu - vxlan_headroom(flags); if (max_mtu < ETH_MIN_MTU) max_mtu = ETH_MIN_MTU; @@ -3830,10 +3828,9 @@ static void vxlan_config_apply(struct net_device *dev, if (dev->mtu > max_mtu) dev->mtu = max_mtu; - if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA) - needed_headroom += VXLAN6_HEADROOM; - else - needed_headroom += VXLAN_HEADROOM; + if (flags & VXLAN_F_COLLECT_METADATA) + flags |= VXLAN_F_IPV6; + needed_headroom += vxlan_headroom(flags); dev->needed_headroom = needed_headroom; memcpy(&vxlan->cfg, conf, sizeof(*conf)); diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 08537aa14f7c..cf1d870f7b9a 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -327,10 +327,15 @@ static inline netdev_features_t vxlan_features_check(struct sk_buff *skb, return features; } -/* IP header + UDP + VXLAN + Ethernet header */ -#define VXLAN_HEADROOM (20 + 8 + 8 + 14) -/* IPv6 header + UDP + VXLAN + Ethernet header */ -#define VXLAN6_HEADROOM (40 + 8 + 8 + 14) +static inline int vxlan_headroom(u32 flags) +{ + /* VXLAN: IP4/6 header + UDP + VXLAN + Ethernet header */ + /* VXLAN-GPE: IP4/6 header + UDP + VXLAN */ + return (flags & VXLAN_F_IPV6 ? sizeof(struct ipv6hdr) : + sizeof(struct iphdr)) + + sizeof(struct udphdr) + sizeof(struct vxlanhdr) + + (flags & VXLAN_F_GPE ? 0 : ETH_HLEN); +} static inline struct vxlanhdr *vxlan_hdr(struct sk_buff *skb) { From 6d8a71e4c3a2fa4960cc50996e76a42b62fab677 Mon Sep 17 00:00:00 2001 From: Harshit Mogalapalli Date: Fri, 21 Jul 2023 02:05:55 -0700 Subject: [PATCH 061/513] phy: hisilicon: Fix an out of bounds check in hisi_inno_phy_probe() [ Upstream commit 13c088cf3657d70893d75cf116be937f1509cc0f ] The size of array 'priv->ports[]' is INNO_PHY_PORT_NUM. In the for loop, 'i' is used as the index for array 'priv->ports[]' with a check (i > INNO_PHY_PORT_NUM) which indicates that INNO_PHY_PORT_NUM is allowed value for 'i' in the same loop. 
This > comparison needs to be changed to >=, otherwise it potentially leads to an out of bounds write on the next iteration through the loop Fixes: ba8b0ee81fbb ("phy: add inno-usb2-phy driver for hi3798cv200 SoC") Reported-by: Dan Carpenter Signed-off-by: Harshit Mogalapalli Link: https://lore.kernel.org/r/20230721090558.3588613-1-harshit.m.mogalapalli@oracle.com Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/phy/hisilicon/phy-hisi-inno-usb2.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c index 34a6a9a1ceb2..897c6bb4cbb8 100644 --- a/drivers/phy/hisilicon/phy-hisi-inno-usb2.c +++ b/drivers/phy/hisilicon/phy-hisi-inno-usb2.c @@ -153,7 +153,7 @@ static int hisi_inno_phy_probe(struct platform_device *pdev) phy_set_drvdata(phy, &priv->ports[i]); i++; - if (i > INNO_PHY_PORT_NUM) { + if (i >= INNO_PHY_PORT_NUM) { dev_warn(dev, "Support %d ports in maximum\n", i); break; } From 46e40297355ec60a1de8c911db97106dd5fef1ca Mon Sep 17 00:00:00 2001 From: Yuanjun Gong Date: Thu, 20 Jul 2023 22:42:19 +0800 Subject: [PATCH 062/513] ethernet: atheros: fix return value check in atl1e_tso_csum() [ Upstream commit 69a184f7a372aac588babfb0bd681aaed9779f5b ] in atl1e_tso_csum, it should check the return value of pskb_trim(), and return an error code if an unexpected value is returned by pskb_trim(). Fixes: a6a5325239c2 ("atl1e: Atheros L1E Gigabit Ethernet driver") Signed-off-by: Yuanjun Gong Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230720144219.39285-1-ruc_gongyuanjun@163.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/atheros/atl1e/atl1e_main.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c index 753973ac922e..db13311e77e7 100644 --- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c @@ -1642,8 +1642,11 @@ static int atl1e_tso_csum(struct atl1e_adapter *adapter, real_len = (((unsigned char *)ip_hdr(skb) - skb->data) + ntohs(ip_hdr(skb)->tot_len)); - if (real_len < skb->len) - pskb_trim(skb, real_len); + if (real_len < skb->len) { + err = pskb_trim(skb, real_len); + if (err) + return err; + } hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb)); if (unlikely(skb->len == hdr_len)) { From dd48780a7bbbba0fdd04a969f6064916a7db3114 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20=C5=BBenczykowski?= Date: Thu, 20 Jul 2023 09:00:22 -0700 Subject: [PATCH 063/513] ipv6 addrconf: fix bug where deleting a mngtmpaddr can create a new temporary address MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 69172f0bcb6a09110c5d2a6d792627f5095a9018 ] currently on 6.4 net/main: # ip link add dummy1 type dummy # echo 1 > /proc/sys/net/ipv6/conf/dummy1/use_tempaddr # ip link set dummy1 up # ip -6 addr add 2000::1/64 mngtmpaddr dev dummy1 # ip -6 addr show dev dummy1 11: dummy1: mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000 inet6 2000::44f3:581c:8ca:3983/64 scope global temporary dynamic valid_lft 604800sec preferred_lft 86172sec inet6 2000::1/64 scope global mngtmpaddr valid_lft forever preferred_lft forever inet6 fe80::e8a8:a6ff:fed5:56d4/64 scope link valid_lft forever preferred_lft forever # ip -6 addr del 2000::44f3:581c:8ca:3983/64 dev dummy1 (can wait a few seconds if you want to, the above 
delete isn't [directly] the problem) # ip -6 addr show dev dummy1 11: dummy1: mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000 inet6 2000::1/64 scope global mngtmpaddr valid_lft forever preferred_lft forever inet6 fe80::e8a8:a6ff:fed5:56d4/64 scope link valid_lft forever preferred_lft forever # ip -6 addr del 2000::1/64 mngtmpaddr dev dummy1 # ip -6 addr show dev dummy1 11: dummy1: mtu 1500 qdisc noqueue state UNKNOWN group default qlen 1000 inet6 2000::81c9:56b7:f51a:b98f/64 scope global temporary dynamic valid_lft 604797sec preferred_lft 86169sec inet6 fe80::e8a8:a6ff:fed5:56d4/64 scope link valid_lft forever preferred_lft forever This patch prevents this new 'global temporary dynamic' address from being created by the deletion of the related (same subnet prefix) 'mngtmpaddr' (which is triggered by there already being no temporary addresses). Cc: Jiri Pirko Fixes: 53bd67491537 ("ipv6 addrconf: introduce IFA_F_MANAGETEMPADDR to tell kernel to manage temporary addresses") Reported-by: Xiao Ma Signed-off-by: Maciej Żenczykowski Reviewed-by: David Ahern Link: https://lore.kernel.org/r/20230720160022.1887942-1-maze@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/ipv6/addrconf.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index e0d3909172a8..0c0b7969840f 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -2565,12 +2565,18 @@ static void manage_tempaddrs(struct inet6_dev *idev, ipv6_ifa_notify(0, ift); } - if ((create || list_empty(&idev->tempaddr_list)) && - idev->cnf.use_tempaddr > 0) { + /* Also create a temporary address if it's enabled but no temporary + * address currently exists. + * However, we get called with valid_lft == 0, prefered_lft == 0, create == false + * as part of cleanup (ie. deleting the mngtmpaddr). + * We don't want that to result in creating a new temporary ip address. + */ + if (list_empty(&idev->tempaddr_list) && (valid_lft || prefered_lft)) + create = true; + + if (create && idev->cnf.use_tempaddr > 0) { /* When a new public address is created as described * in [ADDRCONF], also create a new temporary address. - * Also create a temporary address if it's enabled but - * no temporary address currently exists. */ read_unlock_bh(&idev->lock); ipv6_create_tempaddr(ifp, false); From ecb741a17cb2abf693b34d8e05a1e7e40494afb6 Mon Sep 17 00:00:00 2001 From: Stewart Smith Date: Fri, 21 Jul 2023 15:24:10 -0700 Subject: [PATCH 064/513] tcp: Reduce chance of collisions in inet6_hashfn(). [ Upstream commit d11b0df7ddf1831f3e170972f43186dad520bfcc ] For both IPv4 and IPv6 incoming TCP connections are tracked in a hash table with a hash over the source & destination addresses and ports. However, the IPv6 hash is insufficient and can lead to a high rate of collisions. The IPv6 hash used an XOR to fit everything into the 96 bits for the fast jenkins hash, meaning it is possible for an external entity to ensure the hash collides, thus falling back to a linear search in the bucket, which is slow. We take the approach of hash the full length of IPv6 address in __ipv6_addr_jhash() so that all users can benefit from a more secure version. While this may look like it adds overhead, the reality of modern CPUs means that this is unmeasurable in real world scenarios. In simulating with llvm-mca, the increase in cycles for the hashing code was ~16 cycles on Skylake (from a base of ~155), and an extra ~9 on Nehalem (base of ~173). 
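One way to see the weakness of the old folding: the first word fed to the hash was s6_addr32[0] ^ s6_addr32[1], so for any fixed value t, every address whose second word equals (first word ^ t) contributes exactly the same folded word regardless of the first word. An attacker who can vary those upper address bits therefore has a large family of distinct source addresses that hash identically and pile up in one bucket. Hashing all four 32-bit words with jhash2(), as below, removes that freedom while staying within the jenkins hash family.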
In commit dd6d2910c5e0 ("netfilter: conntrack: switch to siphash") netfilter switched from a jenkins hash to a siphash, but even the faster hsiphash is a more significant overhead (~20-30%) in some preliminary testing. So, in this patch, we keep to the more conservative approach to ensure we don't add much overhead per SYN. In testing, this results in a consistently even spread across the connection buckets. In both testing and real-world scenarios, we have not found any measurable performance impact. Fixes: 08dcdbf6a7b9 ("ipv6: use a stronger hash for tcp") Signed-off-by: Stewart Smith Signed-off-by: Samuel Mendoza-Jonas Suggested-by: Eric Dumazet Signed-off-by: Kuniyuki Iwashima Reviewed-by: Eric Dumazet Link: https://lore.kernel.org/r/20230721222410.17914-1-kuniyu@amazon.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- include/net/ipv6.h | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/include/net/ipv6.h b/include/net/ipv6.h index e3ab99f4edab..20930086b228 100644 --- a/include/net/ipv6.h +++ b/include/net/ipv6.h @@ -664,12 +664,8 @@ static inline u32 ipv6_addr_hash(const struct in6_addr *a) /* more secured version of ipv6_addr_hash() */ static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval) { - u32 v = (__force u32)a->s6_addr32[0] ^ (__force u32)a->s6_addr32[1]; - - return jhash_3words(v, - (__force u32)a->s6_addr32[2], - (__force u32)a->s6_addr32[3], - initval); + return jhash2((__force const u32 *)a->s6_addr32, + ARRAY_SIZE(a->s6_addr32), initval); } static inline bool ipv6_addr_loopback(const struct in6_addr *a) From c28b3938763450133a17460bcb1d02c804313323 Mon Sep 17 00:00:00 2001 From: Jedrzej Jagielski Date: Fri, 21 Jul 2023 08:58:54 -0700 Subject: [PATCH 065/513] ice: Fix memory management in ice_ethtool_fdir.c [ Upstream commit a3336056504d780590ac6d6ac94fbba829994594 ] Fix ethtool FDIR logic to not use memory after its release. In the ice_ethtool_fdir.c file there are 2 spots where code can refer to pointers which may be missing. In the ice_cfg_fdir_xtrct_seq() function seg may be freed but even then may be still used by memcpy(&tun_seg[1], seg, sizeof(*seg)). In the ice_add_fdir_ethtool() function struct ice_fdir_fltr *input may first fail to be added via ice_fdir_update_list_entry() but then may be deleted by ice_fdir_update_list_entry. Terminate in both cases when the returned value of the previous operation is other than 0, free memory and don't use it anymore. 
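Reduced to its essentials, the first problem is a use-after-free of the outer-header segment (identifiers are the driver's own, surrounding logic elided):

    ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx, ICE_FD_HW_SEG_NON_TUN);
    if (ret == -EEXIST)
        devm_kfree(dev, seg);                   /* seg released here ...   */
    ...
    memcpy(&tun_seg[1], seg, sizeof(*seg));     /* ... but still read here */

After the change, -EEXIST bails out through the shared err_exit label (and is reported as success), so seg is freed exactly once and never dereferenced afterwards.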
Reported-by: Michal Schmidt Link: https://bugzilla.redhat.com/show_bug.cgi?id=2208423 Fixes: cac2a27cd9ab ("ice: Support IPv4 Flow Director filters") Reviewed-by: Przemek Kitszel Signed-off-by: Jedrzej Jagielski Reviewed-by: Leon Romanovsky Tested-by: Pucha Himasekhar Reddy (A Contingent worker at Intel) Signed-off-by: Tony Nguyen Link: https://lore.kernel.org/r/20230721155854.1292805-1-anthony.l.nguyen@intel.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- .../net/ethernet/intel/ice/ice_ethtool_fdir.c | 26 ++++++++++--------- 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c index 16de603b280c..0106ea3519a0 100644 --- a/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c +++ b/drivers/net/ethernet/intel/ice/ice_ethtool_fdir.c @@ -1135,16 +1135,21 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp, ICE_FLOW_FLD_OFF_INVAL); } - /* add filter for outer headers */ fltr_idx = ice_ethtool_flow_to_fltr(fsp->flow_type & ~FLOW_EXT); + + assign_bit(fltr_idx, hw->fdir_perfect_fltr, perfect_filter); + + /* add filter for outer headers */ ret = ice_fdir_set_hw_fltr_rule(pf, seg, fltr_idx, ICE_FD_HW_SEG_NON_TUN); - if (ret == -EEXIST) - /* Rule already exists, free memory and continue */ - devm_kfree(dev, seg); - else if (ret) + if (ret == -EEXIST) { + /* Rule already exists, free memory and count as success */ + ret = 0; + goto err_exit; + } else if (ret) { /* could not write filter, free memory */ goto err_exit; + } /* make tunneled filter HW entries if possible */ memcpy(&tun_seg[1], seg, sizeof(*seg)); @@ -1159,18 +1164,13 @@ ice_cfg_fdir_xtrct_seq(struct ice_pf *pf, struct ethtool_rx_flow_spec *fsp, devm_kfree(dev, tun_seg); } - if (perfect_filter) - set_bit(fltr_idx, hw->fdir_perfect_fltr); - else - clear_bit(fltr_idx, hw->fdir_perfect_fltr); - return ret; err_exit: devm_kfree(dev, tun_seg); devm_kfree(dev, seg); - return -EOPNOTSUPP; + return ret; } /** @@ -1684,7 +1684,9 @@ int ice_add_fdir_ethtool(struct ice_vsi *vsi, struct ethtool_rxnfc *cmd) input->comp_report = ICE_FXD_FLTR_QW0_COMP_REPORT_SW_FAIL; /* input struct is added to the HW filter list */ - ice_fdir_update_list_entry(pf, input, fsp->location); + ret = ice_fdir_update_list_entry(pf, input, fsp->location); + if (ret) + goto release_lock; ret = ice_fdir_write_all_fltr(pf, input, true); if (ret) From bf2d7b63e2b5d5e53cb219e92114564461bdf120 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Fri, 21 Jul 2023 12:03:55 +0800 Subject: [PATCH 066/513] bonding: reset bond's flags when down link is P2P device [ Upstream commit da19a2b967cf1e2c426f50d28550d1915214a81d ] When adding a point to point downlink to the bond, we neglected to reset the bond's flags, which were still using flags like BROADCAST and MULTICAST. Consequently, this would initiate ARP/DAD for P2P downlink interfaces, such as when adding a GRE device to the bonding. To address this issue, let's reset the bond's flags for P2P interfaces. 
Before fix: 7: gre0@NONE: mtu 1500 qdisc noqueue master bond0 state UNKNOWN group default qlen 1000 link/gre6 2006:70:10::1 peer 2006:70:10::2 permaddr 167f:18:f188:: 8: bond0: mtu 1500 qdisc noqueue state UP group default qlen 1000 link/gre6 2006:70:10::1 brd 2006:70:10::2 inet6 fe80::200:ff:fe00:0/64 scope link valid_lft forever preferred_lft forever After fix: 7: gre0@NONE: mtu 1500 qdisc noqueue master bond2 state UNKNOWN group default qlen 1000 link/gre6 2006:70:10::1 peer 2006:70:10::2 permaddr c29e:557a:e9d9:: 8: bond0: mtu 1500 qdisc noqueue state UP group default qlen 1000 link/gre6 2006:70:10::1 peer 2006:70:10::2 inet6 fe80::1/64 scope link valid_lft forever preferred_lft forever Reported-by: Liang Li Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2221438 Fixes: 872254dd6b1f ("net/bonding: Enable bonding to enslave non ARPHRD_ETHER") Signed-off-by: Hangbin Liu Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- drivers/net/bonding/bond_main.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 7b0b4049bd29..69cc36c0840f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1482,6 +1482,11 @@ static void bond_setup_by_slave(struct net_device *bond_dev, memcpy(bond_dev->broadcast, slave_dev->broadcast, slave_dev->addr_len); + + if (slave_dev->flags & IFF_POINTOPOINT) { + bond_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + bond_dev->flags |= (IFF_POINTOPOINT | IFF_NOARP); + } } /* On bonding slaves other than the currently active slave, suppress From 9be8ec5a0cfed64ac8b29b193135148e52f26c09 Mon Sep 17 00:00:00 2001 From: Hangbin Liu Date: Fri, 21 Jul 2023 12:03:56 +0800 Subject: [PATCH 067/513] team: reset team's flags when down link is P2P device [ Upstream commit fa532bee17d15acf8bba4bc8e2062b7a093ba801 ] When adding a point to point downlink to team device, we neglected to reset the team's flags, which were still using flags like BROADCAST and MULTICAST. Consequently, this would initiate ARP/DAD for P2P downlink interfaces, such as when adding a GRE device to team device. Fix this by remove multicast/broadcast flags and add p2p and noarp flags. After removing the none ethernet interface and adding an ethernet interface to team, we need to reset team interface flags. Unlike bonding interface, team do not need restore IFF_MASTER, IFF_SLAVE flags. 
Reported-by: Liang Li Closes: https://bugzilla.redhat.com/show_bug.cgi?id=2221438 Fixes: 1d76efe1577b ("team: add support for non-ethernet devices") Signed-off-by: Hangbin Liu Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- drivers/net/team/team.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index d9386d614a94..4dfa9c610974 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2130,6 +2130,15 @@ static void team_setup_by_port(struct net_device *dev, dev->mtu = port_dev->mtu; memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len); eth_hw_addr_inherit(dev, port_dev); + + if (port_dev->flags & IFF_POINTOPOINT) { + dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST); + dev->flags |= (IFF_POINTOPOINT | IFF_NOARP); + } else if ((port_dev->flags & (IFF_BROADCAST | IFF_MULTICAST)) == + (IFF_BROADCAST | IFF_MULTICAST)) { + dev->flags |= (IFF_BROADCAST | IFF_MULTICAST); + dev->flags &= ~(IFF_POINTOPOINT | IFF_NOARP); + } } static int team_dev_type_check_change(struct net_device *dev, From 238420a24d6bbac4b1c805b9e2657f02a72ab344 Mon Sep 17 00:00:00 2001 From: Vincent Whitchurch Date: Fri, 21 Jul 2023 15:39:20 +0200 Subject: [PATCH 068/513] net: stmmac: Apply redundant write work around on 4.xx too [ Upstream commit 284779dbf4e98753458708783af8c35630674a21 ] commit a3a57bf07de23fe1ff779e0fdf710aa581c3ff73 ("net: stmmac: work around sporadic tx issue on link-up") worked around a problem with TX sometimes not working after a link-up by avoiding a redundant write to MAC_CTRL_REG (aka GMAC_CONFIG), since the IP appeared to have problems with handling multiple writes to that register in some cases. That commit however only added the work around to dwmac_lib.c (apart from the common code in stmmac_main.c), but my systems with version 4.21a of the IP exhibit the same problem, so add the work around to dwmac4_lib.c too. Fixes: a3a57bf07de2 ("net: stmmac: work around sporadic tx issue on link-up") Signed-off-by: Vincent Whitchurch Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230721-stmmac-tx-workaround-v1-1-9411cbd5ee07@axis.com Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c index 9292a1fab7d3..7011c08d2e01 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_lib.c @@ -207,13 +207,15 @@ void stmmac_dwmac4_set_mac_addr(void __iomem *ioaddr, u8 addr[6], void stmmac_dwmac4_set_mac(void __iomem *ioaddr, bool enable) { u32 value = readl(ioaddr + GMAC_CONFIG); + u32 old_val = value; if (enable) value |= GMAC_CONFIG_RE | GMAC_CONFIG_TE; else value &= ~(GMAC_CONFIG_TE | GMAC_CONFIG_RE); - writel(value, ioaddr + GMAC_CONFIG); + if (value != old_val) + writel(value, ioaddr + GMAC_CONFIG); } void stmmac_dwmac4_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, From 8412fe36863b5e2315a97d451768b0fe68115188 Mon Sep 17 00:00:00 2001 From: Maxim Mikityanskiy Date: Fri, 21 Jul 2023 17:54:23 +0300 Subject: [PATCH 069/513] platform/x86: msi-laptop: Fix rfkill out-of-sync on MSI Wind U100 [ Upstream commit ad084a6d99bc182bf109c190c808e2ea073ec57b ] Only the HW rfkill state is toggled on laptops with quirks->ec_read_only (so far only MSI Wind U90/U100). There are, however, a few issues with the implementation: 1. 
The initial HW state is always unblocked, regardless of the actual state on boot, because msi_init_rfkill only sets the SW state, regardless of ec_read_only. 2. The initial SW state corresponds to the actual state on boot, but it can't be changed afterwards, because set_device_state returns -EOPNOTSUPP. It confuses the userspace, making Wi-Fi and/or Bluetooth unusable if it was blocked on boot, and breaking the airplane mode if the rfkill was unblocked on boot. Address the above issues by properly initializing the HW state on ec_read_only laptops and by allowing the userspace to toggle the SW state. Don't set the SW state ourselves and let the userspace fully control it. Toggling the SW state is a no-op, however, it allows the userspace to properly toggle the airplane mode. The actual SW radio disablement is handled by the corresponding rtl818x_pci and btusb drivers that have their own rfkills. Tested on MSI Wind U100 Plus, BIOS ver 1.0G, EC ver 130. Fixes: 0816392b97d4 ("msi-laptop: merge quirk tables to one") Fixes: 0de6575ad0a8 ("msi-laptop: Add MSI Wind U90/U100 support") Signed-off-by: Maxim Mikityanskiy Link: https://lore.kernel.org/r/20230721145423.161057-1-maxtram95@gmail.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede Signed-off-by: Sasha Levin --- drivers/platform/x86/msi-laptop.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c index 0e804b6c2d24..dfb4af759aa7 100644 --- a/drivers/platform/x86/msi-laptop.c +++ b/drivers/platform/x86/msi-laptop.c @@ -210,7 +210,7 @@ static ssize_t set_device_state(const char *buf, size_t count, u8 mask) return -EINVAL; if (quirks->ec_read_only) - return -EOPNOTSUPP; + return 0; /* read current device state */ result = ec_read(MSI_STANDARD_EC_COMMAND_ADDRESS, &rdata); @@ -841,15 +841,15 @@ static bool msi_laptop_i8042_filter(unsigned char data, unsigned char str, static void msi_init_rfkill(struct work_struct *ignored) { if (rfk_wlan) { - rfkill_set_sw_state(rfk_wlan, !wlan_s); + msi_rfkill_set_state(rfk_wlan, !wlan_s); rfkill_wlan_set(NULL, !wlan_s); } if (rfk_bluetooth) { - rfkill_set_sw_state(rfk_bluetooth, !bluetooth_s); + msi_rfkill_set_state(rfk_bluetooth, !bluetooth_s); rfkill_bluetooth_set(NULL, !bluetooth_s); } if (rfk_threeg) { - rfkill_set_sw_state(rfk_threeg, !threeg_s); + msi_rfkill_set_state(rfk_threeg, !threeg_s); rfkill_threeg_set(NULL, !threeg_s); } } From feba294c454a51bb1e80dd2ff038e335f07ae481 Mon Sep 17 00:00:00 2001 From: Muhammad Husaini Zulkifli Date: Mon, 24 Jul 2023 09:12:50 -0700 Subject: [PATCH 070/513] igc: Fix Kernel Panic during ndo_tx_timeout callback [ Upstream commit d4a7ce642100765119a872d4aba1bf63e3a22c8a ] The Xeon validation group has been carrying out some loaded tests with various HW configurations, and they have seen some transmit queue time out happening during the test. This will cause the reset adapter function to be called by igc_tx_timeout(). Similar race conditions may arise when the interface is being brought down and up in igc_reinit_locked(), an interrupt being generated, and igc_clean_tx_irq() being called to complete the TX. When the igc_tx_timeout() function is invoked, this patch will turn off all TX ring HW queues during igc_down() process. TX ring HW queues will be activated again during the igc_configure_tx_ring() process when performing the igc_up() procedure later. This patch also moved existing igc_disable_tx_ring_hw() to avoid using forward declaration. 
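The ordering the fix establishes in igc_down() is the crux; trimmed from the hunk below, with everything else elided:

    igc_disable_all_tx_rings_hw(adapter);   /* tell the HW to stop the queues first */
    igc_clean_all_tx_rings(adapter);        /* then release the SW descriptors */
    igc_clean_all_rx_rings(adapter);

This closes the window in which igc_clean_tx_irq() could complete descriptors on queues the hardware had not yet been told to stop, which matches the race described above between the reset path and igc_clean_tx_irq().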
Kernel trace: [ 7678.747813] ------------[ cut here ]------------ [ 7678.757914] NETDEV WATCHDOG: enp1s0 (igc): transmit queue 2 timed out [ 7678.770117] WARNING: CPU: 0 PID: 13 at net/sched/sch_generic.c:525 dev_watchdog+0x1ae/0x1f0 [ 7678.784459] Modules linked in: xt_conntrack nft_chain_nat xt_MASQUERADE xt_addrtype nft_compat nf_tables nfnetlink br_netfilter bridge stp llc overlay dm_mod emrcha(PO) emriio(PO) rktpm(PO) cegbuf_mod(PO) patch_update(PO) se(PO) sgx_tgts(PO) mktme(PO) keylocker(PO) svtdx(PO) svfs_pci_hotplug(PO) vtd_mod(PO) davemem(PO) svmabort(PO) svindexio(PO) usbx2(PO) ehci_sched(PO) svheartbeat(PO) ioapic(PO) sv8259(PO) svintr(PO) lt(PO) pcierootport(PO) enginefw_mod(PO) ata(PO) smbus(PO) spiflash_cdf(PO) arden(PO) dsa_iax(PO) oobmsm_punit(PO) cpm(PO) svkdb(PO) ebg_pch(PO) pch(PO) sviotargets(PO) svbdf(PO) svmem(PO) svbios(PO) dram(PO) svtsc(PO) targets(PO) superio(PO) svkernel(PO) cswitch(PO) mcf(PO) pentiumIII_mod(PO) fs_svfs(PO) mdevdefdb(PO) svfs_os_services(O) ixgbe mdio mdio_devres libphy emeraldrapids_svdefs(PO) regsupport(O) libnvdimm nls_cp437 snd_hda_codec_realtek snd_hda_codec_generic ledtrig_audio snd_hda_intel snd_intel_dspcfg snd_hda_codec snd_hwdep x86_pkg_temp_thermal snd_hda_core snd_pcm snd_timer isst_if_mbox_pci [ 7678.784496] input_leds isst_if_mmio sg snd isst_if_common soundcore wmi button sad9(O) drm fuse backlight configfs efivarfs ip_tables x_tables vmd sdhci led_class rtl8150 r8152 hid_generic pegasus mmc_block usbhid mmc_core hid megaraid_sas ixgb igb i2c_algo_bit ice i40e hpsa scsi_transport_sas e1000e e1000 e100 ax88179_178a usbnet xhci_pci sd_mod xhci_hcd t10_pi crc32c_intel crc64_rocksoft igc crc64 crc_t10dif usbcore crct10dif_generic ptp crct10dif_common usb_common pps_core [ 7679.200403] RIP: 0010:dev_watchdog+0x1ae/0x1f0 [ 7679.210201] Code: 28 e9 53 ff ff ff 4c 89 e7 c6 05 06 42 b9 00 01 e8 17 d1 fb ff 44 89 e9 4c 89 e6 48 c7 c7 40 ad fb 81 48 89 c2 e8 52 62 82 ff <0f> 0b e9 72 ff ff ff 65 8b 05 80 7d 7c 7e 89 c0 48 0f a3 05 0a c1 [ 7679.245438] RSP: 0018:ffa00000001f7d90 EFLAGS: 00010282 [ 7679.256021] RAX: 0000000000000000 RBX: ff11000109938440 RCX: 0000000000000000 [ 7679.268710] RDX: ff11000361e26cd8 RSI: ff11000361e1b880 RDI: ff11000361e1b880 [ 7679.281314] RBP: ffa00000001f7da8 R08: ff1100035f8fffe8 R09: 0000000000027ffb [ 7679.293840] R10: 0000000000001f0a R11: ff1100035f840000 R12: ff11000109938000 [ 7679.306276] R13: 0000000000000002 R14: dead000000000122 R15: ffa00000001f7e18 [ 7679.318648] FS: 0000000000000000(0000) GS:ff11000361e00000(0000) knlGS:0000000000000000 [ 7679.332064] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 7679.342757] CR2: 00007ffff7fca168 CR3: 000000013b08a006 CR4: 0000000000471ef8 [ 7679.354984] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 7679.367207] DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400 [ 7679.379370] PKRU: 55555554 [ 7679.386446] Call Trace: [ 7679.393152] [ 7679.399363] ? __pfx_dev_watchdog+0x10/0x10 [ 7679.407870] call_timer_fn+0x31/0x110 [ 7679.415698] expire_timers+0xb2/0x120 [ 7679.423403] run_timer_softirq+0x179/0x1e0 [ 7679.431532] ? __schedule+0x2b1/0x820 [ 7679.439078] __do_softirq+0xd1/0x295 [ 7679.446426] ? __pfx_smpboot_thread_fn+0x10/0x10 [ 7679.454867] run_ksoftirqd+0x22/0x30 [ 7679.462058] smpboot_thread_fn+0xb7/0x160 [ 7679.469670] kthread+0xcd/0xf0 [ 7679.476097] ? 
__pfx_kthread+0x10/0x10 [ 7679.483211] ret_from_fork+0x29/0x50 [ 7679.490047] [ 7679.495204] ---[ end trace 0000000000000000 ]--- [ 7679.503179] igc 0000:01:00.0 enp1s0: Register Dump [ 7679.511230] igc 0000:01:00.0 enp1s0: Register Name Value [ 7679.519892] igc 0000:01:00.0 enp1s0: CTRL 181c0641 [ 7679.528782] igc 0000:01:00.0 enp1s0: STATUS 40280683 [ 7679.537551] igc 0000:01:00.0 enp1s0: CTRL_EXT 10000040 [ 7679.546284] igc 0000:01:00.0 enp1s0: MDIC 180a3800 [ 7679.554942] igc 0000:01:00.0 enp1s0: ICR 00000081 [ 7679.563503] igc 0000:01:00.0 enp1s0: RCTL 04408022 [ 7679.571963] igc 0000:01:00.0 enp1s0: RDLEN[0-3] 00001000 00001000 00001000 00001000 [ 7679.583075] igc 0000:01:00.0 enp1s0: RDH[0-3] 00000068 000000b6 0000000f 00000031 [ 7679.594162] igc 0000:01:00.0 enp1s0: RDT[0-3] 00000066 000000b2 0000000e 00000030 [ 7679.605174] igc 0000:01:00.0 enp1s0: RXDCTL[0-3] 02040808 02040808 02040808 02040808 [ 7679.616196] igc 0000:01:00.0 enp1s0: RDBAL[0-3] 1bb7c000 1bb7f000 1bb82000 0ef33000 [ 7679.627242] igc 0000:01:00.0 enp1s0: RDBAH[0-3] 00000001 00000001 00000001 00000001 [ 7679.638256] igc 0000:01:00.0 enp1s0: TCTL a503f0fa [ 7679.646607] igc 0000:01:00.0 enp1s0: TDBAL[0-3] 2ba4a000 1bb6f000 1bb74000 1bb79000 [ 7679.657609] igc 0000:01:00.0 enp1s0: TDBAH[0-3] 00000001 00000001 00000001 00000001 [ 7679.668551] igc 0000:01:00.0 enp1s0: TDLEN[0-3] 00001000 00001000 00001000 00001000 [ 7679.679470] igc 0000:01:00.0 enp1s0: TDH[0-3] 000000a7 0000002d 000000bf 000000d9 [ 7679.690406] igc 0000:01:00.0 enp1s0: TDT[0-3] 000000a7 0000002d 000000bf 000000d9 [ 7679.701264] igc 0000:01:00.0 enp1s0: TXDCTL[0-3] 02100108 02100108 02100108 02100108 [ 7679.712123] igc 0000:01:00.0 enp1s0: Reset adapter [ 7683.085967] igc 0000:01:00.0 enp1s0: NIC Link is Up 1000 Mbps Full Duplex, Flow Control: RX/TX [ 8086.945561] ------------[ cut here ]------------ Entering kdb (current=0xffffffff8220b200, pid 0) on processor 0 Oops: (null) due to oops @ 0xffffffff81573888 RIP: 0010:dql_completed+0x148/0x160 Code: c9 00 48 89 57 58 e9 46 ff ff ff 45 85 e4 41 0f 95 c4 41 39 db 0f 95 c1 41 84 cc 74 05 45 85 ed 78 0a 44 89 c1 e9 27 ff ff ff <0f> 0b 01 f6 44 89 c1 29 f1 0f 48 ca eb 8c cc cc cc cc cc cc cc cc RSP: 0018:ffa0000000003e00 EFLAGS: 00010287 RAX: 000000000000006c RBX: ffa0000003eb0f78 RCX: ff11000109938000 RDX: 0000000000000003 RSI: 0000000000000160 RDI: ff110001002e9480 RBP: ffa0000000003ed8 R08: ff110001002e93c0 R09: ffa0000000003d28 R10: 0000000000007cc0 R11: 0000000000007c54 R12: 00000000ffffffd9 R13: ff1100037039cb00 R14: 00000000ffffffd9 R15: ff1100037039c048 FS: 0000000000000000(0000) GS:ff11000361e00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007ffff7fca168 CR3: 000000013b08a003 CR4: 0000000000471ef8 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400 PKRU: 55555554 Call Trace: ? 
igc_poll+0x1a9/0x14d0 [igc] __napi_poll+0x2e/0x1b0 net_rx_action+0x126/0x250 __do_softirq+0xd1/0x295 irq_exit_rcu+0xc5/0xf0 common_interrupt+0x86/0xa0 asm_common_interrupt+0x27/0x40 RIP: 0010:cpuidle_enter_state+0xd3/0x3e0 Code: 73 f1 ff ff 49 89 c6 8b 05 e2 ca a7 00 85 c0 0f 8f b3 02 00 00 31 ff e8 1b de 75 ff 80 7d d7 00 0f 85 cd 01 00 00 fb 45 85 ff <0f> 88 fd 00 00 00 49 63 cf 4c 2b 75 c8 48 8d 04 49 48 89 ca 48 8d RSP: 0018:ffffffff82203df0 EFLAGS: 00000202 RAX: ff11000361e2a200 RBX: 0000000000000002 RCX: 000000000000001f RDX: 0000000000000000 RSI: 000000003cf3cf3d RDI: 0000000000000000 RBP: ffffffff82203e28 R08: 0000075ae38471c8 R09: 0000000000000018 R10: 000000000000031a R11: ffffffff8238dca0 R12: ffd1ffffff200000 R13: ffffffff8238dca0 R14: 0000075ae38471c8 R15: 0000000000000002 cpuidle_enter+0x2e/0x50 call_cpuidle+0x23/0x40 do_idle+0x1be/0x220 cpu_startup_entry+0x20/0x30 rest_init+0xb5/0xc0 arch_call_rest_init+0xe/0x30 start_kernel+0x448/0x760 x86_64_start_kernel+0x109/0x150 secondary_startup_64_no_verify+0xe0/0xeb more> [0]kdb> [0]kdb> [0]kdb> go Catastrophic error detected kdb_continue_catastrophic=0, type go a second time if you really want to continue [0]kdb> go Catastrophic error detected kdb_continue_catastrophic=0, attempting to continue [ 8086.955689] refcount_t: underflow; use-after-free. [ 8086.955697] WARNING: CPU: 0 PID: 0 at lib/refcount.c:28 refcount_warn_saturate+0xc2/0x110 [ 8086.955706] Modules linked in: xt_conntrack nft_chain_nat xt_MASQUERADE xt_addrtype nft_compat nf_tables nfnetlink br_netfilter bridge stp llc overlay dm_mod emrcha(PO) emriio(PO) rktpm(PO) cegbuf_mod(PO) patch_update(PO) se(PO) sgx_tgts(PO) mktme(PO) keylocker(PO) svtdx(PO) svfs_pci_hotplug(PO) vtd_mod(PO) davemem(PO) svmabort(PO) svindexio(PO) usbx2(PO) ehci_sched(PO) svheartbeat(PO) ioapic(PO) sv8259(PO) svintr(PO) lt(PO) pcierootport(PO) enginefw_mod(PO) ata(PO) smbus(PO) spiflash_cdf(PO) arden(PO) dsa_iax(PO) oobmsm_punit(PO) cpm(PO) svkdb(PO) ebg_pch(PO) pch(PO) sviotargets(PO) svbdf(PO) svmem(PO) svbios(PO) dram(PO) svtsc(PO) targets(PO) superio(PO) svkernel(PO) cswitch(PO) mcf(PO) pentiumIII_mod(PO) fs_svfs(PO) mdevdefdb(PO) svfs_os_services(O) ixgbe mdio mdio_devres libphy emeraldrapids_svdefs(PO) regsupport(O) libnvdimm nls_cp437 snd_hda_codec_realtek snd_hda_codec_generic ledtrig_audio snd_hda_intel snd_intel_dspcfg snd_hda_codec snd_hwdep x86_pkg_temp_thermal snd_hda_core snd_pcm snd_timer isst_if_mbox_pci [ 8086.955751] input_leds isst_if_mmio sg snd isst_if_common soundcore wmi button sad9(O) drm fuse backlight configfs efivarfs ip_tables x_tables vmd sdhci led_class rtl8150 r8152 hid_generic pegasus mmc_block usbhid mmc_core hid megaraid_sas ixgb igb i2c_algo_bit ice i40e hpsa scsi_transport_sas e1000e e1000 e100 ax88179_178a usbnet xhci_pci sd_mod xhci_hcd t10_pi crc32c_intel crc64_rocksoft igc crc64 crc_t10dif usbcore crct10dif_generic ptp crct10dif_common usb_common pps_core [ 8086.955784] RIP: 0010:refcount_warn_saturate+0xc2/0x110 [ 8086.955788] Code: 01 e8 82 e7 b4 ff 0f 0b 5d c3 cc cc cc cc 80 3d 68 c6 eb 00 00 75 81 48 c7 c7 a0 87 f6 81 c6 05 58 c6 eb 00 01 e8 5e e7 b4 ff <0f> 0b 5d c3 cc cc cc cc 80 3d 42 c6 eb 00 00 0f 85 59 ff ff ff 48 [ 8086.955790] RSP: 0018:ffa0000000003da0 EFLAGS: 00010286 [ 8086.955793] RAX: 0000000000000000 RBX: ff1100011da40ee0 RCX: ff11000361e1b888 [ 8086.955794] RDX: 00000000ffffffd8 RSI: 0000000000000027 RDI: ff11000361e1b880 [ 8086.955795] RBP: ffa0000000003da0 R08: 80000000ffff9f45 R09: ffa0000000003d28 [ 8086.955796] R10: ff1100035f840000 
R11: 0000000000000028 R12: ff11000319ff8000 [ 8086.955797] R13: ff1100011bb79d60 R14: 00000000ffffffd6 R15: ff1100037039cb00 [ 8086.955798] FS: 0000000000000000(0000) GS:ff11000361e00000(0000) knlGS:0000000000000000 [ 8086.955800] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 8086.955801] CR2: 00007ffff7fca168 CR3: 000000013b08a003 CR4: 0000000000471ef8 [ 8086.955803] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 8086.955803] DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400 [ 8086.955804] PKRU: 55555554 [ 8086.955805] Call Trace: [ 8086.955806] [ 8086.955808] tcp_wfree+0x112/0x130 [ 8086.955814] skb_release_head_state+0x24/0xa0 [ 8086.955818] napi_consume_skb+0x9c/0x160 [ 8086.955821] igc_poll+0x5d8/0x14d0 [igc] [ 8086.955835] __napi_poll+0x2e/0x1b0 [ 8086.955839] net_rx_action+0x126/0x250 [ 8086.955843] __do_softirq+0xd1/0x295 [ 8086.955846] irq_exit_rcu+0xc5/0xf0 [ 8086.955851] common_interrupt+0x86/0xa0 [ 8086.955857] [ 8086.955857] [ 8086.955858] asm_common_interrupt+0x27/0x40 [ 8086.955862] RIP: 0010:cpuidle_enter_state+0xd3/0x3e0 [ 8086.955866] Code: 73 f1 ff ff 49 89 c6 8b 05 e2 ca a7 00 85 c0 0f 8f b3 02 00 00 31 ff e8 1b de 75 ff 80 7d d7 00 0f 85 cd 01 00 00 fb 45 85 ff <0f> 88 fd 00 00 00 49 63 cf 4c 2b 75 c8 48 8d 04 49 48 89 ca 48 8d [ 8086.955867] RSP: 0018:ffffffff82203df0 EFLAGS: 00000202 [ 8086.955869] RAX: ff11000361e2a200 RBX: 0000000000000002 RCX: 000000000000001f [ 8086.955870] RDX: 0000000000000000 RSI: 000000003cf3cf3d RDI: 0000000000000000 [ 8086.955871] RBP: ffffffff82203e28 R08: 0000075ae38471c8 R09: 0000000000000018 [ 8086.955872] R10: 000000000000031a R11: ffffffff8238dca0 R12: ffd1ffffff200000 [ 8086.955873] R13: ffffffff8238dca0 R14: 0000075ae38471c8 R15: 0000000000000002 [ 8086.955875] cpuidle_enter+0x2e/0x50 [ 8086.955880] call_cpuidle+0x23/0x40 [ 8086.955884] do_idle+0x1be/0x220 [ 8086.955887] cpu_startup_entry+0x20/0x30 [ 8086.955889] rest_init+0xb5/0xc0 [ 8086.955892] arch_call_rest_init+0xe/0x30 [ 8086.955895] start_kernel+0x448/0x760 [ 8086.955898] x86_64_start_kernel+0x109/0x150 [ 8086.955900] secondary_startup_64_no_verify+0xe0/0xeb [ 8086.955904] [ 8086.955904] ---[ end trace 0000000000000000 ]--- [ 8086.955912] ------------[ cut here ]------------ [ 8086.955913] kernel BUG at lib/dynamic_queue_limits.c:27! 
[ 8086.955918] invalid opcode: 0000 [#1] SMP [ 8086.955922] RIP: 0010:dql_completed+0x148/0x160 [ 8086.955925] Code: c9 00 48 89 57 58 e9 46 ff ff ff 45 85 e4 41 0f 95 c4 41 39 db 0f 95 c1 41 84 cc 74 05 45 85 ed 78 0a 44 89 c1 e9 27 ff ff ff <0f> 0b 01 f6 44 89 c1 29 f1 0f 48 ca eb 8c cc cc cc cc cc cc cc cc [ 8086.955927] RSP: 0018:ffa0000000003e00 EFLAGS: 00010287 [ 8086.955928] RAX: 000000000000006c RBX: ffa0000003eb0f78 RCX: ff11000109938000 [ 8086.955929] RDX: 0000000000000003 RSI: 0000000000000160 RDI: ff110001002e9480 [ 8086.955930] RBP: ffa0000000003ed8 R08: ff110001002e93c0 R09: ffa0000000003d28 [ 8086.955931] R10: 0000000000007cc0 R11: 0000000000007c54 R12: 00000000ffffffd9 [ 8086.955932] R13: ff1100037039cb00 R14: 00000000ffffffd9 R15: ff1100037039c048 [ 8086.955933] FS: 0000000000000000(0000) GS:ff11000361e00000(0000) knlGS:0000000000000000 [ 8086.955934] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 8086.955935] CR2: 00007ffff7fca168 CR3: 000000013b08a003 CR4: 0000000000471ef8 [ 8086.955936] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [ 8086.955937] DR3: 0000000000000000 DR6: 00000000fffe07f0 DR7: 0000000000000400 [ 8086.955938] PKRU: 55555554 [ 8086.955939] Call Trace: [ 8086.955939] [ 8086.955940] ? igc_poll+0x1a9/0x14d0 [igc] [ 8086.955949] __napi_poll+0x2e/0x1b0 [ 8086.955952] net_rx_action+0x126/0x250 [ 8086.955956] __do_softirq+0xd1/0x295 [ 8086.955958] irq_exit_rcu+0xc5/0xf0 [ 8086.955961] common_interrupt+0x86/0xa0 [ 8086.955964] [ 8086.955965] [ 8086.955965] asm_common_interrupt+0x27/0x40 [ 8086.955968] RIP: 0010:cpuidle_enter_state+0xd3/0x3e0 [ 8086.955971] Code: 73 f1 ff ff 49 89 c6 8b 05 e2 ca a7 00 85 c0 0f 8f b3 02 00 00 31 ff e8 1b de 75 ff 80 7d d7 00 0f 85 cd 01 00 00 fb 45 85 ff <0f> 88 fd 00 00 00 49 63 cf 4c 2b 75 c8 48 8d 04 49 48 89 ca 48 8d [ 8086.955972] RSP: 0018:ffffffff82203df0 EFLAGS: 00000202 [ 8086.955973] RAX: ff11000361e2a200 RBX: 0000000000000002 RCX: 000000000000001f [ 8086.955974] RDX: 0000000000000000 RSI: 000000003cf3cf3d RDI: 0000000000000000 [ 8086.955974] RBP: ffffffff82203e28 R08: 0000075ae38471c8 R09: 0000000000000018 [ 8086.955975] R10: 000000000000031a R11: ffffffff8238dca0 R12: ffd1ffffff200000 [ 8086.955976] R13: ffffffff8238dca0 R14: 0000075ae38471c8 R15: 0000000000000002 [ 8086.955978] cpuidle_enter+0x2e/0x50 [ 8086.955981] call_cpuidle+0x23/0x40 [ 8086.955984] do_idle+0x1be/0x220 [ 8086.955985] cpu_startup_entry+0x20/0x30 [ 8086.955987] rest_init+0xb5/0xc0 [ 8086.955990] arch_call_rest_init+0xe/0x30 [ 8086.955992] start_kernel+0x448/0x760 [ 8086.955994] x86_64_start_kernel+0x109/0x150 [ 8086.955996] secondary_startup_64_no_verify+0xe0/0xeb [ 8086.955998] [ 8086.955999] Modules linked in: xt_conntrack nft_chain_nat xt_MASQUERADE xt_addrtype nft_compat nf_tables nfnetlink br_netfilter bridge stp llc overlay dm_mod emrcha(PO) emriio(PO) rktpm(PO) cegbuf_mod(PO) patch_update(PO) se(PO) sgx_tgts(PO) mktme(PO) keylocker(PO) svtdx(PO) svfs_pci_hotplug(PO) vtd_mod(PO) davemem(PO) svmabort(PO) svindexio(PO) usbx2(PO) ehci_sched(PO) svheartbeat(PO) ioapic(PO) sv8259(PO) svintr(PO) lt(PO) pcierootport(PO) enginefw_mod(PO) ata(PO) smbus(PO) spiflash_cdf(PO) arden(PO) dsa_iax(PO) oobmsm_punit(PO) cpm(PO) svkdb(PO) ebg_pch(PO) pch(PO) sviotargets(PO) svbdf(PO) svmem(PO) svbios(PO) dram(PO) svtsc(PO) targets(PO) superio(PO) svkernel(PO) cswitch(PO) mcf(PO) pentiumIII_mod(PO) fs_svfs(PO) mdevdefdb(PO) svfs_os_services(O) ixgbe mdio mdio_devres libphy emeraldrapids_svdefs(PO) regsupport(O) libnvdimm nls_cp437 
snd_hda_codec_realtek snd_hda_codec_generic ledtrig_audio snd_hda_intel snd_intel_dspcfg snd_hda_codec snd_hwdep x86_pkg_temp_thermal snd_hda_core snd_pcm snd_timer isst_if_mbox_pci [ 8086.956029] input_leds isst_if_mmio sg snd isst_if_common soundcore wmi button sad9(O) drm fuse backlight configfs efivarfs ip_tables x_tables vmd sdhci led_class rtl8150 r8152 hid_generic pegasus mmc_block usbhid mmc_core hid megaraid_sas ixgb igb i2c_algo_bit ice i40e hpsa scsi_transport_sas e1000e e1000 e100 ax88179_178a usbnet xhci_pci sd_mod xhci_hcd t10_pi crc32c_intel crc64_rocksoft igc crc64 crc_t10dif usbcore crct10dif_generic ptp crct10dif_common usb_common pps_core [16762.543675] INFO: NMI handler (kgdb_nmi_handler) took too long to run: 8675587.593 msecs [16762.543678] INFO: NMI handler (kgdb_nmi_handler) took too long to run: 8675587.595 msecs [16762.543673] INFO: NMI handler (kgdb_nmi_handler) took too long to run: 8675587.495 msecs [16762.543679] INFO: NMI handler (kgdb_nmi_handler) took too long to run: 8675587.599 msecs [16762.543678] INFO: NMI handler (kgdb_nmi_handler) took too long to run: 8675587.598 msecs [16762.543690] INFO: NMI handler (kgdb_nmi_handler) took too long to run: 8675587.605 msecs [16762.543684] INFO: NMI handler (kgdb_nmi_handler) took too long to run: 8675587.599 msecs [16762.543693] INFO: NMI handler (kgdb_nmi_handler) took too long to run: 8675587.613 msecs [16762.543784] ---[ end trace 0000000000000000 ]--- [16762.849099] RIP: 0010:dql_completed+0x148/0x160 PANIC: Fatal exception in interrupt Fixes: 9b275176270e ("igc: Add ndo_tx_timeout support") Tested-by: Alejandra Victoria Alcaraz Signed-off-by: Muhammad Husaini Zulkifli Acked-by: Sasha Neftin Tested-by: Naama Meir Signed-off-by: Tony Nguyen Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/igc/igc_main.c | 40 ++++++++++++++++------- 1 file changed, 28 insertions(+), 12 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index bcc1c428b4cc..a47dce10d3a7 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -316,6 +316,33 @@ static void igc_clean_all_tx_rings(struct igc_adapter *adapter) igc_clean_tx_ring(adapter->tx_ring[i]); } +static void igc_disable_tx_ring_hw(struct igc_ring *ring) +{ + struct igc_hw *hw = &ring->q_vector->adapter->hw; + u8 idx = ring->reg_idx; + u32 txdctl; + + txdctl = rd32(IGC_TXDCTL(idx)); + txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; + txdctl |= IGC_TXDCTL_SWFLUSH; + wr32(IGC_TXDCTL(idx), txdctl); +} + +/** + * igc_disable_all_tx_rings_hw - Disable all transmit queue operation + * @adapter: board private structure + */ +static void igc_disable_all_tx_rings_hw(struct igc_adapter *adapter) +{ + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) { + struct igc_ring *tx_ring = adapter->tx_ring[i]; + + igc_disable_tx_ring_hw(tx_ring); + } +} + /** * igc_setup_tx_resources - allocate Tx resources (Descriptors) * @tx_ring: tx descriptor ring (for a specific queue) to setup @@ -4975,6 +5002,7 @@ void igc_down(struct igc_adapter *adapter) /* clear VLAN promisc flag so VFTA will be updated if necessary */ adapter->flags &= ~IGC_FLAG_VLAN_PROMISC; + igc_disable_all_tx_rings_hw(adapter); igc_clean_all_tx_rings(adapter); igc_clean_all_rx_rings(adapter); } @@ -7124,18 +7152,6 @@ void igc_enable_rx_ring(struct igc_ring *ring) igc_alloc_rx_buffers(ring, igc_desc_unused(ring)); } -static void igc_disable_tx_ring_hw(struct igc_ring *ring) -{ - struct igc_hw *hw = &ring->q_vector->adapter->hw; - u8 idx = ring->reg_idx; - u32 txdctl; - - txdctl = rd32(IGC_TXDCTL(idx)); - txdctl &= ~IGC_TXDCTL_QUEUE_ENABLE; - txdctl |= IGC_TXDCTL_SWFLUSH; - wr32(IGC_TXDCTL(idx), txdctl); -} - void igc_disable_tx_ring(struct igc_ring *ring) { igc_disable_tx_ring_hw(ring); From 50cbb9d195c197af671869c8cadce3bd483735a0 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 20 Jul 2023 21:30:05 +0200 Subject: [PATCH 071/513] netfilter: nft_set_rbtree: fix overlap expiration walk [ Upstream commit f718863aca469a109895cb855e6b81fff4827d71 ] The lazy gc on insert that should remove timed-out entries fails to release the other half of the interval, if any. Can be reproduced with tests/shell/testcases/sets/0044interval_overlap_0 in nftables.git and kmemleak enabled kernel. Second bug is the use of rbe_prev vs. prev pointer. If rbe_prev() returns NULL after at least one iteration, rbe_prev points to element that is not an end interval, hence it should not be removed. Lastly, check the genmask of the end interval if this is active in the current generation. 
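To make the walk logic easier to follow outside the rbtree code: the bug pattern is trusting a scratch variable after a search loop instead of the loop cursor itself. Below is a minimal, self-contained C sketch of the corrected shape, with a plain backwards-linked walk and invented field names standing in for the rbtree helpers; it is an illustration, not the kernel code.

	struct node {
		struct node *prev;
		int is_end;	/* stands in for nft_rbtree_interval_end() */
		int active;	/* stands in for nft_set_elem_active() */
	};

	/* Return the closest preceding active end element, or NULL if there
	 * is none.  The detail mirrored from the fix: after the loop, only
	 * the cursor ('prev') says whether the search succeeded; a scratch
	 * variable may still point at the last element examined even though
	 * it did not match, and erasing that one is exactly the bug.
	 */
	static struct node *find_active_end(struct node *start)
	{
		struct node *prev = start->prev;

		while (prev) {
			if (prev->is_end && prev->active)
				break;		/* usable end element found */
			prev = prev->prev;
		}

		return prev;			/* NULL: nothing to erase */
	}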
Fixes: c9e6978e2725 ("netfilter: nft_set_rbtree: Switch to node list walk for overlap detection") Signed-off-by: Florian Westphal Signed-off-by: Sasha Levin --- net/netfilter/nft_set_rbtree.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c index 5c05c9b990fb..8d73fffd2d09 100644 --- a/net/netfilter/nft_set_rbtree.c +++ b/net/netfilter/nft_set_rbtree.c @@ -217,29 +217,37 @@ static void *nft_rbtree_get(const struct net *net, const struct nft_set *set, static int nft_rbtree_gc_elem(const struct nft_set *__set, struct nft_rbtree *priv, - struct nft_rbtree_elem *rbe) + struct nft_rbtree_elem *rbe, + u8 genmask) { struct nft_set *set = (struct nft_set *)__set; struct rb_node *prev = rb_prev(&rbe->node); - struct nft_rbtree_elem *rbe_prev = NULL; + struct nft_rbtree_elem *rbe_prev; struct nft_set_gc_batch *gcb; gcb = nft_set_gc_batch_check(set, NULL, GFP_ATOMIC); if (!gcb) return -ENOMEM; - /* search for expired end interval coming before this element. */ + /* search for end interval coming before this element. + * end intervals don't carry a timeout extension, they + * are coupled with the interval start element. + */ while (prev) { rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node); - if (nft_rbtree_interval_end(rbe_prev)) + if (nft_rbtree_interval_end(rbe_prev) && + nft_set_elem_active(&rbe_prev->ext, genmask)) break; prev = rb_prev(prev); } - if (rbe_prev) { + if (prev) { + rbe_prev = rb_entry(prev, struct nft_rbtree_elem, node); + rb_erase(&rbe_prev->node, &priv->root); atomic_dec(&set->nelems); + nft_set_gc_batch_add(gcb, rbe_prev); } rb_erase(&rbe->node, &priv->root); @@ -321,7 +329,7 @@ static int __nft_rbtree_insert(const struct net *net, const struct nft_set *set, /* perform garbage collection to avoid bogus overlap reports. */ if (nft_set_elem_expired(&rbe->ext)) { - err = nft_rbtree_gc_elem(set, priv, rbe); + err = nft_rbtree_gc_elem(set, priv, rbe, genmask); if (err < 0) return err; From 98bcfcaecc76c4be288278c213b47d36292f40fa Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sun, 23 Jul 2023 16:24:46 +0200 Subject: [PATCH 072/513] netfilter: nf_tables: skip immediate deactivate in _PREPARE_ERROR [ Upstream commit 0a771f7b266b02d262900c75f1e175c7fe76fec2 ] On error when building the rule, the immediate expression unbinds the chain, hence objects can be deactivated by the transaction records. 
Otherwise, it is possible to trigger the following warning: WARNING: CPU: 3 PID: 915 at net/netfilter/nf_tables_api.c:2013 nf_tables_chain_destroy+0x1f7/0x210 [nf_tables] CPU: 3 PID: 915 Comm: chain-bind-err- Not tainted 6.1.39 #1 RIP: 0010:nf_tables_chain_destroy+0x1f7/0x210 [nf_tables] Fixes: 4bedf9eee016 ("netfilter: nf_tables: fix chain binding transaction logic") Reported-by: Kevin Rich Signed-off-by: Pablo Neira Ayuso Signed-off-by: Florian Westphal Signed-off-by: Sasha Levin --- net/netfilter/nft_immediate.c | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 6b0efab4fad0..6bf1c852e8ea 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -125,15 +125,27 @@ static void nft_immediate_activate(const struct nft_ctx *ctx, return nft_data_hold(&priv->data, nft_dreg_to_type(priv->dreg)); } +static void nft_immediate_chain_deactivate(const struct nft_ctx *ctx, + struct nft_chain *chain, + enum nft_trans_phase phase) +{ + struct nft_ctx chain_ctx; + struct nft_rule *rule; + + chain_ctx = *ctx; + chain_ctx.chain = chain; + + list_for_each_entry(rule, &chain->rules, list) + nft_rule_expr_deactivate(&chain_ctx, rule, phase); +} + static void nft_immediate_deactivate(const struct nft_ctx *ctx, const struct nft_expr *expr, enum nft_trans_phase phase) { const struct nft_immediate_expr *priv = nft_expr_priv(expr); const struct nft_data *data = &priv->data; - struct nft_ctx chain_ctx; struct nft_chain *chain; - struct nft_rule *rule; if (priv->dreg == NFT_REG_VERDICT) { switch (data->verdict.code) { @@ -143,20 +155,17 @@ static void nft_immediate_deactivate(const struct nft_ctx *ctx, if (!nft_chain_binding(chain)) break; - chain_ctx = *ctx; - chain_ctx.chain = chain; - - list_for_each_entry(rule, &chain->rules, list) - nft_rule_expr_deactivate(&chain_ctx, rule, phase); - switch (phase) { case NFT_TRANS_PREPARE_ERROR: nf_tables_unbind_chain(ctx, chain); - fallthrough; + nft_deactivate_next(ctx->net, chain); + break; case NFT_TRANS_PREPARE: + nft_immediate_chain_deactivate(ctx, chain, phase); nft_deactivate_next(ctx->net, chain); break; default: + nft_immediate_chain_deactivate(ctx, chain, phase); nft_chain_del(chain); chain->bound = false; chain->table->use--; From 5bee91121ccea8d69cea51632e9a1dd348ee49a1 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sun, 23 Jul 2023 16:41:48 +0200 Subject: [PATCH 073/513] netfilter: nf_tables: disallow rule addition to bound chain via NFTA_RULE_CHAIN_ID [ Upstream commit 0ebc1064e4874d5987722a2ddbc18f94aa53b211 ] Bail out with EOPNOTSUPP when adding rule to bound chain via NFTA_RULE_CHAIN_ID. 
The following warning splat is shown when adding a rule to a deleted bound chain: WARNING: CPU: 2 PID: 13692 at net/netfilter/nf_tables_api.c:2013 nf_tables_chain_destroy+0x1f7/0x210 [nf_tables] CPU: 2 PID: 13692 Comm: chain-bound-rul Not tainted 6.1.39 #1 RIP: 0010:nf_tables_chain_destroy+0x1f7/0x210 [nf_tables] Fixes: d0e2c7de92c7 ("netfilter: nf_tables: add NFT_CHAIN_BINDING") Reported-by: Kevin Rich Signed-off-by: Pablo Neira Ayuso Signed-off-by: Florian Westphal Signed-off-by: Sasha Levin --- net/netfilter/nf_tables_api.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index e0e675313d8e..ce9f962380b7 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -3529,8 +3529,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info, NL_SET_BAD_ATTR(extack, nla[NFTA_RULE_CHAIN]); return PTR_ERR(chain); } - if (nft_chain_is_bound(chain)) - return -EOPNOTSUPP; } else if (nla[NFTA_RULE_CHAIN_ID]) { chain = nft_chain_lookup_byid(net, table, nla[NFTA_RULE_CHAIN_ID], @@ -3543,6 +3541,9 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info, return -EINVAL; } + if (nft_chain_is_bound(chain)) + return -EOPNOTSUPP; + if (nla[NFTA_RULE_HANDLE]) { handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_HANDLE])); rule = __nft_rule_lookup(chain, handle); From b1e85c9d28dd149f5b7e9a02c6f38008ce9f6425 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Sat, 4 Feb 2023 15:52:55 +0200 Subject: [PATCH 074/513] net/sched: mqprio: refactor nlattr parsing to a separate function [ Upstream commit feb2cf3dcfb930aec2ca65c66d1365543d5ba943 ] mqprio_init() is quite large and unwieldy to add more code to. Split the netlink attribute parsing to a dedicated function. Signed-off-by: Vladimir Oltean Reviewed-by: Jacob Keller Reviewed-by: Simon Horman Signed-off-by: David S. 
Miller Stable-dep-of: 6c58c8816abb ("net/sched: mqprio: Add length check for TCA_MQPRIO_{MAX/MIN}_RATE64") Signed-off-by: Sasha Levin --- net/sched/sch_mqprio.c | 114 +++++++++++++++++++++++------------------ 1 file changed, 63 insertions(+), 51 deletions(-) diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 50e15add6068..a5df5604e015 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -130,6 +130,67 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, return 0; } +static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt, + struct nlattr *opt) +{ + struct mqprio_sched *priv = qdisc_priv(sch); + struct nlattr *tb[TCA_MQPRIO_MAX + 1]; + struct nlattr *attr; + int i, rem, err; + + err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy, + sizeof(*qopt)); + if (err < 0) + return err; + + if (!qopt->hw) + return -EINVAL; + + if (tb[TCA_MQPRIO_MODE]) { + priv->flags |= TC_MQPRIO_F_MODE; + priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]); + } + + if (tb[TCA_MQPRIO_SHAPER]) { + priv->flags |= TC_MQPRIO_F_SHAPER; + priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]); + } + + if (tb[TCA_MQPRIO_MIN_RATE64]) { + if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) + return -EINVAL; + i = 0; + nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64], + rem) { + if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64) + return -EINVAL; + if (i >= qopt->num_tc) + break; + priv->min_rate[i] = *(u64 *)nla_data(attr); + i++; + } + priv->flags |= TC_MQPRIO_F_MIN_RATE; + } + + if (tb[TCA_MQPRIO_MAX_RATE64]) { + if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) + return -EINVAL; + i = 0; + nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64], + rem) { + if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64) + return -EINVAL; + if (i >= qopt->num_tc) + break; + priv->max_rate[i] = *(u64 *)nla_data(attr); + i++; + } + priv->flags |= TC_MQPRIO_F_MAX_RATE; + } + + return 0; +} + static int mqprio_init(struct Qdisc *sch, struct nlattr *opt, struct netlink_ext_ack *extack) { @@ -139,9 +200,6 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt, struct Qdisc *qdisc; int i, err = -EOPNOTSUPP; struct tc_mqprio_qopt *qopt = NULL; - struct nlattr *tb[TCA_MQPRIO_MAX + 1]; - struct nlattr *attr; - int rem; int len; BUILD_BUG_ON(TC_MAX_QUEUE != TC_QOPT_MAX_QUEUE); @@ -166,55 +224,9 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt, len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt)); if (len > 0) { - err = parse_attr(tb, TCA_MQPRIO_MAX, opt, mqprio_policy, - sizeof(*qopt)); - if (err < 0) + err = mqprio_parse_nlattr(sch, qopt, opt); + if (err) return err; - - if (!qopt->hw) - return -EINVAL; - - if (tb[TCA_MQPRIO_MODE]) { - priv->flags |= TC_MQPRIO_F_MODE; - priv->mode = *(u16 *)nla_data(tb[TCA_MQPRIO_MODE]); - } - - if (tb[TCA_MQPRIO_SHAPER]) { - priv->flags |= TC_MQPRIO_F_SHAPER; - priv->shaper = *(u16 *)nla_data(tb[TCA_MQPRIO_SHAPER]); - } - - if (tb[TCA_MQPRIO_MIN_RATE64]) { - if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) - return -EINVAL; - i = 0; - nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64], - rem) { - if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64) - return -EINVAL; - if (i >= qopt->num_tc) - break; - priv->min_rate[i] = *(u64 *)nla_data(attr); - i++; - } - priv->flags |= TC_MQPRIO_F_MIN_RATE; - } - - if (tb[TCA_MQPRIO_MAX_RATE64]) { - if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) - return -EINVAL; - i = 0; - nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64], - rem) { - if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64) - return -EINVAL; - if (i >= 
qopt->num_tc) - break; - priv->max_rate[i] = *(u64 *)nla_data(attr); - i++; - } - priv->flags |= TC_MQPRIO_F_MAX_RATE; - } } /* pre-allocate qdisc, attachment can't fail */ From 98f6bbdfc0cecdc2a38cc4781df1363bb0576791 Mon Sep 17 00:00:00 2001 From: Vladimir Oltean Date: Tue, 11 Apr 2023 21:01:51 +0300 Subject: [PATCH 075/513] net/sched: mqprio: add extack to mqprio_parse_nlattr() [ Upstream commit 57f21bf85400abadac0cb2a4db5de1d663f8863f ] Netlink attribute parsing in mqprio is a minesweeper game, with many options having the possibility of being passed incorrectly and the user being none the wiser. Try to make errors less sour by giving user space some information regarding what went wrong. Signed-off-by: Vladimir Oltean Reviewed-by: Ferenc Fejes Reviewed-by: Simon Horman Acked-by: Jamal Hadi Salim Signed-off-by: Jakub Kicinski Stable-dep-of: 6c58c8816abb ("net/sched: mqprio: Add length check for TCA_MQPRIO_{MAX/MIN}_RATE64") Signed-off-by: Sasha Levin --- net/sched/sch_mqprio.c | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index a5df5604e015..4ec222a5530d 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -131,7 +131,8 @@ static int parse_attr(struct nlattr *tb[], int maxtype, struct nlattr *nla, } static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt, - struct nlattr *opt) + struct nlattr *opt, + struct netlink_ext_ack *extack) { struct mqprio_sched *priv = qdisc_priv(sch); struct nlattr *tb[TCA_MQPRIO_MAX + 1]; @@ -143,8 +144,11 @@ static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt, if (err < 0) return err; - if (!qopt->hw) + if (!qopt->hw) { + NL_SET_ERR_MSG(extack, + "mqprio TCA_OPTIONS can only contain netlink attributes in hardware mode"); return -EINVAL; + } if (tb[TCA_MQPRIO_MODE]) { priv->flags |= TC_MQPRIO_F_MODE; @@ -157,13 +161,19 @@ static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt, } if (tb[TCA_MQPRIO_MIN_RATE64]) { - if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) + if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) { + NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MIN_RATE64], + "min_rate accepted only when shaper is in bw_rlimit mode"); return -EINVAL; + } i = 0; nla_for_each_nested(attr, tb[TCA_MQPRIO_MIN_RATE64], rem) { - if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64) + if (nla_type(attr) != TCA_MQPRIO_MIN_RATE64) { + NL_SET_ERR_MSG_ATTR(extack, attr, + "Attribute type expected to be TCA_MQPRIO_MIN_RATE64"); return -EINVAL; + } if (i >= qopt->num_tc) break; priv->min_rate[i] = *(u64 *)nla_data(attr); @@ -173,13 +183,19 @@ static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt, } if (tb[TCA_MQPRIO_MAX_RATE64]) { - if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) + if (priv->shaper != TC_MQPRIO_SHAPER_BW_RATE) { + NL_SET_ERR_MSG_ATTR(extack, tb[TCA_MQPRIO_MAX_RATE64], + "max_rate accepted only when shaper is in bw_rlimit mode"); return -EINVAL; + } i = 0; nla_for_each_nested(attr, tb[TCA_MQPRIO_MAX_RATE64], rem) { - if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64) + if (nla_type(attr) != TCA_MQPRIO_MAX_RATE64) { + NL_SET_ERR_MSG_ATTR(extack, attr, + "Attribute type expected to be TCA_MQPRIO_MAX_RATE64"); return -EINVAL; + } if (i >= qopt->num_tc) break; priv->max_rate[i] = *(u64 *)nla_data(attr); @@ -224,7 +240,7 @@ static int mqprio_init(struct Qdisc *sch, struct nlattr *opt, len = nla_len(opt) - NLA_ALIGN(sizeof(*qopt)); if (len > 0) { - err = mqprio_parse_nlattr(sch, 
qopt, opt); + err = mqprio_parse_nlattr(sch, qopt, opt, extack); if (err) return err; } From 95cf4fa31b0cebeef9cb98654386037077d06dcd Mon Sep 17 00:00:00 2001 From: Lin Ma Date: Tue, 25 Jul 2023 10:42:27 +0800 Subject: [PATCH 076/513] net/sched: mqprio: Add length check for TCA_MQPRIO_{MAX/MIN}_RATE64 [ Upstream commit 6c58c8816abb7b93b21fa3b1d0c1726402e5e568 ] The nla_for_each_nested parsing in function mqprio_parse_nlattr() does not check the length of the nested attribute. This can lead to an out-of-attribute read and allow a malformed nlattr (e.g., length 0) to be viewed as 8 byte integer and passed to priv->max_rate/min_rate. This patch adds the check based on nla_len() when check the nla_type(), which ensures that the length of these two attribute must equals sizeof(u64). Fixes: 4e8b86c06269 ("mqprio: Introduce new hardware offload mode and shaper in mqprio") Reviewed-by: Victor Nogueira Signed-off-by: Lin Ma Link: https://lore.kernel.org/r/20230725024227.426561-1-linma@zju.edu.cn Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/sched/sch_mqprio.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/net/sched/sch_mqprio.c b/net/sched/sch_mqprio.c index 4ec222a5530d..56d3dc5e95c7 100644 --- a/net/sched/sch_mqprio.c +++ b/net/sched/sch_mqprio.c @@ -174,6 +174,13 @@ static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt, "Attribute type expected to be TCA_MQPRIO_MIN_RATE64"); return -EINVAL; } + + if (nla_len(attr) != sizeof(u64)) { + NL_SET_ERR_MSG_ATTR(extack, attr, + "Attribute TCA_MQPRIO_MIN_RATE64 expected to have 8 bytes length"); + return -EINVAL; + } + if (i >= qopt->num_tc) break; priv->min_rate[i] = *(u64 *)nla_data(attr); @@ -196,6 +203,13 @@ static int mqprio_parse_nlattr(struct Qdisc *sch, struct tc_mqprio_qopt *qopt, "Attribute type expected to be TCA_MQPRIO_MAX_RATE64"); return -EINVAL; } + + if (nla_len(attr) != sizeof(u64)) { + NL_SET_ERR_MSG_ATTR(extack, attr, + "Attribute TCA_MQPRIO_MAX_RATE64 expected to have 8 bytes length"); + return -EINVAL; + } + if (i >= qopt->num_tc) break; priv->max_rate[i] = *(u64 *)nla_data(attr); From 42afa7ef6629ae237ba5fb6a4c60e8bf0b41206b Mon Sep 17 00:00:00 2001 From: Yuanjun Gong Date: Tue, 25 Jul 2023 11:27:26 +0800 Subject: [PATCH 077/513] benet: fix return value check in be_lancer_xmit_workarounds() [ Upstream commit 5c85f7065718a949902b238a6abd8fc907c5d3e0 ] in be_lancer_xmit_workarounds(), it should go to label 'tx_drop' if an unexpected value is returned by pskb_trim(). 
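The rule the change enforces generalizes well: a helper that may fail to shrink the buffer cannot be treated as infallible, and the failure path should drop the packet. A rough self-contained C sketch of that control flow, with invented types and names rather than the driver's:

	struct pkt {
		unsigned int len;
	};

	/* hypothetical trim helper: like pskb_trim(), it can fail */
	static int trim_pkt(struct pkt *p, unsigned int new_len)
	{
		if (new_len > p->len)
			return -1;	/* cannot trim; caller must bail out */
		p->len = new_len;
		return 0;
	}

	static int xmit_workaround(struct pkt *p, unsigned int hdr_len,
				   unsigned int payload_len)
	{
		if (trim_pkt(p, hdr_len + payload_len))
			goto tx_drop;	/* same shape as the driver fix */

		/* ... hand the consistently sized packet to the hardware ... */
		return 0;

	tx_drop:
		/* free the packet and count the drop */
		return -1;
	}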
Fixes: 93040ae5cc8d ("be2net: Fix to trim skb for padded vlan packets to workaround an ASIC Bug") Signed-off-by: Yuanjun Gong Link: https://lore.kernel.org/r/20230725032726.15002-1-ruc_gongyuanjun@163.com Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- drivers/net/ethernet/emulex/benet/be_main.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 3ccb955eb6f2..c14a3dbd075c 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1139,7 +1139,8 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, (lancer_chip(adapter) || BE3_chip(adapter) || skb_vlan_tag_present(skb)) && is_ipv4_pkt(skb)) { ip = (struct iphdr *)ip_hdr(skb); - pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)); + if (unlikely(pskb_trim(skb, eth_hdr_len + ntohs(ip->tot_len)))) + goto tx_drop; } /* If vlan tag is already inlined in the packet, skip HW VLAN From df019bc1241e68ded3de2cc328e8c61c100d8740 Mon Sep 17 00:00:00 2001 From: Yuanjun Gong Date: Tue, 25 Jul 2023 14:48:10 +0800 Subject: [PATCH 078/513] tipc: check return value of pskb_trim() [ Upstream commit e46e06ffc6d667a89b979701288e2264f45e6a7b ] goto free_skb if an unexpected result is returned by pskb_tirm() in tipc_crypto_rcv_complete(). Fixes: fc1b6d6de220 ("tipc: introduce TIPC encryption & authentication") Signed-off-by: Yuanjun Gong Reviewed-by: Tung Nguyen Link: https://lore.kernel.org/r/20230725064810.5820-1-ruc_gongyuanjun@163.com Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- net/tipc/crypto.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/tipc/crypto.c b/net/tipc/crypto.c index 4243d2ab8adf..32447e8d94ac 100644 --- a/net/tipc/crypto.c +++ b/net/tipc/crypto.c @@ -1971,7 +1971,8 @@ static void tipc_crypto_rcv_complete(struct net *net, struct tipc_aead *aead, skb_reset_network_header(*skb); skb_pull(*skb, tipc_ehdr_size(ehdr)); - pskb_trim(*skb, (*skb)->len - aead->authsize); + if (pskb_trim(*skb, (*skb)->len - aead->authsize)) + goto free_skb; /* Validate TIPCv2 message */ if (unlikely(!tipc_msg_validate(skb))) { From 9dde876a4dc8cc8be8ed4ccf34c137723f968a3a Mon Sep 17 00:00:00 2001 From: Fedor Pchelkin Date: Wed, 26 Jul 2023 00:46:25 +0300 Subject: [PATCH 079/513] tipc: stop tipc crypto on failure in tipc_node_create [ Upstream commit de52e17326c3e9a719c9ead4adb03467b8fae0ef ] If tipc_link_bc_create() fails inside tipc_node_create() for a newly allocated tipc node then we should stop its tipc crypto and free the resources allocated with a call to tipc_crypto_start(). As the node ref is initialized to one to that point, just put the ref on tipc_link_bc_create() error case that would lead to tipc_node_free() be eventually executed and properly clean the node and its crypto resources. Found by Linux Verification Center (linuxtesting.org). 
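The general rule here: once an object's reference count owns other resources, error paths must drop the reference and let the release path run instead of freeing the outer object directly. A simplified userspace sketch of that shape, using hypothetical names and plain malloc/free in place of the kernel allocators:

	#include <stdlib.h>

	struct crypto_state { int dummy; };	/* stands in for the tipc crypto state */

	struct node {
		int refcount;			/* starts at 1, like the tipc node */
		struct crypto_state *crypto;	/* owned by the node */
	};

	static void node_free(struct node *n)
	{
		free(n->crypto);		/* release everything the node owns */
		free(n);
	}

	static void node_put(struct node *n)
	{
		if (--n->refcount == 0)
			node_free(n);
	}

	static struct node *node_create(int later_step_fails)
	{
		struct node *n = calloc(1, sizeof(*n));

		if (!n)
			return NULL;
		n->refcount = 1;

		n->crypto = calloc(1, sizeof(*n->crypto));
		if (!n->crypto) {
			free(n);		/* nothing else owned yet */
			return NULL;
		}

		if (later_step_fails) {		/* e.g. broadcast link creation */
			node_put(n);		/* not free(n): the put path also
						 * cleans up ->crypto */
			return NULL;
		}
		return n;
	}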
Fixes: cb8092d70a6f ("tipc: move bc link creation back to tipc_node_create") Suggested-by: Xin Long Signed-off-by: Fedor Pchelkin Reviewed-by: Xin Long Link: https://lore.kernel.org/r/20230725214628.25246-1-pchelkin@ispras.ru Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- net/tipc/node.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/tipc/node.c b/net/tipc/node.c index 5e000fde8067..a9c5b6594889 100644 --- a/net/tipc/node.c +++ b/net/tipc/node.c @@ -583,7 +583,7 @@ struct tipc_node *tipc_node_create(struct net *net, u32 addr, u8 *peer_id, n->capabilities, &n->bc_entry.inputq1, &n->bc_entry.namedq, snd_l, &n->bc_entry.link)) { pr_warn("Broadcast rcv link creation failed, no memory\n"); - kfree(n); + tipc_node_put(n); n = NULL; goto exit; } From 6ab756a55e46ebc3944ac73d9c18f2321b0b150b Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Thu, 29 Jun 2023 09:07:37 +0300 Subject: [PATCH 080/513] RDMA/mlx4: Make check for invalid flags stricter [ Upstream commit d64b1ee12a168030fbb3e0aebf7bce49e9a07589 ] This code is trying to ensure that only the flags specified in the list are allowed. The problem is that ucmd->rx_hash_fields_mask is a u64 and the flags are an enum which is treated as a u32 in this context. That means the test doesn't check whether the highest 32 bits are zero. Fixes: 4d02ebd9bbbd ("IB/mlx4: Fix RSS hash fields restrictions") Signed-off-by: Dan Carpenter Link: https://lore.kernel.org/r/233ed975-982d-422a-b498-410f71d8a101@moroto.mountain Signed-off-by: Leon Romanovsky Signed-off-by: Sasha Levin --- drivers/infiniband/hw/mlx4/qp.c | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index ec545b8858cc..43b2aad84591 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -530,15 +530,15 @@ static int set_qp_rss(struct mlx4_ib_dev *dev, struct mlx4_ib_rss *rss_ctx, return (-EOPNOTSUPP); } - if (ucmd->rx_hash_fields_mask & ~(MLX4_IB_RX_HASH_SRC_IPV4 | - MLX4_IB_RX_HASH_DST_IPV4 | - MLX4_IB_RX_HASH_SRC_IPV6 | - MLX4_IB_RX_HASH_DST_IPV6 | - MLX4_IB_RX_HASH_SRC_PORT_TCP | - MLX4_IB_RX_HASH_DST_PORT_TCP | - MLX4_IB_RX_HASH_SRC_PORT_UDP | - MLX4_IB_RX_HASH_DST_PORT_UDP | - MLX4_IB_RX_HASH_INNER)) { + if (ucmd->rx_hash_fields_mask & ~(u64)(MLX4_IB_RX_HASH_SRC_IPV4 | + MLX4_IB_RX_HASH_DST_IPV4 | + MLX4_IB_RX_HASH_SRC_IPV6 | + MLX4_IB_RX_HASH_DST_IPV6 | + MLX4_IB_RX_HASH_SRC_PORT_TCP | + MLX4_IB_RX_HASH_DST_PORT_TCP | + MLX4_IB_RX_HASH_SRC_PORT_UDP | + MLX4_IB_RX_HASH_DST_PORT_UDP | + MLX4_IB_RX_HASH_INNER)) { pr_debug("RX Hash fields_mask has unsupported mask (0x%llx)\n", ucmd->rx_hash_fields_mask); return (-EOPNOTSUPP); From 4e9d4a21616b8f7412035005cd32d0cc1ac4a8ed Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Fri, 7 Jul 2023 22:39:32 +0300 Subject: [PATCH 081/513] drm/msm/dpu: drop enum dpu_core_perf_data_bus_id [ Upstream commit e8383f5cf1b3573ce140a80bfbfd809278ab16d6 ] Drop the leftover of bus-client -> interconnect conversion, the enum dpu_core_perf_data_bus_id. 
Fixes: cb88482e2570 ("drm/msm/dpu: clean up references of DPU custom bus scaling") Reviewed-by: Konrad Dybcio Reviewed-by: Abhinav Kumar Signed-off-by: Dmitry Baryshkov Patchwork: https://patchwork.freedesktop.org/patch/546048/ Link: https://lore.kernel.org/r/20230707193942.3806526-2-dmitry.baryshkov@linaro.org Signed-off-by: Abhinav Kumar Signed-off-by: Sasha Levin --- drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h index cf4b9b5964c6..cd6c3518ba02 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h @@ -14,19 +14,6 @@ #define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000 -/** - * enum dpu_core_perf_data_bus_id - data bus identifier - * @DPU_CORE_PERF_DATA_BUS_ID_MNOC: DPU/MNOC data bus - * @DPU_CORE_PERF_DATA_BUS_ID_LLCC: MNOC/LLCC data bus - * @DPU_CORE_PERF_DATA_BUS_ID_EBI: LLCC/EBI data bus - */ -enum dpu_core_perf_data_bus_id { - DPU_CORE_PERF_DATA_BUS_ID_MNOC, - DPU_CORE_PERF_DATA_BUS_ID_LLCC, - DPU_CORE_PERF_DATA_BUS_ID_EBI, - DPU_CORE_PERF_DATA_BUS_ID_MAX, -}; - /** * struct dpu_core_perf_params - definition of performance parameters * @max_per_pipe_ib: maximum instantaneous bandwidth request From 5fbb5068d2bdc1f0ec454560b05dbce1d4a94d81 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Tue, 11 Jul 2023 10:54:07 -0700 Subject: [PATCH 082/513] drm/msm/adreno: Fix snapshot BINDLESS_DATA size [ Upstream commit bd846ceee9c478d0397428f02696602ba5eb264a ] The incorrect size was causing "CP | AHB bus error" when snapshotting the GPU state on a6xx gen4 (a660 family). Closes: https://gitlab.freedesktop.org/drm/msm/-/issues/26 Signed-off-by: Rob Clark Reviewed-by: Akhil P Oommen Fixes: 1707add81551 ("drm/msm/a6xx: Add a6xx gpu state") Patchwork: https://patchwork.freedesktop.org/patch/546763/ Signed-off-by: Sasha Levin --- drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h index 2fb58b7098e4..3bd2065a9d30 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.h @@ -200,7 +200,7 @@ static const struct a6xx_shader_block { SHADER(A6XX_SP_LB_3_DATA, 0x800), SHADER(A6XX_SP_LB_4_DATA, 0x800), SHADER(A6XX_SP_LB_5_DATA, 0x200), - SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x2000), + SHADER(A6XX_SP_CB_BINDLESS_DATA, 0x800), SHADER(A6XX_SP_CB_LEGACY_DATA, 0x280), SHADER(A6XX_SP_UAV_DATA, 0x80), SHADER(A6XX_SP_INST_TAG, 0x80), From fd6e50ec2c387a45acd290ba0aa85e74cc9eadbb Mon Sep 17 00:00:00 2001 From: Shiraz Saleem Date: Tue, 11 Jul 2023 12:52:51 -0500 Subject: [PATCH 083/513] RDMA/irdma: Add missing read barriers [ Upstream commit 4984eb51453ff7eddee9e5ce816145be39c0ec5c ] On code inspection, there are many instances in the driver where CEQE and AEQE fields written to by HW are read without guaranteeing that the polarity bit has been read and checked first. Add a read barrier to avoid reordering of loads on the CEQE/AEQE fields prior to checking the polarity bit. 
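For readers unfamiliar with the valid-bit convention: the entry payload may only be read once the polarity/valid bit has been observed, and that ordering must be enforced explicitly. The following userspace analogue uses C11 acquire/release with invented structure names; the driver itself needs dma_rmb() because the producer is the DMA engine rather than another CPU thread.

	#include <stdatomic.h>
	#include <stdbool.h>

	/* hypothetical completion entry: 'valid' plays the role of the
	 * CEQE/AEQE polarity bit, 'payload' the fields the device fills in */
	struct cqe {
		_Atomic bool valid;
		unsigned long payload;
	};

	/* Consumer: the acquire load orders the payload read after the valid
	 * check, which is the ordering dma_rmb() provides in the driver. */
	static bool poll_one(struct cqe *e, unsigned long *out)
	{
		if (!atomic_load_explicit(&e->valid, memory_order_acquire))
			return false;		/* queue empty */
		*out = e->payload;		/* cannot be hoisted above the check */
		return true;
	}

	/* Producer (software model of the device): publish the payload first,
	 * then set the valid bit with release semantics. */
	static void post_one(struct cqe *e, unsigned long val)
	{
		e->payload = val;
		atomic_store_explicit(&e->valid, true, memory_order_release);
	}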
Fixes: 3f49d6842569 ("RDMA/irdma: Implement HW Admin Queue OPs") Signed-off-by: Shiraz Saleem Link: https://lore.kernel.org/r/20230711175253.1289-2-shiraz.saleem@intel.com Signed-off-by: Leon Romanovsky Signed-off-by: Sasha Levin --- drivers/infiniband/hw/irdma/ctrl.c | 9 ++++++++- drivers/infiniband/hw/irdma/puda.c | 6 ++++++ drivers/infiniband/hw/irdma/uk.c | 3 +++ 3 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c index 1ac7067e21be..ab195ae089cf 100644 --- a/drivers/infiniband/hw/irdma/ctrl.c +++ b/drivers/infiniband/hw/irdma/ctrl.c @@ -3395,6 +3395,9 @@ enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq, if (polarity != ccq->cq_uk.polarity) return IRDMA_ERR_Q_EMPTY; + /* Ensure CEQE contents are read after valid bit is checked */ + dma_rmb(); + get_64bit_val(cqe, 8, &qp_ctx); cqp = (struct irdma_sc_cqp *)(unsigned long)qp_ctx; info->error = (bool)FIELD_GET(IRDMA_CQ_ERROR, temp); @@ -4046,13 +4049,17 @@ enum irdma_status_code irdma_sc_get_next_aeqe(struct irdma_sc_aeq *aeq, u8 polarity; aeqe = IRDMA_GET_CURRENT_AEQ_ELEM(aeq); - get_64bit_val(aeqe, 0, &compl_ctx); get_64bit_val(aeqe, 8, &temp); polarity = (u8)FIELD_GET(IRDMA_AEQE_VALID, temp); if (aeq->polarity != polarity) return IRDMA_ERR_Q_EMPTY; + /* Ensure AEQE contents are read after valid bit is checked */ + dma_rmb(); + + get_64bit_val(aeqe, 0, &compl_ctx); + print_hex_dump_debug("WQE: AEQ_ENTRY WQE", DUMP_PREFIX_OFFSET, 16, 8, aeqe, 16, false); diff --git a/drivers/infiniband/hw/irdma/puda.c b/drivers/infiniband/hw/irdma/puda.c index 58e7d875643b..197eba5eb78f 100644 --- a/drivers/infiniband/hw/irdma/puda.c +++ b/drivers/infiniband/hw/irdma/puda.c @@ -235,6 +235,9 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info) if (valid_bit != cq_uk->polarity) return IRDMA_ERR_Q_EMPTY; + /* Ensure CQE contents are read after valid bit is checked */ + dma_rmb(); + if (cq->dev->hw_attrs.uk_attrs.hw_rev >= IRDMA_GEN_2) ext_valid = (bool)FIELD_GET(IRDMA_CQ_EXTCQE, qword3); @@ -248,6 +251,9 @@ irdma_puda_poll_info(struct irdma_sc_cq *cq, struct irdma_puda_cmpl_info *info) if (polarity != cq_uk->polarity) return IRDMA_ERR_Q_EMPTY; + /* Ensure ext CQE contents are read after ext valid bit is checked */ + dma_rmb(); + IRDMA_RING_MOVE_HEAD_NOCHECK(cq_uk->cq_ring); if (!IRDMA_RING_CURRENT_HEAD(cq_uk->cq_ring)) cq_uk->polarity = !cq_uk->polarity; diff --git a/drivers/infiniband/hw/irdma/uk.c b/drivers/infiniband/hw/irdma/uk.c index a348f0c010ab..4b00a9adbe3a 100644 --- a/drivers/infiniband/hw/irdma/uk.c +++ b/drivers/infiniband/hw/irdma/uk.c @@ -1549,6 +1549,9 @@ void irdma_uk_clean_cq(void *q, struct irdma_cq_uk *cq) if (polarity != temp) break; + /* Ensure CQE contents are read after valid bit is checked */ + dma_rmb(); + get_64bit_val(cqe, 8, &comp_ctx); if ((void *)(unsigned long)comp_ctx == q) set_64bit_val(cqe, 8, 0); From bf0f9f65b7fe36ea9d2e23263dcefc90255d7b1f Mon Sep 17 00:00:00 2001 From: Shiraz Saleem Date: Tue, 11 Jul 2023 12:52:52 -0500 Subject: [PATCH 084/513] RDMA/irdma: Fix data race on CQP completion stats [ Upstream commit f2c3037811381f9149243828c7eb9a1631df9f9c ] CQP completion statistics is read lockesly in irdma_wait_event and irdma_check_cqp_progress while it can be updated in the completion thread irdma_sc_ccq_get_cqe_info on another CPU as KCSAN reports. Make completion statistics an atomic variable to reflect coherent updates to it. 
This will also avoid load/store tearing logic bug potentially possible by compiler optimizations. [77346.170861] BUG: KCSAN: data-race in irdma_handle_cqp_op [irdma] / irdma_sc_ccq_get_cqe_info [irdma] [77346.171383] write to 0xffff8a3250b108e0 of 8 bytes by task 9544 on cpu 4: [77346.171483] irdma_sc_ccq_get_cqe_info+0x27a/0x370 [irdma] [77346.171658] irdma_cqp_ce_handler+0x164/0x270 [irdma] [77346.171835] cqp_compl_worker+0x1b/0x20 [irdma] [77346.172009] process_one_work+0x4d1/0xa40 [77346.172024] worker_thread+0x319/0x700 [77346.172037] kthread+0x180/0x1b0 [77346.172054] ret_from_fork+0x22/0x30 [77346.172136] read to 0xffff8a3250b108e0 of 8 bytes by task 9838 on cpu 2: [77346.172234] irdma_handle_cqp_op+0xf4/0x4b0 [irdma] [77346.172413] irdma_cqp_aeq_cmd+0x75/0xa0 [irdma] [77346.172592] irdma_create_aeq+0x390/0x45a [irdma] [77346.172769] irdma_rt_init_hw.cold+0x212/0x85d [irdma] [77346.172944] irdma_probe+0x54f/0x620 [irdma] [77346.173122] auxiliary_bus_probe+0x66/0xa0 [77346.173137] really_probe+0x140/0x540 [77346.173154] __driver_probe_device+0xc7/0x220 [77346.173173] driver_probe_device+0x5f/0x140 [77346.173190] __driver_attach+0xf0/0x2c0 [77346.173208] bus_for_each_dev+0xa8/0xf0 [77346.173225] driver_attach+0x29/0x30 [77346.173240] bus_add_driver+0x29c/0x2f0 [77346.173255] driver_register+0x10f/0x1a0 [77346.173272] __auxiliary_driver_register+0xbc/0x140 [77346.173287] irdma_init_module+0x55/0x1000 [irdma] [77346.173460] do_one_initcall+0x7d/0x410 [77346.173475] do_init_module+0x81/0x2c0 [77346.173491] load_module+0x1232/0x12c0 [77346.173506] __do_sys_finit_module+0x101/0x180 [77346.173522] __x64_sys_finit_module+0x3c/0x50 [77346.173538] do_syscall_64+0x39/0x90 [77346.173553] entry_SYSCALL_64_after_hwframe+0x63/0xcd [77346.173634] value changed: 0x0000000000000094 -> 0x0000000000000095 Fixes: 915cc7ac0f8e ("RDMA/irdma: Add miscellaneous utility definitions") Signed-off-by: Shiraz Saleem Link: https://lore.kernel.org/r/20230711175253.1289-3-shiraz.saleem@intel.com Signed-off-by: Leon Romanovsky Signed-off-by: Sasha Levin --- drivers/infiniband/hw/irdma/ctrl.c | 22 +++++++------- drivers/infiniband/hw/irdma/defs.h | 46 ++++++++++++++--------------- drivers/infiniband/hw/irdma/type.h | 2 ++ drivers/infiniband/hw/irdma/utils.c | 2 +- 4 files changed, 36 insertions(+), 36 deletions(-) diff --git a/drivers/infiniband/hw/irdma/ctrl.c b/drivers/infiniband/hw/irdma/ctrl.c index ab195ae089cf..ad14c2404e94 100644 --- a/drivers/infiniband/hw/irdma/ctrl.c +++ b/drivers/infiniband/hw/irdma/ctrl.c @@ -2741,13 +2741,13 @@ irdma_sc_cq_modify(struct irdma_sc_cq *cq, struct irdma_modify_cq_info *info, */ void irdma_check_cqp_progress(struct irdma_cqp_timeout *timeout, struct irdma_sc_dev *dev) { - if (timeout->compl_cqp_cmds != dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]) { - timeout->compl_cqp_cmds = dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]; + u64 completed_ops = atomic64_read(&dev->cqp->completed_ops); + + if (timeout->compl_cqp_cmds != completed_ops) { + timeout->compl_cqp_cmds = completed_ops; timeout->count = 0; - } else { - if (dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] != - timeout->compl_cqp_cmds) - timeout->count++; + } else if (timeout->compl_cqp_cmds != dev->cqp->requested_ops) { + timeout->count++; } } @@ -2790,7 +2790,7 @@ static enum irdma_status_code irdma_cqp_poll_registers(struct irdma_sc_cqp *cqp, if (newtail != tail) { /* SUCCESS */ IRDMA_RING_MOVE_TAIL(cqp->sq_ring); - cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++; + atomic64_inc(&cqp->completed_ops); return 0; } 
udelay(cqp->dev->hw_attrs.max_sleep_count); @@ -3152,8 +3152,8 @@ enum irdma_status_code irdma_sc_cqp_init(struct irdma_sc_cqp *cqp, info->dev->cqp = cqp; IRDMA_RING_INIT(cqp->sq_ring, cqp->sq_size); - cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS] = 0; - cqp->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS] = 0; + cqp->requested_ops = 0; + atomic64_set(&cqp->completed_ops, 0); /* for the cqp commands backlog. */ INIT_LIST_HEAD(&cqp->dev->cqp_cmd_head); @@ -3306,7 +3306,7 @@ __le64 *irdma_sc_cqp_get_next_send_wqe_idx(struct irdma_sc_cqp *cqp, u64 scratch if (ret_code) return NULL; - cqp->dev->cqp_cmd_stats[IRDMA_OP_REQ_CMDS]++; + cqp->requested_ops++; if (!*wqe_idx) cqp->polarity = !cqp->polarity; wqe = cqp->sq_base[*wqe_idx].elem; @@ -3432,7 +3432,7 @@ enum irdma_status_code irdma_sc_ccq_get_cqe_info(struct irdma_sc_cq *ccq, dma_wmb(); /* make sure shadow area is updated before moving tail */ IRDMA_RING_MOVE_TAIL(cqp->sq_ring); - ccq->dev->cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]++; + atomic64_inc(&cqp->completed_ops); return ret_code; } diff --git a/drivers/infiniband/hw/irdma/defs.h b/drivers/infiniband/hw/irdma/defs.h index b8c10a6ccede..afd16a93ac69 100644 --- a/drivers/infiniband/hw/irdma/defs.h +++ b/drivers/infiniband/hw/irdma/defs.h @@ -190,32 +190,30 @@ enum irdma_cqp_op_type { IRDMA_OP_MANAGE_VF_PBLE_BP = 25, IRDMA_OP_QUERY_FPM_VAL = 26, IRDMA_OP_COMMIT_FPM_VAL = 27, - IRDMA_OP_REQ_CMDS = 28, - IRDMA_OP_CMPL_CMDS = 29, - IRDMA_OP_AH_CREATE = 30, - IRDMA_OP_AH_MODIFY = 31, - IRDMA_OP_AH_DESTROY = 32, - IRDMA_OP_MC_CREATE = 33, - IRDMA_OP_MC_DESTROY = 34, - IRDMA_OP_MC_MODIFY = 35, - IRDMA_OP_STATS_ALLOCATE = 36, - IRDMA_OP_STATS_FREE = 37, - IRDMA_OP_STATS_GATHER = 38, - IRDMA_OP_WS_ADD_NODE = 39, - IRDMA_OP_WS_MODIFY_NODE = 40, - IRDMA_OP_WS_DELETE_NODE = 41, - IRDMA_OP_WS_FAILOVER_START = 42, - IRDMA_OP_WS_FAILOVER_COMPLETE = 43, - IRDMA_OP_SET_UP_MAP = 44, - IRDMA_OP_GEN_AE = 45, - IRDMA_OP_QUERY_RDMA_FEATURES = 46, - IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY = 47, - IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 48, - IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 49, - IRDMA_OP_CQ_MODIFY = 50, + IRDMA_OP_AH_CREATE = 28, + IRDMA_OP_AH_MODIFY = 29, + IRDMA_OP_AH_DESTROY = 30, + IRDMA_OP_MC_CREATE = 31, + IRDMA_OP_MC_DESTROY = 32, + IRDMA_OP_MC_MODIFY = 33, + IRDMA_OP_STATS_ALLOCATE = 34, + IRDMA_OP_STATS_FREE = 35, + IRDMA_OP_STATS_GATHER = 36, + IRDMA_OP_WS_ADD_NODE = 37, + IRDMA_OP_WS_MODIFY_NODE = 38, + IRDMA_OP_WS_DELETE_NODE = 39, + IRDMA_OP_WS_FAILOVER_START = 40, + IRDMA_OP_WS_FAILOVER_COMPLETE = 41, + IRDMA_OP_SET_UP_MAP = 42, + IRDMA_OP_GEN_AE = 43, + IRDMA_OP_QUERY_RDMA_FEATURES = 44, + IRDMA_OP_ALLOC_LOCAL_MAC_ENTRY = 45, + IRDMA_OP_ADD_LOCAL_MAC_ENTRY = 46, + IRDMA_OP_DELETE_LOCAL_MAC_ENTRY = 47, + IRDMA_OP_CQ_MODIFY = 48, /* Must be last entry*/ - IRDMA_MAX_CQP_OPS = 51, + IRDMA_MAX_CQP_OPS = 49, }; /* CQP SQ WQES */ diff --git a/drivers/infiniband/hw/irdma/type.h b/drivers/infiniband/hw/irdma/type.h index 1241e5988c10..8b75e2610e5b 100644 --- a/drivers/infiniband/hw/irdma/type.h +++ b/drivers/infiniband/hw/irdma/type.h @@ -411,6 +411,8 @@ struct irdma_sc_cqp { struct irdma_dcqcn_cc_params dcqcn_params; __le64 *host_ctx; u64 *scratch_array; + u64 requested_ops; + atomic64_t completed_ops; u32 cqp_id; u32 sq_size; u32 hw_sq_size; diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c index 1d9280d46d08..d96779996e02 100644 --- a/drivers/infiniband/hw/irdma/utils.c +++ b/drivers/infiniband/hw/irdma/utils.c @@ -567,7 +567,7 @@ static enum irdma_status_code irdma_wait_event(struct irdma_pci_f 
*rf, bool cqp_error = false; enum irdma_status_code err_code = 0; - cqp_timeout.compl_cqp_cmds = rf->sc_dev.cqp_cmd_stats[IRDMA_OP_CMPL_CMDS]; + cqp_timeout.compl_cqp_cmds = atomic64_read(&rf->sc_dev.cqp->completed_ops); do { irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq); if (wait_event_timeout(cqp_request->waitq, From c5b5dbcbf91f769b8eb25f88e32a1522f920f37a Mon Sep 17 00:00:00 2001 From: Shiraz Saleem Date: Tue, 11 Jul 2023 12:52:53 -0500 Subject: [PATCH 085/513] RDMA/irdma: Fix data race on CQP request done [ Upstream commit f0842bb3d38863777e3454da5653d80b5fde6321 ] KCSAN detects a data race on cqp_request->request_done memory location which is accessed locklessly in irdma_handle_cqp_op while being updated in irdma_cqp_ce_handler. Annotate lockless intent with READ_ONCE/WRITE_ONCE to avoid any compiler optimizations like load fusing and/or KCSAN warning. [222808.417128] BUG: KCSAN: data-race in irdma_cqp_ce_handler [irdma] / irdma_wait_event [irdma] [222808.417532] write to 0xffff8e44107019dc of 1 bytes by task 29658 on cpu 5: [222808.417610] irdma_cqp_ce_handler+0x21e/0x270 [irdma] [222808.417725] cqp_compl_worker+0x1b/0x20 [irdma] [222808.417827] process_one_work+0x4d1/0xa40 [222808.417835] worker_thread+0x319/0x700 [222808.417842] kthread+0x180/0x1b0 [222808.417852] ret_from_fork+0x22/0x30 [222808.417918] read to 0xffff8e44107019dc of 1 bytes by task 29688 on cpu 1: [222808.417995] irdma_wait_event+0x1e2/0x2c0 [irdma] [222808.418099] irdma_handle_cqp_op+0xae/0x170 [irdma] [222808.418202] irdma_cqp_cq_destroy_cmd+0x70/0x90 [irdma] [222808.418308] irdma_puda_dele_rsrc+0x46d/0x4d0 [irdma] [222808.418411] irdma_rt_deinit_hw+0x179/0x1d0 [irdma] [222808.418514] irdma_ib_dealloc_device+0x11/0x40 [irdma] [222808.418618] ib_dealloc_device+0x2a/0x120 [ib_core] [222808.418823] __ib_unregister_device+0xde/0x100 [ib_core] [222808.418981] ib_unregister_device+0x22/0x40 [ib_core] [222808.419142] irdma_ib_unregister_device+0x70/0x90 [irdma] [222808.419248] i40iw_close+0x6f/0xc0 [irdma] [222808.419352] i40e_client_device_unregister+0x14a/0x180 [i40e] [222808.419450] i40iw_remove+0x21/0x30 [irdma] [222808.419554] auxiliary_bus_remove+0x31/0x50 [222808.419563] device_remove+0x69/0xb0 [222808.419572] device_release_driver_internal+0x293/0x360 [222808.419582] driver_detach+0x7c/0xf0 [222808.419592] bus_remove_driver+0x8c/0x150 [222808.419600] driver_unregister+0x45/0x70 [222808.419610] auxiliary_driver_unregister+0x16/0x30 [222808.419618] irdma_exit_module+0x18/0x1e [irdma] [222808.419733] __do_sys_delete_module.constprop.0+0x1e2/0x310 [222808.419745] __x64_sys_delete_module+0x1b/0x30 [222808.419755] do_syscall_64+0x39/0x90 [222808.419763] entry_SYSCALL_64_after_hwframe+0x63/0xcd [222808.419829] value changed: 0x01 -> 0x03 Fixes: 915cc7ac0f8e ("RDMA/irdma: Add miscellaneous utility definitions") Signed-off-by: Shiraz Saleem Link: https://lore.kernel.org/r/20230711175253.1289-4-shiraz.saleem@intel.com Signed-off-by: Leon Romanovsky Signed-off-by: Sasha Levin --- drivers/infiniband/hw/irdma/hw.c | 2 +- drivers/infiniband/hw/irdma/main.h | 2 +- drivers/infiniband/hw/irdma/utils.c | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c index 2159470d7f7f..0221200e1654 100644 --- a/drivers/infiniband/hw/irdma/hw.c +++ b/drivers/infiniband/hw/irdma/hw.c @@ -2084,7 +2084,7 @@ void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) cqp_request->compl_info.error = info.error; if (cqp_request->waiting) { - 
cqp_request->request_done = true; + WRITE_ONCE(cqp_request->request_done, true); wake_up(&cqp_request->waitq); irdma_put_cqp_request(&rf->cqp, cqp_request); } else { diff --git a/drivers/infiniband/hw/irdma/main.h b/drivers/infiniband/hw/irdma/main.h index 454b4b370386..f2e2bc50c6f7 100644 --- a/drivers/infiniband/hw/irdma/main.h +++ b/drivers/infiniband/hw/irdma/main.h @@ -160,8 +160,8 @@ struct irdma_cqp_request { void (*callback_fcn)(struct irdma_cqp_request *cqp_request); void *param; struct irdma_cqp_compl_info compl_info; + bool request_done; /* READ/WRITE_ONCE macros operate on it */ bool waiting:1; - bool request_done:1; bool dynamic:1; }; diff --git a/drivers/infiniband/hw/irdma/utils.c b/drivers/infiniband/hw/irdma/utils.c index d96779996e02..a47eedb6df82 100644 --- a/drivers/infiniband/hw/irdma/utils.c +++ b/drivers/infiniband/hw/irdma/utils.c @@ -481,7 +481,7 @@ void irdma_free_cqp_request(struct irdma_cqp *cqp, if (cqp_request->dynamic) { kfree(cqp_request); } else { - cqp_request->request_done = false; + WRITE_ONCE(cqp_request->request_done, false); cqp_request->callback_fcn = NULL; cqp_request->waiting = false; @@ -515,7 +515,7 @@ irdma_free_pending_cqp_request(struct irdma_cqp *cqp, { if (cqp_request->waiting) { cqp_request->compl_info.error = true; - cqp_request->request_done = true; + WRITE_ONCE(cqp_request->request_done, true); wake_up(&cqp_request->waitq); } wait_event_timeout(cqp->remove_wq, @@ -571,7 +571,7 @@ static enum irdma_status_code irdma_wait_event(struct irdma_pci_f *rf, do { irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq); if (wait_event_timeout(cqp_request->waitq, - cqp_request->request_done, + READ_ONCE(cqp_request->request_done), msecs_to_jiffies(CQP_COMPL_WAIT_TIME_MS))) break; From 3ad5f655eb8a61e6a4f4322983a942d2276e17d4 Mon Sep 17 00:00:00 2001 From: Thomas Bogendoerfer Date: Thu, 13 Jul 2023 16:16:58 +0200 Subject: [PATCH 086/513] RDMA/mthca: Fix crash when polling CQ for shared QPs [ Upstream commit dc52aadbc1849cbe3fcf6bc54d35f6baa396e0a1 ] Commit 21c2fe94abb2 ("RDMA/mthca: Combine special QP struct with mthca QP") introduced a new struct mthca_sqp which doesn't contain struct mthca_qp any longer. Placing a pointer of this new struct into qptable leads to crashes, because mthca_poll_one() expects a qp pointer. Fix this by putting the correct pointer into qptable. 
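Restated outside the driver, the bug is registering the wrapper type in a table whose readers expect the base type. A tiny hypothetical sketch of the corrected registration (names invented, not the mthca structures):

	struct qp {
		int qpn;			/* what the CQ poller expects to find */
	};

	struct sqp {
		struct qp *qp;			/* after the refactor: a pointer,
						 * no longer an embedded struct qp */
		int header_buf;
	};

	#define QP_TABLE_SIZE 64
	static struct qp *qp_table[QP_TABLE_SIZE];

	static void register_special_qp(struct qp *qp, struct sqp *sqp,
					unsigned int idx)
	{
		(void)sqp;			/* reached via qp, never stored */
		if (idx < QP_TABLE_SIZE)
			qp_table[idx] = qp;	/* store the qp itself, not the
						 * sqp wrapper */
	}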
Fixes: 21c2fe94abb2 ("RDMA/mthca: Combine special QP struct with mthca QP") Signed-off-by: Thomas Bogendoerfer Link: https://lore.kernel.org/r/20230713141658.9426-1-tbogendoerfer@suse.de Signed-off-by: Leon Romanovsky Signed-off-by: Sasha Levin --- drivers/infiniband/hw/mthca/mthca_qp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c index 69bba0ef4a5d..53f43649f7d0 100644 --- a/drivers/infiniband/hw/mthca/mthca_qp.c +++ b/drivers/infiniband/hw/mthca/mthca_qp.c @@ -1393,7 +1393,7 @@ int mthca_alloc_sqp(struct mthca_dev *dev, if (mthca_array_get(&dev->qp_table.qp, mqpn)) err = -EBUSY; else - mthca_array_set(&dev->qp_table.qp, mqpn, qp->sqp); + mthca_array_set(&dev->qp_table.qp, mqpn, qp); spin_unlock_irq(&dev->qp_table.lock); if (err) From b79a0e71d6e8692e0b6da05f8aaa7d69191cf7e7 Mon Sep 17 00:00:00 2001 From: Kashyap Desai Date: Fri, 14 Jul 2023 01:22:48 -0700 Subject: [PATCH 087/513] RDMA/bnxt_re: Prevent handling any completions after qp destroy MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit b5bbc6551297447d3cca55cf907079e206e9cd82 ] HW may generate completions that indicates QP is destroyed. Driver should not be scheduling any more completion handlers for this QP, after the QP is destroyed. Since CQs are active during the QP destroy, driver may still schedule completion handlers. This can cause a race where the destroy_cq and poll_cq running simultaneously. Snippet of kernel panic while doing bnxt_re driver load unload in loop. This indicates a poll after the CQ is freed.  [77786.481636] Call Trace: [77786.481640]   [77786.481644]  bnxt_re_poll_cq+0x14a/0x620 [bnxt_re] [77786.481658]  ? kvm_clock_read+0x14/0x30 [77786.481693]  __ib_process_cq+0x57/0x190 [ib_core] [77786.481728]  ib_cq_poll_work+0x26/0x80 [ib_core] [77786.481761]  process_one_work+0x1e5/0x3f0 [77786.481768]  worker_thread+0x50/0x3a0 [77786.481785]  ? __pfx_worker_thread+0x10/0x10 [77786.481790]  kthread+0xe2/0x110 [77786.481794]  ? __pfx_kthread+0x10/0x10 [77786.481797]  ret_from_fork+0x2c/0x50 To avoid this, complete all completion handlers before returning the destroy QP. If free_cq is called soon after destroy_qp, IB stack will cancel the CQ work before invoking the destroy_cq verb and this will prevent any race mentioned. 
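The drain follows the shape of the bnxt_re_synchronize_nq() helper added below: temporarily lift the polling budget and self-poll until nothing referencing the QP is still pending. A stripped-down, single-threaded C sketch of that idea, with invented fields and no locking shown:

	/* hypothetical model of a notification queue */
	struct nq {
		int budget;			/* normal per-poll limit */
		int pending;			/* completions not yet handled */
	};

	static void service_nq(struct nq *nq)
	{
		int handled = 0;

		while (nq->pending > 0 && handled < nq->budget) {
			nq->pending--;		/* run one completion handler */
			handled++;
		}
	}

	/* Drain everything before the QP goes away: raise the budget,
	 * self-poll, then restore it. */
	static void synchronize_nq(struct nq *nq)
	{
		int saved = nq->budget;

		nq->budget = nq->pending;	/* enough to cover the backlog */
		service_nq(nq);
		nq->budget = saved;
	}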
Fixes: 1ac5a4047975 ("RDMA/bnxt_re: Add bnxt_re RoCE driver") Signed-off-by: Kashyap Desai Signed-off-by: Selvin Xavier Link: https://lore.kernel.org/r/1689322969-25402-2-git-send-email-selvin.xavier@broadcom.com Signed-off-by: Leon Romanovsky Signed-off-by: Sasha Levin --- drivers/infiniband/hw/bnxt_re/ib_verbs.c | 12 ++++++++++++ drivers/infiniband/hw/bnxt_re/qplib_fp.c | 18 ++++++++++++++++++ drivers/infiniband/hw/bnxt_re/qplib_fp.h | 1 + 3 files changed, 31 insertions(+) diff --git a/drivers/infiniband/hw/bnxt_re/ib_verbs.c b/drivers/infiniband/hw/bnxt_re/ib_verbs.c index 843d0b5d99ac..87ee616e6938 100644 --- a/drivers/infiniband/hw/bnxt_re/ib_verbs.c +++ b/drivers/infiniband/hw/bnxt_re/ib_verbs.c @@ -792,7 +792,10 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp) int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) { struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp); + struct bnxt_qplib_qp *qplib_qp = &qp->qplib_qp; struct bnxt_re_dev *rdev = qp->rdev; + struct bnxt_qplib_nq *scq_nq = NULL; + struct bnxt_qplib_nq *rcq_nq = NULL; unsigned int flags; int rc; @@ -826,6 +829,15 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata) ib_umem_release(qp->rumem); ib_umem_release(qp->sumem); + /* Flush all the entries of notification queue associated with + * given qp. + */ + scq_nq = qplib_qp->scq->nq; + rcq_nq = qplib_qp->rcq->nq; + bnxt_re_synchronize_nq(scq_nq); + if (scq_nq != rcq_nq) + bnxt_re_synchronize_nq(rcq_nq); + return 0; } diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.c b/drivers/infiniband/hw/bnxt_re/qplib_fp.c index d44b6a5c90b5..f1aa3e19b6de 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.c +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.c @@ -386,6 +386,24 @@ static void bnxt_qplib_service_nq(struct tasklet_struct *t) spin_unlock_bh(&hwq->lock); } +/* bnxt_re_synchronize_nq - self polling notification queue. + * @nq - notification queue pointer + * + * This function will start polling entries of a given notification queue + * for all pending entries. + * This function is useful to synchronize notification entries while resources + * are going away. + */ + +void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq) +{ + int budget = nq->budget; + + nq->budget = nq->hwq.max_elements; + bnxt_qplib_service_nq(&nq->nq_tasklet); + nq->budget = budget; +} + static irqreturn_t bnxt_qplib_nq_irq(int irq, void *dev_instance) { struct bnxt_qplib_nq *nq = dev_instance; diff --git a/drivers/infiniband/hw/bnxt_re/qplib_fp.h b/drivers/infiniband/hw/bnxt_re/qplib_fp.h index f859710f9a7f..49d89c080827 100644 --- a/drivers/infiniband/hw/bnxt_re/qplib_fp.h +++ b/drivers/infiniband/hw/bnxt_re/qplib_fp.h @@ -548,6 +548,7 @@ int bnxt_qplib_process_flush_list(struct bnxt_qplib_cq *cq, struct bnxt_qplib_cqe *cqe, int num_cqes); void bnxt_qplib_flush_cqn_wq(struct bnxt_qplib_qp *qp); +void bnxt_re_synchronize_nq(struct bnxt_qplib_nq *nq); static inline void *bnxt_qplib_get_swqe(struct bnxt_qplib_q *que, u32 *swq_idx) { From 5ec0e4deee5b5f1dbfaae8d30c2dc8ed600b6d74 Mon Sep 17 00:00:00 2001 From: Gaosheng Cui Date: Mon, 17 Jul 2023 09:47:38 +0800 Subject: [PATCH 088/513] drm/msm: Fix IS_ERR_OR_NULL() vs NULL check in a5xx_submit_in_rb() [ Upstream commit 6e8a996563ecbe68e49c49abd4aaeef69f11f2dc ] The msm_gem_get_vaddr() returns an ERR_PTR() on failure, and a null is catastrophic here, so we should use IS_ERR_OR_NULL() to check the return value. 
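For context, ERR_PTR() encodes small negative errnos near the top of the address space, so a bare NULL test misses error pointers entirely. The sketch below is a userspace rendition of the relevant include/linux/err.h helpers plus a caller showing why IS_ERR_OR_NULL() is the right check; it is simplified, not the kernel header.

	#include <errno.h>
	#include <stdbool.h>

	#define MAX_ERRNO 4095	/* the kernel reserves the top 4095 values */

	static inline void *ERR_PTR(long error)      { return (void *)error; }
	static inline long  PTR_ERR(const void *ptr) { return (long)ptr; }

	static inline bool IS_ERR(const void *ptr)
	{
		return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
	}

	static inline bool IS_ERR_OR_NULL(const void *ptr)
	{
		return !ptr || IS_ERR(ptr);
	}

	/* a mapping helper that fails with ERR_PTR(-ENOMEM) returns a
	 * non-NULL pointer that must never be dereferenced */
	static int use_mapping(void *ptr)
	{
		if (IS_ERR_OR_NULL(ptr))	/* catches NULL *and* ERR_PTR() */
			return ptr ? (int)PTR_ERR(ptr) : -EINVAL;
		/* safe to dereference ptr here */
		return 0;
	}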
Fixes: 6a8bd08d0465 ("drm/msm: add sudo flag to submit ioctl") Signed-off-by: Gaosheng Cui Reviewed-by: Dmitry Baryshkov Reviewed-by: Abhinav Kumar Reviewed-by: Akhil P Oommen Patchwork: https://patchwork.freedesktop.org/patch/547712/ Signed-off-by: Rob Clark Signed-off-by: Sasha Levin --- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index ef62900b0612..e9c8111122bd 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -90,7 +90,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit * since we've already mapped it once in * submit_reloc() */ - if (WARN_ON(!ptr)) + if (WARN_ON(IS_ERR_OR_NULL(ptr))) return; for (i = 0; i < dwords; i++) { From 2861b33820f9bbc95fd6056e9434ccb9ec2bf277 Mon Sep 17 00:00:00 2001 From: Matus Gajdos Date: Wed, 19 Jul 2023 18:47:29 +0200 Subject: [PATCH 089/513] ASoC: fsl_spdif: Silence output on stop [ Upstream commit 0e4c2b6b0c4a4b4014d9424c27e5e79d185229c5 ] Clear TX registers on stop to prevent the SPDIF interface from sending last written word over and over again. Fixes: a2388a498ad2 ("ASoC: fsl: Add S/PDIF CPU DAI driver") Signed-off-by: Matus Gajdos Reviewed-by: Fabio Estevam Link: https://lore.kernel.org/r/20230719164729.19969-1-matuszpd@gmail.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/fsl/fsl_spdif.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/soc/fsl/fsl_spdif.c b/sound/soc/fsl/fsl_spdif.c index 8b5c3ba48516..5b107f2555dd 100644 --- a/sound/soc/fsl/fsl_spdif.c +++ b/sound/soc/fsl/fsl_spdif.c @@ -666,6 +666,8 @@ static int fsl_spdif_trigger(struct snd_pcm_substream *substream, case SNDRV_PCM_TRIGGER_PAUSE_PUSH: regmap_update_bits(regmap, REG_SPDIF_SCR, dmaen, 0); regmap_update_bits(regmap, REG_SPDIF_SIE, intr, 0); + regmap_write(regmap, REG_SPDIF_STL, 0x0); + regmap_write(regmap, REG_SPDIF_STR, 0x0); break; default: return -EINVAL; From 4b9f3ef1f3eb1dbb8b4056bba2ad401a2f72a3fd Mon Sep 17 00:00:00 2001 From: Bart Van Assche Date: Thu, 6 Jul 2023 13:14:12 -0700 Subject: [PATCH 090/513] block: Fix a source code comment in include/uapi/linux/blkzoned.h [ Upstream commit e0933b526fbfd937c4a8f4e35fcdd49f0e22d411 ] Fix the symbolic names for zone conditions in the blkzoned.h header file. 
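Since only a comment changes, the corrected names are easiest to sanity-check from the consumer side. A small helper mapping a reported zone condition to text, assuming a Linux system where the uapi <linux/blkzoned.h> header and its BLK_ZONE_COND_* constants are available (the helper itself is illustrative, not part of the patch):

	#include <linux/blkzoned.h>

	/* 'cond' is the zone condition reported for each zone, e.g. by the
	 * BLKREPORTZONE ioctl */
	static const char *zone_cond_name(unsigned int cond)
	{
		switch (cond) {
		case BLK_ZONE_COND_EMPTY:	return "empty (ZC1)";
		case BLK_ZONE_COND_IMP_OPEN:	return "implicitly open (ZC2)";
		case BLK_ZONE_COND_EXP_OPEN:	return "explicitly open (ZC3)";
		case BLK_ZONE_COND_CLOSED:	return "closed (ZC4)";
		case BLK_ZONE_COND_FULL:	return "full (ZC5)";
		case BLK_ZONE_COND_READONLY:	return "read only (ZC6)";
		case BLK_ZONE_COND_OFFLINE:	return "offline (ZC7)";
		default:			return "reserved/unknown";
		}
	}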
Cc: Hannes Reinecke Cc: Damien Le Moal Fixes: 6a0cb1bc106f ("block: Implement support for zoned block devices") Signed-off-by: Bart Van Assche Reviewed-by: Damien Le Moal Link: https://lore.kernel.org/r/20230706201422.3987341-1-bvanassche@acm.org Signed-off-by: Jens Axboe Signed-off-by: Sasha Levin --- include/uapi/linux/blkzoned.h | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h index 656a326821a2..321965feee35 100644 --- a/include/uapi/linux/blkzoned.h +++ b/include/uapi/linux/blkzoned.h @@ -51,13 +51,13 @@ enum blk_zone_type { * * The Zone Condition state machine in the ZBC/ZAC standards maps the above * deinitions as: - * - ZC1: Empty | BLK_ZONE_EMPTY + * - ZC1: Empty | BLK_ZONE_COND_EMPTY * - ZC2: Implicit Open | BLK_ZONE_COND_IMP_OPEN * - ZC3: Explicit Open | BLK_ZONE_COND_EXP_OPEN - * - ZC4: Closed | BLK_ZONE_CLOSED - * - ZC5: Full | BLK_ZONE_FULL - * - ZC6: Read Only | BLK_ZONE_READONLY - * - ZC7: Offline | BLK_ZONE_OFFLINE + * - ZC4: Closed | BLK_ZONE_COND_CLOSED + * - ZC5: Full | BLK_ZONE_COND_FULL + * - ZC6: Read Only | BLK_ZONE_COND_READONLY + * - ZC7: Offline | BLK_ZONE_COND_OFFLINE * * Conditions 0x5 to 0xC are reserved by the current ZBC/ZAC spec and should * be considered invalid. From 2e321ee96f886440b5959c63df48fa3cda584388 Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Sat, 8 Jul 2023 17:21:51 +0800 Subject: [PATCH 091/513] dm raid: fix missing reconfig_mutex unlock in raid_ctr() error paths [ Upstream commit bae3028799dc4f1109acc4df37c8ff06f2d8f1a0 ] In the error paths 'bad_stripe_cache' and 'bad_check_reshape', 'reconfig_mutex' is still held after raid_ctr() returns. Fixes: 9dbd1aa3a81c ("dm raid: add reshaping support to the target") Signed-off-by: Yu Kuai Signed-off-by: Mike Snitzer Signed-off-by: Sasha Levin --- drivers/md/dm-raid.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index eba277bb8a1f..d3ac3b894bdf 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3278,15 +3278,19 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */ if (rs_is_raid456(rs)) { r = rs_set_raid456_stripe_cache(rs); - if (r) + if (r) { + mddev_unlock(&rs->md); goto bad_stripe_cache; + } } /* Now do an early reshape check */ if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { r = rs_check_reshape(rs); - if (r) + if (r) { + mddev_unlock(&rs->md); goto bad_check_reshape; + } /* Restore new, ctr requested layout to perform check */ rs_config_restore(rs, &rs_layout); @@ -3295,6 +3299,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) r = rs->md.pers->check_reshape(&rs->md); if (r) { ti->error = "Reshape check failed"; + mddev_unlock(&rs->md); goto bad_check_reshape; } } From 0c4db5a04d4ffd9f117a413b90845e9dc734a643 Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Sat, 8 Jul 2023 17:21:52 +0800 Subject: [PATCH 092/513] dm raid: clean up four equivalent goto tags in raid_ctr() [ Upstream commit e74c874eabe2e9173a8fbdad616cd89c70eb8ffd ] There are four equivalent goto tags in raid_ctr(), clean them up to use just one. There is no functional change and this is preparation to fix raid_ctr()'s unprotected md_stop(). 
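The pattern being converged on is the usual single-exit unlock label, which is what lets the follow-up locking fix touch just one place. A generic C sketch of that shape, with a pthread mutex standing in for the mddev lock and invented step functions:

	#include <pthread.h>

	static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;

	static int do_step(int step)		/* invented stand-in for the ctr steps */
	{
		return step == 3 ? -1 : 0;
	}

	static int configure(void)
	{
		int err;

		pthread_mutex_lock(&cfg_lock);

		err = do_step(1);
		if (err)
			goto bad_unlock;
		err = do_step(2);
		if (err)
			goto bad_unlock;
		err = do_step(3);
		if (err)
			goto bad_unlock;

		pthread_mutex_unlock(&cfg_lock);
		return 0;

	bad_unlock:
		pthread_mutex_unlock(&cfg_lock);
		return err;
	}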
Signed-off-by: Yu Kuai Signed-off-by: Mike Snitzer Stable-dep-of: 7d5fff8982a2 ("dm raid: protect md_stop() with 'reconfig_mutex'") Signed-off-by: Sasha Levin --- drivers/md/dm-raid.c | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index d3ac3b894bdf..6d4e287350a3 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3258,8 +3258,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) r = md_start(&rs->md); if (r) { ti->error = "Failed to start raid array"; - mddev_unlock(&rs->md); - goto bad_md_start; + goto bad_unlock; } /* If raid4/5/6 journal mode explicitly requested (only possible with journal dev) -> set it */ @@ -3267,8 +3266,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) r = r5c_journal_mode_set(&rs->md, rs->journal_dev.mode); if (r) { ti->error = "Failed to set raid4/5/6 journal mode"; - mddev_unlock(&rs->md); - goto bad_journal_mode_set; + goto bad_unlock; } } @@ -3278,19 +3276,15 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) /* Try to adjust the raid4/5/6 stripe cache size to the stripe size */ if (rs_is_raid456(rs)) { r = rs_set_raid456_stripe_cache(rs); - if (r) { - mddev_unlock(&rs->md); - goto bad_stripe_cache; - } + if (r) + goto bad_unlock; } /* Now do an early reshape check */ if (test_bit(RT_FLAG_RESHAPE_RS, &rs->runtime_flags)) { r = rs_check_reshape(rs); - if (r) { - mddev_unlock(&rs->md); - goto bad_check_reshape; - } + if (r) + goto bad_unlock; /* Restore new, ctr requested layout to perform check */ rs_config_restore(rs, &rs_layout); @@ -3299,8 +3293,7 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) r = rs->md.pers->check_reshape(&rs->md); if (r) { ti->error = "Reshape check failed"; - mddev_unlock(&rs->md); - goto bad_check_reshape; + goto bad_unlock; } } } @@ -3311,10 +3304,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) mddev_unlock(&rs->md); return 0; -bad_md_start: -bad_journal_mode_set: -bad_stripe_cache: -bad_check_reshape: +bad_unlock: + mddev_unlock(&rs->md); md_stop(&rs->md); bad: raid_set_free(rs); From 4e1c1d742970d65f1551a455a780c17a61ce4251 Mon Sep 17 00:00:00 2001 From: Yu Kuai Date: Sat, 8 Jul 2023 17:21:53 +0800 Subject: [PATCH 093/513] dm raid: protect md_stop() with 'reconfig_mutex' [ Upstream commit 7d5fff8982a2199d49ec067818af7d84d4f95ca0 ] __md_stop_writes() and __md_stop() will modify many fields that are protected by 'reconfig_mutex', and all the callers will grab 'reconfig_mutex' except for md_stop(). Also, update md_stop() to make certain 'reconfig_mutex' is held using lockdep_assert_held(). 
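The resulting locking contract, condensed from the hunks below into a sketch
rather than quoted verbatim:

    /* md.c: the helper now documents that callers own reconfig_mutex */
    void md_stop(struct mddev *mddev)
    {
            lockdep_assert_held(&mddev->reconfig_mutex);
            /* stop the array and free the attached data structures */
    }

    /* dm-raid.c: raid_dtr() takes the mutex around the call, and the
     * raid_ctr() error path now calls md_stop() before dropping it */
    mddev_lock_nointr(&rs->md);
    md_stop(&rs->md);
    mddev_unlock(&rs->md);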
Fixes: 9d09e663d550 ("dm: raid456 basic support") Signed-off-by: Yu Kuai Signed-off-by: Mike Snitzer Signed-off-by: Sasha Levin --- drivers/md/dm-raid.c | 4 +++- drivers/md/md.c | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c index 6d4e287350a3..8d489933d579 100644 --- a/drivers/md/dm-raid.c +++ b/drivers/md/dm-raid.c @@ -3305,8 +3305,8 @@ static int raid_ctr(struct dm_target *ti, unsigned int argc, char **argv) return 0; bad_unlock: - mddev_unlock(&rs->md); md_stop(&rs->md); + mddev_unlock(&rs->md); bad: raid_set_free(rs); @@ -3317,7 +3317,9 @@ static void raid_dtr(struct dm_target *ti) { struct raid_set *rs = ti->private; + mddev_lock_nointr(&rs->md); md_stop(&rs->md); + mddev_unlock(&rs->md); raid_set_free(rs); } diff --git a/drivers/md/md.c b/drivers/md/md.c index 5a21aeedc1ba..89a270d29369 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -6281,6 +6281,8 @@ static void __md_stop(struct mddev *mddev) void md_stop(struct mddev *mddev) { + lockdep_assert_held(&mddev->reconfig_mutex); + /* stop the array and free an attached data structures. * This is called from dm-raid */ From bd79de8bd3718dbea51e13615010df5db6b49174 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Thu, 13 Jul 2023 00:14:59 -0500 Subject: [PATCH 094/513] drm/amd: Fix an error handling mistake in psp_sw_init() [ Upstream commit c01aebeef3ce45f696ffa0a1303cea9b34babb45 ] If the second call to amdgpu_bo_create_kernel() fails, the memory allocated from the first call should be cleared. If the third call fails, the memory from the second call should be cleared. Fixes: b95b5391684b ("drm/amdgpu/psp: move PSP memory alloc from hw_init to sw_init") Signed-off-by: Mario Limonciello Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 65744c3bd364..f305a0f8e9b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -341,11 +341,11 @@ static int psp_sw_init(void *handle) return 0; failed2: - amdgpu_bo_free_kernel(&psp->fw_pri_bo, - &psp->fw_pri_mc_addr, &psp->fw_pri_buf); -failed1: amdgpu_bo_free_kernel(&psp->fence_buf_bo, &psp->fence_buf_mc_addr, &psp->fence_buf); +failed1: + amdgpu_bo_free_kernel(&psp->fw_pri_bo, + &psp->fw_pri_mc_addr, &psp->fw_pri_buf); return ret; } From 6bbbe1b2161ec74d898dd532a674038b0426b3cc Mon Sep 17 00:00:00 2001 From: Sindhu Devale Date: Tue, 25 Jul 2023 10:54:38 -0500 Subject: [PATCH 095/513] RDMA/irdma: Report correct WC error [ Upstream commit ae463563b7a1b7d4a3d0b065b09d37a76b693937 ] Report the correct WC error if a MW bind is performed on an already valid/bound window. 
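In outline, the fix extends the asynchronous-event-to-flush-code mapping in
irdma_set_flush_fields() by one case (sketch of the hunk below), so a bind
attempted on an already valid/bound window is reported as a MW bind error:

    case IRDMA_AE_AMP_MWBIND_VALID_STAG:
            qp->flush_code = FLUSH_MW_BIND_ERR;
            qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR;
            break;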
Fixes: 44d9e52977a1 ("RDMA/irdma: Implement device initialization definitions") Signed-off-by: Sindhu Devale Signed-off-by: Shiraz Saleem Link: https://lore.kernel.org/r/20230725155439.1057-2-shiraz.saleem@intel.com Signed-off-by: Leon Romanovsky Signed-off-by: Sasha Levin --- drivers/infiniband/hw/irdma/hw.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/hw/irdma/hw.c b/drivers/infiniband/hw/irdma/hw.c index 0221200e1654..70dffa9a9f67 100644 --- a/drivers/infiniband/hw/irdma/hw.c +++ b/drivers/infiniband/hw/irdma/hw.c @@ -191,6 +191,7 @@ static void irdma_set_flush_fields(struct irdma_sc_qp *qp, case IRDMA_AE_AMP_MWBIND_INVALID_RIGHTS: case IRDMA_AE_AMP_MWBIND_BIND_DISABLED: case IRDMA_AE_AMP_MWBIND_INVALID_BOUNDS: + case IRDMA_AE_AMP_MWBIND_VALID_STAG: qp->flush_code = FLUSH_MW_BIND_ERR; qp->event_type = IRDMA_QP_EVENT_ACCESS_ERR; break; From ae5b8b1c2eac1f8287b978eb4086ec345f33a47f Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 26 Jul 2023 22:33:22 +0200 Subject: [PATCH 096/513] ata: pata_ns87415: mark ns87560_tf_read static [ Upstream commit 3fc2febb0f8ffae354820c1772ec008733237cfa ] The global function triggers a warning because of the missing prototype drivers/ata/pata_ns87415.c:263:6: warning: no previous prototype for 'ns87560_tf_read' [-Wmissing-prototypes] 263 | void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf) There are no other references to this, so just make it static. Fixes: c4b5b7b6c4423 ("pata_ns87415: Initial cut at 87415/87560 IDE support") Reviewed-by: Sergey Shtylyov Reviewed-by: Serge Semin Signed-off-by: Arnd Bergmann Signed-off-by: Damien Le Moal Signed-off-by: Sasha Levin --- drivers/ata/pata_ns87415.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/ata/pata_ns87415.c b/drivers/ata/pata_ns87415.c index 9dd6bffefb48..602472d4e693 100644 --- a/drivers/ata/pata_ns87415.c +++ b/drivers/ata/pata_ns87415.c @@ -260,7 +260,7 @@ static u8 ns87560_check_status(struct ata_port *ap) * LOCKING: * Inherited from caller. */ -void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf) +static void ns87560_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; From 23e8a65f9a939fc5af4d40b6f31d1518d4f2ac18 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Mon, 24 Jul 2023 13:40:40 +0800 Subject: [PATCH 097/513] ring-buffer: Fix wrong stat of cpu_buffer->read [ Upstream commit 2d093282b0d4357373497f65db6a05eb0c28b7c8 ] When pages are removed in rb_remove_pages(), 'cpu_buffer->read' is set to 0 in order to make sure any read iterators reset themselves. However, this will mess 'entries' stating, see following steps: # cd /sys/kernel/tracing/ # 1. Enlarge ring buffer prepare for later reducing: # echo 20 > per_cpu/cpu0/buffer_size_kb # 2. Write a log into ring buffer of cpu0: # taskset -c 0 echo "hello1" > trace_marker # 3. Read the log: # cat per_cpu/cpu0/trace_pipe <...>-332 [000] ..... 62.406844: tracing_mark_write: hello1 # 4. Stop reading and see the stats, now 0 entries, and 1 event readed: # cat per_cpu/cpu0/stats entries: 0 [...] read events: 1 # 5. Reduce the ring buffer # echo 7 > per_cpu/cpu0/buffer_size_kb # 6. Now entries became unexpected 1 because actually no entries!!! # cat per_cpu/cpu0/stats entries: 1 [...] read events: 0 To fix it, introduce 'page_removed' field to count total removed pages since last reset, then use it to let read iterators reset themselves instead of changing the 'read' pointer. 
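Sketched (condensed from the hunks below), the scheme replaces the read-pointer
reset with a removal generation counter that iterators compare against:

    /* rb_remove_pages(): no longer clears cpu_buffer->read */
    cpu_buffer->pages_removed += nr_removed;

    /* rb_iter_reset(): cache the generation alongside the read position */
    iter->cache_pages_removed = cpu_buffer->pages_removed;

    /* rb_iter_peek(): invalidate on consuming reads or page removal */
    if (unlikely(iter->cache_read != cpu_buffer->read ||
                 iter->cache_reader_page != cpu_buffer->reader_page ||
                 iter->cache_pages_removed != cpu_buffer->pages_removed))
            rb_iter_reset(iter);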
Link: https://lore.kernel.org/linux-trace-kernel/20230724054040.3489499-1-zhengyejian1@huawei.com Cc: Cc: Fixes: 83f40318dab0 ("ring-buffer: Make removal of ring buffer pages atomic") Signed-off-by: Zheng Yejian Signed-off-by: Steven Rostedt (Google) Signed-off-by: Sasha Levin --- kernel/trace/ring_buffer.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index ceeba8bf1265..e1cef097b0df 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -520,6 +520,8 @@ struct ring_buffer_per_cpu { rb_time_t before_stamp; u64 event_stamp[MAX_NEST]; u64 read_stamp; + /* pages removed since last reset */ + unsigned long pages_removed; /* ring buffer pages to update, > 0 to add, < 0 to remove */ long nr_pages_to_update; struct list_head new_pages; /* new pages to add */ @@ -555,6 +557,7 @@ struct ring_buffer_iter { struct buffer_page *head_page; struct buffer_page *cache_reader_page; unsigned long cache_read; + unsigned long cache_pages_removed; u64 read_stamp; u64 page_stamp; struct ring_buffer_event *event; @@ -1931,6 +1934,8 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) to_remove = rb_list_head(to_remove)->next; head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD; } + /* Read iterators need to reset themselves when some pages removed */ + cpu_buffer->pages_removed += nr_removed; next_page = rb_list_head(to_remove)->next; @@ -1952,12 +1957,6 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) cpu_buffer->head_page = list_entry(next_page, struct buffer_page, list); - /* - * change read pointer to make sure any read iterators reset - * themselves - */ - cpu_buffer->read = 0; - /* pages are removed, resume tracing and then free the pages */ atomic_dec(&cpu_buffer->record_disabled); raw_spin_unlock_irq(&cpu_buffer->reader_lock); @@ -4347,6 +4346,7 @@ static void rb_iter_reset(struct ring_buffer_iter *iter) iter->cache_reader_page = iter->head_page; iter->cache_read = cpu_buffer->read; + iter->cache_pages_removed = cpu_buffer->pages_removed; if (iter->head) { iter->read_stamp = cpu_buffer->read_stamp; @@ -4800,12 +4800,13 @@ rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts) buffer = cpu_buffer->buffer; /* - * Check if someone performed a consuming read to - * the buffer. A consuming read invalidates the iterator - * and we need to reset the iterator in this case. + * Check if someone performed a consuming read to the buffer + * or removed some pages from the buffer. In these cases, + * iterator was invalidated and we need to reset it. */ if (unlikely(iter->cache_read != cpu_buffer->read || - iter->cache_reader_page != cpu_buffer->reader_page)) + iter->cache_reader_page != cpu_buffer->reader_page || + iter->cache_pages_removed != cpu_buffer->pages_removed)) rb_iter_reset(iter); again: @@ -5249,6 +5250,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) cpu_buffer->last_overrun = 0; rb_head_page_activate(cpu_buffer); + cpu_buffer->pages_removed = 0; } /* Must have disabled the cpu buffer then done a synchronize_rcu */ From 813cede7b2f5a4b1b75d2d4bb4e705cc8e063b20 Mon Sep 17 00:00:00 2001 From: Zheng Yejian Date: Wed, 26 Jul 2023 17:58:04 +0800 Subject: [PATCH 098/513] tracing: Fix warning in trace_buffered_event_disable() [ Upstream commit dea499781a1150d285c62b26659f62fb00824fce ] Warning happened in trace_buffered_event_disable() at WARN_ON_ONCE(!trace_buffered_event_ref) Call Trace: ? __warn+0xa5/0x1b0 ? 
trace_buffered_event_disable+0x189/0x1b0 __ftrace_event_enable_disable+0x19e/0x3e0 free_probe_data+0x3b/0xa0 unregister_ftrace_function_probe_func+0x6b8/0x800 event_enable_func+0x2f0/0x3d0 ftrace_process_regex.isra.0+0x12d/0x1b0 ftrace_filter_write+0xe6/0x140 vfs_write+0x1c9/0x6f0 [...] The cause of the warning is in __ftrace_event_enable_disable(), trace_buffered_event_enable() was called once while trace_buffered_event_disable() was called twice. Reproduction script show as below, for analysis, see the comments: ``` #!/bin/bash cd /sys/kernel/tracing/ # 1. Register a 'disable_event' command, then: # 1) SOFT_DISABLED_BIT was set; # 2) trace_buffered_event_enable() was called first time; echo 'cmdline_proc_show:disable_event:initcall:initcall_finish' > \ set_ftrace_filter # 2. Enable the event registered, then: # 1) SOFT_DISABLED_BIT was cleared; # 2) trace_buffered_event_disable() was called first time; echo 1 > events/initcall/initcall_finish/enable # 3. Try to call into cmdline_proc_show(), then SOFT_DISABLED_BIT was # set again!!! cat /proc/cmdline # 4. Unregister the 'disable_event' command, then: # 1) SOFT_DISABLED_BIT was cleared again; # 2) trace_buffered_event_disable() was called second time!!! echo '!cmdline_proc_show:disable_event:initcall:initcall_finish' > \ set_ftrace_filter ``` To fix it, IIUC, we can change to call trace_buffered_event_enable() at fist time soft-mode enabled, and call trace_buffered_event_disable() at last time soft-mode disabled. Link: https://lore.kernel.org/linux-trace-kernel/20230726095804.920457-1-zhengyejian1@huawei.com Cc: Fixes: 0fc1b09ff1ff ("tracing: Use temp buffer when filtering events") Signed-off-by: Zheng Yejian Signed-off-by: Steven Rostedt (Google) Signed-off-by: Sasha Levin --- kernel/trace/trace_events.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 160298d285c0..2a2a59999767 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -594,7 +594,6 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file, { struct trace_event_call *call = file->event_call; struct trace_array *tr = file->tr; - unsigned long file_flags = file->flags; int ret = 0; int disable; @@ -618,6 +617,8 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file, break; disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED; clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags); + /* Disable use of trace_buffered_event */ + trace_buffered_event_disable(); } else disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE); @@ -656,6 +657,8 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file, if (atomic_inc_return(&file->sm_ref) > 1) break; set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags); + /* Enable use of trace_buffered_event */ + trace_buffered_event_enable(); } if (!(file->flags & EVENT_FILE_FL_ENABLED)) { @@ -695,15 +698,6 @@ static int __ftrace_event_enable_disable(struct trace_event_file *file, break; } - /* Enable or disable use of trace_buffered_event */ - if ((file_flags & EVENT_FILE_FL_SOFT_DISABLED) != - (file->flags & EVENT_FILE_FL_SOFT_DISABLED)) { - if (file->flags & EVENT_FILE_FL_SOFT_DISABLED) - trace_buffered_event_enable(); - else - trace_buffered_event_disable(); - } - return ret; } From a49884561a8c1fe830de0f937a8b89f1edce1951 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 4 Jul 2023 17:09:09 +0300 Subject: [PATCH 099/513] Revert "usb: gadget: tegra-xudc: Fix error check in 
tegra_xudc_powerdomain_init()" commit a8291be6b5dd465c22af229483dbac543a91e24e upstream. This reverts commit f08aa7c80dac27ee00fa6827f447597d2fba5465. The reverted commit was based on static analysis and a misunderstanding of how PTR_ERR() and NULLs are supposed to work. When a function returns both pointer errors and NULL then normally the NULL means "continue operating without a feature because it was deliberately turned off". The NULL should not be treated as a failure. If a driver cannot work when that feature is disabled then the KConfig should enforce that the function cannot return NULL. We should not need to test for it. In this driver, the bug means that probe cannot succeed when CONFIG_PM is disabled. Signed-off-by: Dan Carpenter Fixes: f08aa7c80dac ("usb: gadget: tegra-xudc: Fix error check in tegra_xudc_powerdomain_init()") Cc: stable Link: https://lore.kernel.org/r/ZKQoBa84U/ykEh3C@moroto Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/udc/tegra-xudc.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index 1cb4258077bd..52996bf2cc70 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -3689,15 +3689,15 @@ static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc) int err; xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, "dev"); - if (IS_ERR_OR_NULL(xudc->genpd_dev_device)) { - err = PTR_ERR(xudc->genpd_dev_device) ? : -ENODATA; + if (IS_ERR(xudc->genpd_dev_device)) { + err = PTR_ERR(xudc->genpd_dev_device); dev_err(dev, "failed to get device power domain: %d\n", err); return err; } xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss"); - if (IS_ERR_OR_NULL(xudc->genpd_dev_ss)) { - err = PTR_ERR(xudc->genpd_dev_ss) ? : -ENODATA; + if (IS_ERR(xudc->genpd_dev_ss)) { + err = PTR_ERR(xudc->genpd_dev_ss); dev_err(dev, "failed to get SuperSpeed power domain: %d\n", err); return err; } From 2f9bfccced04d59a802a348b613741cefeef2228 Mon Sep 17 00:00:00 2001 From: Frank Li Date: Fri, 7 Jul 2023 19:00:14 -0400 Subject: [PATCH 100/513] usb: gadget: call usb_gadget_check_config() to verify UDC capability commit f4fc01af5b640bc39bd9403b5fd855345a2ad5f8 upstream. The legacy gadget driver omitted calling usb_gadget_check_config() to ensure that the USB device controller (UDC) has adequate resources, including sufficient endpoint numbers and types, to support the given configuration. Previously, usb_add_config() was solely invoked by the legacy gadget driver. Adds the necessary usb_gadget_check_config() after the bind() operation to fix the issue. 
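Sketched from the hunk below, the check slots in between a successful bind()
and the existing error unwinding in usb_add_config(), so a configuration that
exceeds the UDC's endpoint resources is torn down through the same path as a
failed bind:

    status = bind(config);

    if (status == 0)
            status = usb_gadget_check_config(cdev->gadget);

    if (status < 0) {
            /* existing unwind of the functions added by bind() */
            ...
    }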
Fixes: dce49449e04f ("usb: cdns3: allocate TX FIFO size according to composite EP number") Cc: stable Reported-by: Ravi Gunasekaran Signed-off-by: Frank Li Link: https://lore.kernel.org/r/20230707230015.494999-1-Frank.Li@nxp.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/composite.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c index 0886cff9aa1c..edce0a1bdddf 100644 --- a/drivers/usb/gadget/composite.c +++ b/drivers/usb/gadget/composite.c @@ -1033,6 +1033,10 @@ int usb_add_config(struct usb_composite_dev *cdev, goto done; status = bind(config); + + if (status == 0) + status = usb_gadget_check_config(cdev->gadget); + if (status < 0) { while (!list_empty(&config->functions)) { struct usb_function *f; From 0f7a2b567197798da7bfa2252f4485c0ca6c6266 Mon Sep 17 00:00:00 2001 From: Zqiang Date: Fri, 14 Jul 2023 15:40:11 +0800 Subject: [PATCH 101/513] USB: gadget: Fix the memory leak in raw_gadget driver commit 83e30f2bf86ef7c38fbd476ed81a88522b620628 upstream. Currently, increasing raw_dev->count happens before invoke the raw_queue_event(), if the raw_queue_event() return error, invoke raw_release() will not trigger the dev_free() to be called. [ 268.905865][ T5067] raw-gadget.0 gadget.0: failed to queue event [ 268.912053][ T5067] udc dummy_udc.0: failed to start USB Raw Gadget: -12 [ 268.918885][ T5067] raw-gadget.0: probe of gadget.0 failed with error -12 [ 268.925956][ T5067] UDC core: USB Raw Gadget: couldn't find an available UDC or it's busy [ 268.934657][ T5067] misc raw-gadget: fail, usb_gadget_register_driver returned -16 BUG: memory leak [] kmalloc_trace+0x24/0x90 mm/slab_common.c:1076 [] kmalloc include/linux/slab.h:582 [inline] [] kzalloc include/linux/slab.h:703 [inline] [] dev_new drivers/usb/gadget/legacy/raw_gadget.c:191 [inline] [] raw_open+0x45/0x110 drivers/usb/gadget/legacy/raw_gadget.c:385 [] misc_open+0x1a9/0x1f0 drivers/char/misc.c:165 [] kmalloc_trace+0x24/0x90 mm/slab_common.c:1076 [] kmalloc include/linux/slab.h:582 [inline] [] raw_ioctl_init+0xdf/0x410 drivers/usb/gadget/legacy/raw_gadget.c:460 [] raw_ioctl+0x5f9/0x1120 drivers/usb/gadget/legacy/raw_gadget.c:1250 [] vfs_ioctl fs/ioctl.c:51 [inline] [] kmalloc_trace+0x24/0x90 mm/slab_common.c:1076 [] kmalloc include/linux/slab.h:582 [inline] [] kzalloc include/linux/slab.h:703 [inline] [] dummy_alloc_request+0x5a/0xe0 drivers/usb/gadget/udc/dummy_hcd.c:665 [] usb_ep_alloc_request+0x22/0xd0 drivers/usb/gadget/udc/core.c:196 [] gadget_bind+0x6d/0x370 drivers/usb/gadget/legacy/raw_gadget.c:292 This commit therefore invoke kref_get() under the condition that raw_queue_event() return success. Reported-by: syzbot+feb045d335c1fdde5bf7@syzkaller.appspotmail.com Cc: stable Closes: https://syzkaller.appspot.com/bug?extid=feb045d335c1fdde5bf7 Signed-off-by: Zqiang Reviewed-by: Andrey Konovalov Tested-by: Andrey Konovalov Link: https://lore.kernel.org/r/20230714074011.20989-1-qiang.zhang1211@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/legacy/raw_gadget.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index 2869bda64229..9d13f2274398 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -310,13 +310,15 @@ static int gadget_bind(struct usb_gadget *gadget, dev->eps_num = i; spin_unlock_irqrestore(&dev->lock, flags); - /* Matches kref_put() in gadget_unbind(). 
*/ - kref_get(&dev->count); - ret = raw_queue_event(dev, USB_RAW_EVENT_CONNECT, 0, NULL); - if (ret < 0) + if (ret < 0) { dev_err(&gadget->dev, "failed to queue event\n"); + set_gadget_data(gadget, NULL); + return ret; + } + /* Matches kref_put() in gadget_unbind(). */ + kref_get(&dev->count); return ret; } From 5883a4e8478d0b3ecf90262951d152ece0d5bb67 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 11 Jul 2023 16:01:25 -0700 Subject: [PATCH 102/513] KVM: Grab a reference to KVM for VM and vCPU stats file descriptors commit eed3013faa401aae662398709410a59bb0646e32 upstream. Grab a reference to KVM prior to installing VM and vCPU stats file descriptors to ensure the underlying VM and vCPU objects are not freed until the last reference to any and all stats fds are dropped. Note, the stats paths manually invoke fd_install() and so don't need to grab a reference before creating the file. Fixes: ce55c049459c ("KVM: stats: Support binary stats retrieval for a VCPU") Fixes: fcfe1baeddbf ("KVM: stats: Support binary stats retrieval for a VM") Reported-by: Zheng Zhang Closes: https://lore.kernel.org/all/CAC_GQSr3xzZaeZt85k_RCBd5kfiOve8qXo7a81Cq53LuVQ5r=Q@mail.gmail.com Cc: stable@vger.kernel.org Cc: Kees Cook Signed-off-by: Sean Christopherson Reviewed-by: Kees Cook Message-Id: <20230711230131.648752-2-seanjc@google.com> Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- virt/kvm/kvm_main.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 3967184b7d62..d11c581ce9b9 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -3804,8 +3804,17 @@ static ssize_t kvm_vcpu_stats_read(struct file *file, char __user *user_buffer, sizeof(vcpu->stat), user_buffer, size, offset); } +static int kvm_vcpu_stats_release(struct inode *inode, struct file *file) +{ + struct kvm_vcpu *vcpu = file->private_data; + + kvm_put_kvm(vcpu->kvm); + return 0; +} + static const struct file_operations kvm_vcpu_stats_fops = { .read = kvm_vcpu_stats_read, + .release = kvm_vcpu_stats_release, .llseek = noop_llseek, }; @@ -3826,6 +3835,9 @@ static int kvm_vcpu_ioctl_get_stats_fd(struct kvm_vcpu *vcpu) put_unused_fd(fd); return PTR_ERR(file); } + + kvm_get_kvm(vcpu->kvm); + file->f_mode |= FMODE_PREAD; fd_install(fd, file); @@ -4409,8 +4421,17 @@ static ssize_t kvm_vm_stats_read(struct file *file, char __user *user_buffer, sizeof(kvm->stat), user_buffer, size, offset); } +static int kvm_vm_stats_release(struct inode *inode, struct file *file) +{ + struct kvm *kvm = file->private_data; + + kvm_put_kvm(kvm); + return 0; +} + static const struct file_operations kvm_vm_stats_fops = { .read = kvm_vm_stats_read, + .release = kvm_vm_stats_release, .llseek = noop_llseek, }; @@ -4429,6 +4450,9 @@ static int kvm_vm_ioctl_get_stats_fd(struct kvm *kvm) put_unused_fd(fd); return PTR_ERR(file); } + + kvm_get_kvm(kvm); + file->f_mode |= FMODE_PREAD; fd_install(fd, file); From 41c487de4cf528e17987be22457814e7c07acf98 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Jun 2023 13:30:36 -0700 Subject: [PATCH 103/513] KVM: VMX: Don't fudge CR0 and CR4 for restricted L2 guest commit c4abd7352023aa96114915a0bb2b88016a425cda upstream. Stuff CR0 and/or CR4 to be compliant with a restricted guest if and only if KVM itself is not configured to utilize unrestricted guests, i.e. don't stuff CR0/CR4 for a restricted L2 that is running as the guest of an unrestricted L1. 
Any attempt to VM-Enter a restricted guest with invalid CR0/CR4 values should fail, i.e. in a nested scenario, KVM (as L0) should never observe a restricted L2 with incompatible CR0/CR4, since nested VM-Enter from L1 should have failed. And if KVM does observe an active, restricted L2 with incompatible state, e.g. due to a KVM bug, fudging CR0/CR4 instead of letting VM-Enter fail does more harm than good, as KVM will often neglect to undo the side effects, e.g. won't clear rmode.vm86_active on nested VM-Exit, and thus the damage can easily spill over to L1. On the other hand, letting VM-Enter fail due to bad guest state is more likely to contain the damage to L2 as KVM relies on hardware to perform most guest state consistency checks, i.e. KVM needs to be able to reflect a failed nested VM-Enter into L1 irrespective of (un)restricted guest behavior. Cc: Jim Mattson Cc: stable@vger.kernel.org Fixes: bddd82d19e2e ("KVM: nVMX: KVM needs to unset "unrestricted guest" VM-execution control in vmcs02 if vmcs12 doesn't set it") Signed-off-by: Sean Christopherson Message-Id: <20230613203037.1968489-3-seanjc@google.com> Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- arch/x86/kvm/vmx/vmx.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index ec56ed91b503..0841f9a34d1c 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -1416,6 +1416,11 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags) struct vcpu_vmx *vmx = to_vmx(vcpu); unsigned long old_rflags; + /* + * Unlike CR0 and CR4, RFLAGS handling requires checking if the vCPU + * is an unrestricted guest in order to mark L2 as needing emulation + * if L1 runs L2 as a restricted guest. + */ if (is_unrestricted_guest(vcpu)) { kvm_register_mark_available(vcpu, VCPU_EXREG_RFLAGS); vmx->rflags = rflags; @@ -3088,7 +3093,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) old_cr0_pg = kvm_read_cr0_bits(vcpu, X86_CR0_PG); hw_cr0 = (cr0 & ~KVM_VM_CR0_ALWAYS_OFF); - if (is_unrestricted_guest(vcpu)) + if (enable_unrestricted_guest) hw_cr0 |= KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST; else { hw_cr0 |= KVM_VM_CR0_ALWAYS_ON; @@ -3116,7 +3121,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) } #endif - if (enable_ept && !is_unrestricted_guest(vcpu)) { + if (enable_ept && !enable_unrestricted_guest) { /* * Ensure KVM has an up-to-date snapshot of the guest's CR3. If * the below code _enables_ CR3 exiting, vmx_cache_reg() will @@ -3239,7 +3244,7 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) unsigned long hw_cr4; hw_cr4 = (cr4_read_shadow() & X86_CR4_MCE) | (cr4 & ~X86_CR4_MCE); - if (is_unrestricted_guest(vcpu)) + if (enable_unrestricted_guest) hw_cr4 |= KVM_VM_CR4_ALWAYS_ON_UNRESTRICTED_GUEST; else if (vmx->rmode.vm86_active) hw_cr4 |= KVM_RMODE_VM_CR4_ALWAYS_ON; @@ -3259,7 +3264,7 @@ void vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) vcpu->arch.cr4 = cr4; kvm_register_mark_available(vcpu, VCPU_EXREG_CR4); - if (!is_unrestricted_guest(vcpu)) { + if (!enable_unrestricted_guest) { if (enable_ept) { if (!is_paging(vcpu)) { hw_cr4 &= ~X86_CR4_PAE; From dc4f6c537f37776a8df10dbfbe94017f031eff65 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 13 Jul 2023 16:57:41 +0200 Subject: [PATCH 104/513] serial: qcom-geni: drop bogus runtime pm state update commit 4dd8752a14ca0303fbdf0a6c68ff65f0a50bd2fa upstream. 
The runtime PM state should not be changed by drivers that do not implement runtime PM even if it happens to work around a bug in PM core. With the wake irq arming now fixed, drop the bogus runtime PM state update which left the device in active state (and could potentially prevent a parent device from suspending). Fixes: f3974413cf02 ("tty: serial: qcom_geni_serial: Wakeup IRQ cleanup") Cc: 5.6+ # 5.6+ Signed-off-by: Johan Hovold Reviewed-by: Tony Lindgren Signed-off-by: Rafael J. Wysocki Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/qcom_geni_serial.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c index 5157ddffaf68..197397a98944 100644 --- a/drivers/tty/serial/qcom_geni_serial.c +++ b/drivers/tty/serial/qcom_geni_serial.c @@ -1455,13 +1455,6 @@ static int qcom_geni_serial_probe(struct platform_device *pdev) if (ret) return ret; - /* - * Set pm_runtime status as ACTIVE so that wakeup_irq gets - * enabled/disabled from dev_pm_arm_wake_irq during system - * suspend/resume respectively. - */ - pm_runtime_set_active(&pdev->dev); - if (port->wakeup_irq > 0) { device_init_wakeup(&pdev->dev, true); ret = dev_pm_set_dedicated_wake_irq(&pdev->dev, From 8fa462ad0f9b24ec538e3725fd87adb5babddf4b Mon Sep 17 00:00:00 2001 From: Ruihong Luo Date: Thu, 13 Jul 2023 08:42:36 +0800 Subject: [PATCH 105/513] serial: 8250_dw: Preserve original value of DLF register MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 748c5ea8b8796ae8ee80b8d3a3d940570b588d59 upstream. Preserve the original value of the Divisor Latch Fraction (DLF) register. When the DLF register is modified without preservation, it can disrupt the baudrate settings established by firmware or bootloader, leading to data corruption and the generation of unreadable or distorted characters. 
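For context, dw8250_setup_port() writes all-ones to DLF and reads it back to
detect fractional-divisor support; the fix brackets that probe with a save and
restore instead of leaving zero behind (sketch of the hunk below):

    old_dlf = dw8250_readl_ext(p, DW_UART_DLF);     /* firmware/bootloader value */
    dw8250_writel_ext(p, DW_UART_DLF, ~0U);         /* probe the register */
    reg = dw8250_readl_ext(p, DW_UART_DLF);
    dw8250_writel_ext(p, DW_UART_DLF, old_dlf);     /* restore, not 0 */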
Fixes: 701c5e73b296 ("serial: 8250_dw: add fractional divisor support") Cc: stable Signed-off-by: Ruihong Luo Link: https://lore.kernel.org/stable/20230713004235.35904-1-colorsu1922%40gmail.com Reviewed-by: Ilpo Järvinen Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20230713004235.35904-1-colorsu1922@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/8250/8250_dwlib.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/tty/serial/8250/8250_dwlib.c b/drivers/tty/serial/8250/8250_dwlib.c index 6d6a78eead3e..1cf229cca592 100644 --- a/drivers/tty/serial/8250/8250_dwlib.c +++ b/drivers/tty/serial/8250/8250_dwlib.c @@ -80,7 +80,7 @@ static void dw8250_set_divisor(struct uart_port *p, unsigned int baud, void dw8250_setup_port(struct uart_port *p) { struct uart_8250_port *up = up_to_u8250p(p); - u32 reg; + u32 reg, old_dlf; /* * If the Component Version Register returns zero, we know that @@ -93,9 +93,11 @@ void dw8250_setup_port(struct uart_port *p) dev_dbg(p->dev, "Designware UART version %c.%c%c\n", (reg >> 24) & 0xff, (reg >> 16) & 0xff, (reg >> 8) & 0xff); + /* Preserve value written by firmware or bootloader */ + old_dlf = dw8250_readl_ext(p, DW_UART_DLF); dw8250_writel_ext(p, DW_UART_DLF, ~0U); reg = dw8250_readl_ext(p, DW_UART_DLF); - dw8250_writel_ext(p, DW_UART_DLF, 0); + dw8250_writel_ext(p, DW_UART_DLF, old_dlf); if (reg) { struct dw8250_port_data *d = p->private_data; From b800c0d5576e4485da23554ed0a835df20cde646 Mon Sep 17 00:00:00 2001 From: Samuel Holland Date: Fri, 23 Jun 2023 23:01:59 -0700 Subject: [PATCH 106/513] serial: sifive: Fix sifive_serial_console_setup() section commit 9b8fef6345d5487137d4193bb0a0eae2203c284e upstream. This function is called indirectly from the platform driver probe function. Even if the driver is built in, it may be probed after free_initmem() due to deferral or unbinding/binding via sysfs. Thus the function cannot be marked as __init. Fixes: 45c054d0815b ("tty: serial: add driver for the SiFive UART") Cc: stable Signed-off-by: Samuel Holland Link: https://lore.kernel.org/r/20230624060159.3401369-1-samuel.holland@sifive.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/sifive.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/tty/serial/sifive.c b/drivers/tty/serial/sifive.c index 4b0fa91e9f9a..69a32d94ec9d 100644 --- a/drivers/tty/serial/sifive.c +++ b/drivers/tty/serial/sifive.c @@ -843,7 +843,7 @@ static void sifive_serial_console_write(struct console *co, const char *s, local_irq_restore(flags); } -static int __init sifive_serial_console_setup(struct console *co, char *options) +static int sifive_serial_console_setup(struct console *co, char *options) { struct sifive_serial_port *ssp; int baud = SIFIVE_DEFAULT_BAUD_RATE; From 399091399777022e5b8fb54ee6349e9e04154248 Mon Sep 17 00:00:00 2001 From: Jerry Meng Date: Thu, 29 Jun 2023 17:35:22 +0800 Subject: [PATCH 107/513] USB: serial: option: support Quectel EM060K_128 commit 4f7cab49cecee16120d27c1734cfdf3d6c0e5329 upstream. EM060K_128 is EM060K's sub-model, having the same name "Quectel EM060K-GL" MBIM + GNSS + DIAG + NMEA + AT + QDSS + DPL T: Bus=03 Lev=01 Prnt=01 Port=01 Cnt=02 Dev#= 8 Spd=480 MxCh= 0 D: Ver= 2.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=64 #Cfgs= 1 P: Vendor=2c7c ProdID=0128 Rev= 5.04 S: Manufacturer=Quectel S: Product=Quectel EM060K-GL S: SerialNumber=f6fa08b6 C:* #Ifs= 8 Cfg#= 1 Atr=a0 MxPwr=500mA A: FirstIf#= 0 IfCount= 2 Cls=02(comm.) Sub=0e Prot=00 I:* If#= 0 Alt= 0 #EPs= 1 Cls=02(comm.) 
Sub=0e Prot=00 Driver=cdc_mbim E: Ad=81(I) Atr=03(Int.) MxPS= 64 Ivl=32ms I: If#= 1 Alt= 0 #EPs= 0 Cls=0a(data ) Sub=00 Prot=02 Driver=cdc_mbim I:* If#= 1 Alt= 1 #EPs= 2 Cls=0a(data ) Sub=00 Prot=02 Driver=cdc_mbim E: Ad=8e(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=0f(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 2 Alt= 0 #EPs= 1 Cls=ff(vend.) Sub=ff Prot=ff Driver=(none) E: Ad=82(I) Atr=03(Int.) MxPS= 64 Ivl=32ms I:* If#= 3 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=30 Driver=option E: Ad=01(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=40 Driver=option E: Ad=85(I) Atr=03(Int.) MxPS= 10 Ivl=32ms E: Ad=84(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=02(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 5 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=40 Driver=option E: Ad=87(I) Atr=03(Int.) MxPS= 10 Ivl=32ms E: Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=03(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 6 Alt= 0 #EPs= 1 Cls=ff(vend.) Sub=ff Prot=70 Driver=(none) E: Ad=88(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms I:* If#= 7 Alt= 0 #EPs= 1 Cls=ff(vend.) Sub=ff Prot=80 Driver=(none) E: Ad=8f(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms Signed-off-by: Jerry Meng Cc: stable@vger.kernel.org Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 6692440c1e0a..efd57c6a5f81 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_EM061K_LTA 0x0123 #define QUECTEL_PRODUCT_EM061K_LMS 0x0124 #define QUECTEL_PRODUCT_EC25 0x0125 +#define QUECTEL_PRODUCT_EM060K_128 0x0128 #define QUECTEL_PRODUCT_EG91 0x0191 #define QUECTEL_PRODUCT_EG95 0x0195 #define QUECTEL_PRODUCT_BG96 0x0296 @@ -1197,6 +1198,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0x00, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K, 0xff, 0xff, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x30) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0x00, 0x40) }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM060K_128, 0xff, 0xff, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x30) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0x00, 0x40) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM061K_LCN, 0xff, 0xff, 0x40) }, From 5b9a5cf1bf4ab56934314c5181586bd851ac668f Mon Sep 17 00:00:00 2001 From: Mohsen Tahmasebi Date: Mon, 10 Jul 2023 11:22:18 +0330 Subject: [PATCH 108/513] USB: serial: option: add Quectel EC200A module support commit 857ea9005806e2a458016880278f98715873e977 upstream. 
Add Quectel EC200A "DIAG, AT, MODEM": 0x6005: ECM / RNDIS + DIAG + AT + MODEM T: Bus=01 Lev=01 Prnt=02 Port=05 Cnt=01 Dev#= 8 Spd=480 MxCh= 0 D: Ver= 2.00 Cls=ef(misc ) Sub=02 Prot=01 MxPS=64 #Cfgs= 1 P: Vendor=2c7c ProdID=6005 Rev=03.18 S: Manufacturer=Android S: Product=Android S: SerialNumber=0000 C: #Ifs= 5 Cfg#= 1 Atr=e0 MxPwr=500mA I: If#= 0 Alt= 0 #EPs= 1 Cls=02(commc) Sub=06 Prot=00 Driver=cdc_ether E: Ad=87(I) Atr=03(Int.) MxPS= 64 Ivl=4096ms I: If#= 1 Alt= 1 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=cdc_ether E: Ad=0c(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=83(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms I: If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=00 Prot=00 Driver=option E: Ad=0b(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=82(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms I: If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option E: Ad=0f(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=86(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=89(I) Atr=03(Int.) MxPS= 64 Ivl=4096ms I: If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=00 Prot=00 Driver=option E: Ad=0a(O) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=81(I) Atr=02(Bulk) MxPS= 512 Ivl=0ms E: Ad=88(I) Atr=03(Int.) MxPS= 64 Ivl=4096ms Signed-off-by: Mohsen Tahmasebi Tested-by: Mostafa Ghofrani Cc: stable@vger.kernel.org Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/option.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index efd57c6a5f81..119641761c3b 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -269,6 +269,7 @@ static void option_instat_callback(struct urb *urb); #define QUECTEL_PRODUCT_RM520N 0x0801 #define QUECTEL_PRODUCT_EC200U 0x0901 #define QUECTEL_PRODUCT_EC200S_CN 0x6002 +#define QUECTEL_PRODUCT_EC200A 0x6005 #define QUECTEL_PRODUCT_EM061K_LWW 0x6008 #define QUECTEL_PRODUCT_EM061K_LCN 0x6009 #define QUECTEL_PRODUCT_EC200T 0x6026 @@ -1229,6 +1230,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM520N, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0900, 0xff, 0, 0), /* RM500U-CN */ .driver_info = ZLP }, + { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200A, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200U, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200S_CN, 0xff, 0, 0) }, { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EC200T, 0xff, 0, 0) }, From 378e036237419927bfef0b00a21dcc6226447c7c Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Wed, 12 Jul 2023 16:16:41 +0200 Subject: [PATCH 109/513] USB: serial: simple: add Kaufmann RKS+CAN VCP commit dd92c8a1f99bcd166204ffc219ea5a23dd65d64f upstream. Add the device and product ID for this CAN bus interface / license dongle. The device is usable either directly from user space or can be attached to a kernel CAN interface with slcan_attach. 
Reported-by: Kaufmann Automotive GmbH Tested-by: Kaufmann Automotive GmbH Signed-off-by: Oliver Neukum [ johan: amend commit message and move entries in sort order ] Cc: stable@vger.kernel.org Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/usb-serial-simple.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index 4c6747889a19..3c552e4b87ce 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -63,6 +63,11 @@ DEVICE(flashloader, FLASHLOADER_IDS); 0x01) } DEVICE(google, GOOGLE_IDS); +/* KAUFMANN RKS+CAN VCP */ +#define KAUFMANN_IDS() \ + { USB_DEVICE(0x16d0, 0x0870) } +DEVICE(kaufmann, KAUFMANN_IDS); + /* Libtransistor USB console */ #define LIBTRANSISTOR_IDS() \ { USB_DEVICE(0x1209, 0x8b00) } @@ -124,6 +129,7 @@ static struct usb_serial_driver * const serial_drivers[] = { &funsoft_device, &flashloader_device, &google_device, + &kaufmann_device, &libtransistor_device, &vivopay_device, &moto_modem_device, @@ -142,6 +148,7 @@ static const struct usb_device_id id_table[] = { FUNSOFT_IDS(), FLASHLOADER_IDS(), GOOGLE_IDS(), + KAUFMANN_IDS(), LIBTRANSISTOR_IDS(), VIVOPAY_IDS(), MOTO_IDS(), From 0ac13ef002090a7584fb69990b1d5370fa0ac651 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 20 Jul 2023 09:53:57 +0200 Subject: [PATCH 110/513] USB: serial: simple: sort driver entries commit d245aedc00775c4d7265a9f4522cc4e1fd34d102 upstream. Sort the driver symbols alphabetically in order to make it more obvious where new driver entries should be added. Cc: stable@vger.kernel.org Acked-by: Greg Kroah-Hartman Signed-off-by: Johan Hovold Signed-off-by: Greg Kroah-Hartman --- drivers/usb/serial/usb-serial-simple.c | 66 +++++++++++++------------- 1 file changed, 33 insertions(+), 33 deletions(-) diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c index 3c552e4b87ce..24b8772a345e 100644 --- a/drivers/usb/serial/usb-serial-simple.c +++ b/drivers/usb/serial/usb-serial-simple.c @@ -38,16 +38,6 @@ static struct usb_serial_driver vendor##_device = { \ { USB_DEVICE(0x0a21, 0x8001) } /* MMT-7305WW */ DEVICE(carelink, CARELINK_IDS); -/* ZIO Motherboard USB driver */ -#define ZIO_IDS() \ - { USB_DEVICE(0x1CBE, 0x0103) } -DEVICE(zio, ZIO_IDS); - -/* Funsoft Serial USB driver */ -#define FUNSOFT_IDS() \ - { USB_DEVICE(0x1404, 0xcddc) } -DEVICE(funsoft, FUNSOFT_IDS); - /* Infineon Flashloader driver */ #define FLASHLOADER_IDS() \ { USB_DEVICE_INTERFACE_CLASS(0x058b, 0x0041, USB_CLASS_CDC_DATA) }, \ @@ -55,6 +45,11 @@ DEVICE(funsoft, FUNSOFT_IDS); { USB_DEVICE(0x8087, 0x0801) } DEVICE(flashloader, FLASHLOADER_IDS); +/* Funsoft Serial USB driver */ +#define FUNSOFT_IDS() \ + { USB_DEVICE(0x1404, 0xcddc) } +DEVICE(funsoft, FUNSOFT_IDS); + /* Google Serial USB SubClass */ #define GOOGLE_IDS() \ { USB_VENDOR_AND_INTERFACE_INFO(0x18d1, \ @@ -63,6 +58,11 @@ DEVICE(flashloader, FLASHLOADER_IDS); 0x01) } DEVICE(google, GOOGLE_IDS); +/* HP4x (48/49) Generic Serial driver */ +#define HP4X_IDS() \ + { USB_DEVICE(0x03f0, 0x0121) } +DEVICE(hp4x, HP4X_IDS); + /* KAUFMANN RKS+CAN VCP */ #define KAUFMANN_IDS() \ { USB_DEVICE(0x16d0, 0x0870) } @@ -73,11 +73,6 @@ DEVICE(kaufmann, KAUFMANN_IDS); { USB_DEVICE(0x1209, 0x8b00) } DEVICE(libtransistor, LIBTRANSISTOR_IDS); -/* ViVOpay USB Serial Driver */ -#define VIVOPAY_IDS() \ - { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */ -DEVICE(vivopay, VIVOPAY_IDS); - /* Motorola USB Phone 
driver */ #define MOTO_IDS() \ { USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */ \ @@ -106,10 +101,10 @@ DEVICE(nokia, NOKIA_IDS); { USB_DEVICE(0x09d7, 0x0100) } /* NovAtel FlexPack GPS */ DEVICE_N(novatel_gps, NOVATEL_IDS, 3); -/* HP4x (48/49) Generic Serial driver */ -#define HP4X_IDS() \ - { USB_DEVICE(0x03f0, 0x0121) } -DEVICE(hp4x, HP4X_IDS); +/* Siemens USB/MPI adapter */ +#define SIEMENS_IDS() \ + { USB_DEVICE(0x908, 0x0004) } +DEVICE(siemens_mpi, SIEMENS_IDS); /* Suunto ANT+ USB Driver */ #define SUUNTO_IDS() \ @@ -117,47 +112,52 @@ DEVICE(hp4x, HP4X_IDS); { USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */ DEVICE(suunto, SUUNTO_IDS); -/* Siemens USB/MPI adapter */ -#define SIEMENS_IDS() \ - { USB_DEVICE(0x908, 0x0004) } -DEVICE(siemens_mpi, SIEMENS_IDS); +/* ViVOpay USB Serial Driver */ +#define VIVOPAY_IDS() \ + { USB_DEVICE(0x1d5f, 0x1004) } /* ViVOpay 8800 */ +DEVICE(vivopay, VIVOPAY_IDS); + +/* ZIO Motherboard USB driver */ +#define ZIO_IDS() \ + { USB_DEVICE(0x1CBE, 0x0103) } +DEVICE(zio, ZIO_IDS); /* All of the above structures mushed into two lists */ static struct usb_serial_driver * const serial_drivers[] = { &carelink_device, - &zio_device, - &funsoft_device, &flashloader_device, + &funsoft_device, &google_device, + &hp4x_device, &kaufmann_device, &libtransistor_device, - &vivopay_device, &moto_modem_device, &motorola_tetra_device, &nokia_device, &novatel_gps_device, - &hp4x_device, - &suunto_device, &siemens_mpi_device, + &suunto_device, + &vivopay_device, + &zio_device, NULL }; static const struct usb_device_id id_table[] = { CARELINK_IDS(), - ZIO_IDS(), - FUNSOFT_IDS(), FLASHLOADER_IDS(), + FUNSOFT_IDS(), GOOGLE_IDS(), + HP4X_IDS(), KAUFMANN_IDS(), LIBTRANSISTOR_IDS(), - VIVOPAY_IDS(), MOTO_IDS(), MOTOROLA_TETRA_IDS(), NOKIA_IDS(), NOVATEL_IDS(), - HP4X_IDS(), - SUUNTO_IDS(), SIEMENS_IDS(), + SUUNTO_IDS(), + VIVOPAY_IDS(), + ZIO_IDS(), { }, }; MODULE_DEVICE_TABLE(usb, id_table); From 97620ed1bcab00aece67f912dcea120282fbb3f9 Mon Sep 17 00:00:00 2001 From: Marc Kleine-Budde Date: Tue, 18 Jul 2023 11:43:54 +0200 Subject: [PATCH 111/513] can: gs_usb: gs_can_close(): add missing set of CAN state to CAN_STATE_STOPPED commit f8a2da6ec2417cca169fa85a8ab15817bccbb109 upstream. After an initial link up the CAN device is in ERROR-ACTIVE mode. Due to a missing CAN_STATE_STOPPED in gs_can_close() it doesn't change to STOPPED after a link down: | ip link set dev can0 up | ip link set dev can0 down | ip --details link show can0 | 13: can0: mtu 16 qdisc pfifo_fast state DOWN mode DEFAULT group default qlen 10 | link/can promiscuity 0 allmulti 0 minmtu 0 maxmtu 0 | can state ERROR-ACTIVE restart-ms 1000 Add missing assignment of CAN_STATE_STOPPED in gs_can_close(). 
Cc: stable@vger.kernel.org Fixes: d08e973a77d1 ("can: gs_usb: Added support for the GS_USB CAN devices") Link: https://lore.kernel.org/all/20230718-gs_usb-fix-can-state-v1-1-f19738ae2c23@pengutronix.de Signed-off-by: Marc Kleine-Budde Signed-off-by: Greg Kroah-Hartman --- drivers/net/can/usb/gs_usb.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 5a43e542b302..5d6062fbebfc 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -734,6 +734,8 @@ static int gs_can_close(struct net_device *netdev) usb_kill_anchored_urbs(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); + dev->can.state = CAN_STATE_STOPPED; + /* reset the device */ rc = gs_cmd_reset(dev); if (rc < 0) From a2d2fa661293948790d68aecd920838194d835b9 Mon Sep 17 00:00:00 2001 From: Jakub Vanek Date: Fri, 14 Jul 2023 14:24:19 +0200 Subject: [PATCH 112/513] Revert "usb: dwc3: core: Enable AutoRetry feature in the controller" commit 734ae15ab95a18d3d425fc9cb38b7a627d786f08 upstream. This reverts commit b138e23d3dff90c0494925b4c1874227b81bddf7. AutoRetry has been found to sometimes cause controller freezes when communicating with buggy USB devices. This controller feature allows the controller in host mode to send non-terminating/burst retry ACKs instead of terminating retry ACKs to devices when a transaction error (CRC error or overflow) occurs. Unfortunately, if the USB device continues to respond with a CRC error, the controller will not complete endpoint-related commands while it keeps trying to auto-retry. [3] The xHCI driver will notice this once it tries to abort the transfer using a Stop Endpoint command and does not receive a completion in time. [1] This situation is reported to dmesg: [sda] tag#29 uas_eh_abort_handler 0 uas-tag 1 inflight: CMD IN [sda] tag#29 CDB: opcode=0x28 28 00 00 69 42 80 00 00 48 00 xhci-hcd: xHCI host not responding to stop endpoint command xhci-hcd: xHCI host controller not responding, assume dead xhci-hcd: HC died; cleaning up Some users observed this problem on an Odroid HC2 with the JMS578 USB3-to-SATA bridge. The issue can be triggered by starting a read-heavy workload on an attached SSD. After a while, the host controller would die and the SSD would disappear from the system. [1] Further analysis by Synopsys determined that controller revisions other than the one in Odroid HC2 are also affected by this. The recommended solution was to disable AutoRetry altogether. This change does not have a noticeable performance impact. [2] Revert the enablement commit. This will keep the AutoRetry bit in the default state configured during SoC design [2]. 
Fixes: b138e23d3dff ("usb: dwc3: core: Enable AutoRetry feature in the controller") Link: https://lore.kernel.org/r/a21f34c04632d250cd0a78c7c6f4a1c9c7a43142.camel@gmail.com/ [1] Link: https://lore.kernel.org/r/20230711214834.kyr6ulync32d4ktk@synopsys.com/ [2] Link: https://lore.kernel.org/r/20230712225518.2smu7wse6djc7l5o@synopsys.com/ [3] Cc: stable@vger.kernel.org Cc: Mauro Ribeiro Cc: Krzysztof Kozlowski Suggested-by: Thinh Nguyen Signed-off-by: Jakub Vanek Acked-by: Thinh Nguyen Link: https://lore.kernel.org/r/20230714122419.27741-1-linuxtardis@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/dwc3/core.c | 16 ---------------- drivers/usb/dwc3/core.h | 3 --- 2 files changed, 19 deletions(-) diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 9dbea2148362..005bd1286926 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1093,22 +1093,6 @@ static int dwc3_core_init(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_GUCTL1, reg); } - if (dwc->dr_mode == USB_DR_MODE_HOST || - dwc->dr_mode == USB_DR_MODE_OTG) { - reg = dwc3_readl(dwc->regs, DWC3_GUCTL); - - /* - * Enable Auto retry Feature to make the controller operating in - * Host mode on seeing transaction errors(CRC errors or internal - * overrun scenerios) on IN transfers to reply to the device - * with a non-terminating retry ACK (i.e, an ACK transcation - * packet with Retry=1 & Nump != 0) - */ - reg |= DWC3_GUCTL_HSTINAUTORETRY; - - dwc3_writel(dwc->regs, DWC3_GUCTL, reg); - } - /* * Must config both number of packets and max burst settings to enable * RX and/or TX threshold. diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 84cdac33cb35..3dcb5b744f7c 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -252,9 +252,6 @@ #define DWC3_GCTL_GBLHIBERNATIONEN BIT(1) #define DWC3_GCTL_DSBLCLKGTNG BIT(0) -/* Global User Control Register */ -#define DWC3_GUCTL_HSTINAUTORETRY BIT(14) - /* Global User Control 1 Register */ #define DWC3_GUCTL1_DEV_DECOUPLE_L1L2_EVT BIT(31) #define DWC3_GUCTL1_TX_IPGAP_LINECHECK_DIS BIT(28) From 6f126e0263073242f76f4811f06382fe67582623 Mon Sep 17 00:00:00 2001 From: Gratian Crisan Date: Wed, 26 Jul 2023 13:45:56 -0500 Subject: [PATCH 113/513] usb: dwc3: pci: skip BYT GPIO lookup table for hardwired phy commit b32b8f2b9542d8039f5468303a6ca78c1b5611a5 upstream. Hardware based on the Bay Trail / BYT SoCs require an external ULPI phy for USB device-mode. The phy chip usually has its 'reset' and 'chip select' lines connected to GPIOs described by ACPI fwnodes in the DSDT table. Because of hardware with missing ACPI resources for the 'reset' and 'chip select' GPIOs commit 5741022cbdf3 ("usb: dwc3: pci: Add GPIO lookup table on platforms without ACPI GPIO resources") introduced a fallback gpiod_lookup_table with hard-coded mappings for Bay Trail devices. However there are existing Bay Trail based devices, like the National Instruments cRIO-903x series, where the phy chip has its 'reset' and 'chip-select' lines always asserted in hardware via resistor pull-ups. On this hardware the phy chip is always enabled and the ACPI dsdt table is missing information not only for the 'chip-select' and 'reset' lines but also for the BYT GPIO controller itself "INT33FC". With the introduction of the gpiod_lookup_table initializing the USB device-mode on these hardware now errors out. 
The error comes from the gpiod_get_optional() calls in dwc3_pci_quirks() which will now return an -ENOENT error due to the missing ACPI entry for the INT33FC gpio controller used in the aforementioned table. This hardware used to work before because gpiod_get_optional() will return NULL instead of -ENOENT if no GPIO has been assigned to the requested function. The dwc3_pci_quirks() code for setting the 'cs' and 'reset' GPIOs was then skipped (due to the NULL return). This is the correct behavior in cases where the phy chip is hardwired and there are no GPIOs to control. Since the gpiod_lookup_table relies on the presence of INT33FC fwnode in ACPI tables only add the table if we know the entry for the INT33FC gpio controller is present. This allows Bay Trail based devices with hardwired dwc3 ULPI phys to continue working. Fixes: 5741022cbdf3 ("usb: dwc3: pci: Add GPIO lookup table on platforms without ACPI GPIO resources") Cc: stable Signed-off-by: Gratian Crisan Reviewed-by: Hans de Goede Link: https://lore.kernel.org/r/20230726184555.218091-2-gratian.crisan@ni.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/dwc3/dwc3-pci.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 4bcfcb98f5ec..1872de3ce98b 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -219,10 +219,12 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc, /* * A lot of BYT devices lack ACPI resource entries for - * the GPIOs, add a fallback mapping to the reference + * the GPIOs. If the ACPI entry for the GPIO controller + * is present add a fallback mapping to the reference * design GPIOs which all boards seem to use. */ - gpiod_add_lookup_table(&platform_bytcr_gpios); + if (acpi_dev_present("INT33FC", NULL, -1)) + gpiod_add_lookup_table(&platform_bytcr_gpios); /* * These GPIOs will turn on the USB2 PHY. Note that we have to From 6366b1178545e0a29f69845938153aa3c7aa603b Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Wed, 28 Jun 2023 00:20:18 +0800 Subject: [PATCH 114/513] usb: dwc3: don't reset device side if dwc3 was configured as host-only commit e835c0a4e23c38531dcee5ef77e8d1cf462658c7 upstream. Commit c4a5153e87fd ("usb: dwc3: core: Power-off core/PHYs on system_suspend in host mode") replaces check for HOST only dr_mode with current_dr_role. But during booting, the current_dr_role isn't initialized, thus the device side reset is always issued even if dwc3 was configured as host-only. What's more, on some platforms with host only dwc3, aways issuing device side reset by accessing device register block can cause kernel panic. Fixes: c4a5153e87fd ("usb: dwc3: core: Power-off core/PHYs on system_suspend in host mode") Cc: stable Signed-off-by: Jisheng Zhang Acked-by: Thinh Nguyen Link: https://lore.kernel.org/r/20230627162018.739-1-jszhang@kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/dwc3/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 005bd1286926..f2e841bc05c7 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -275,9 +275,9 @@ int dwc3_core_soft_reset(struct dwc3 *dwc) /* * We're resetting only the device side because, if we're in host mode, * XHCI driver will reset the host block. If dwc3 was configured for - * host-only mode, then we can return early. + * host-only mode or current role is host, then we can return early. 
*/ - if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) + if (dwc->dr_mode == USB_DR_MODE_HOST || dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) return 0; reg = dwc3_readl(dwc->regs, DWC3_DCTL); From 8fb5a01196dfb739159983e55b437b074e5fac95 Mon Sep 17 00:00:00 2001 From: Guiting Shen Date: Mon, 26 Jun 2023 23:27:13 +0800 Subject: [PATCH 115/513] usb: ohci-at91: Fix the unhandle interrupt when resume commit c55afcbeaa7a6f4fffdbc999a9bf3f0b29a5186f upstream. The ohci_hcd_at91_drv_suspend() sets ohci->rh_state to OHCI_RH_HALTED when suspend which will let the ohci_irq() skip the interrupt after resume. And nobody to handle this interrupt. According to the comment in ohci_hcd_at91_drv_suspend(), it need to reset when resume from suspend(MEM) to fix by setting "hibernated" argument of ohci_resume(). Signed-off-by: Guiting Shen Cc: stable Reviewed-by: Alan Stern Link: https://lore.kernel.org/r/20230626152713.18950-1-aarongt.shen@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/ohci-at91.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c index a24aea3d2759..e72f2e456f4a 100644 --- a/drivers/usb/host/ohci-at91.c +++ b/drivers/usb/host/ohci-at91.c @@ -652,7 +652,13 @@ ohci_hcd_at91_drv_resume(struct device *dev) else at91_start_clock(ohci_at91); - ohci_resume(hcd, false); + /* + * According to the comment in ohci_hcd_at91_drv_suspend() + * we need to do a reset if the 48Mhz clock was stopped, + * that is, if ohci_at91->wakeup is clear. Tell ohci_resume() + * to reset in this case by setting its "hibernated" flag. + */ + ohci_resume(hcd, !ohci_at91->wakeup); return 0; } From 3af06a8502ee9f77876d3cf63af5c8be2a945d99 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Bartosik?= Date: Mon, 24 Jul 2023 13:29:11 +0200 Subject: [PATCH 116/513] USB: quirks: add quirk for Focusrite Scarlett MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 9dc162e22387080e2d06de708b89920c0e158c9a upstream. The Focusrite Scarlett audio device does not behave correctly during resumes. Below is what happens during every resume (captured with Beagle 5000): // The Scarlett disconnects and is enumerated again. However from time to time it drops completely off the USB bus during resume. Below is captured occurrence of such an event: // // To fix the condition a user has to unplug and plug the device again. With USB_QUIRK_RESET_RESUME applied ("usbcore.quirks=1235:8211:b") for the Scarlett audio device the issue still reproduces. Applying USB_QUIRK_DISCONNECT_SUSPEND ("usbcore.quirks=1235:8211:m") fixed the issue and the Scarlett audio device didn't drop off the USB bus for ~5000 suspend/resume cycles where originally issue reproduced in ~100 or less suspend/resume cycles. 
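Sketched from the hunk below, the resume path now derives the "hibernated"
argument from the wakeup state instead of passing false unconditionally:

    /* ohci_at91->wakeup clear means the 48 MHz clock was stopped during
     * suspend, so ask ohci_resume() to reset the controller */
    ohci_resume(hcd, !ohci_at91->wakeup);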
Signed-off-by: Łukasz Bartosik Cc: stable Link: https://lore.kernel.org/r/20230724112911.1802577-1-lb@semihalf.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/core/quirks.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 934b3d997702..15e9bd180a1d 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -436,6 +436,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* novation SoundControl XL */ { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Focusrite Scarlett Solo USB */ + { USB_DEVICE(0x1235, 0x8211), .driver_info = + USB_QUIRK_DISCONNECT_SUSPEND }, + /* Huawei 4G LTE module */ { USB_DEVICE(0x12d1, 0x15bb), .driver_info = USB_QUIRK_DISCONNECT_SUSPEND }, From cd2d96c4bc6fffcc2d7f03fb95efd8b936174820 Mon Sep 17 00:00:00 2001 From: Frank Li Date: Fri, 7 Jul 2023 19:00:15 -0400 Subject: [PATCH 117/513] usb: cdns3: fix incorrect calculation of ep_buf_size when more than one config commit 2627335a1329a0d39d8d277994678571c4f21800 upstream. Previously, the cdns3_gadget_check_config() function in the cdns3 driver mistakenly calculated the ep_buf_size by considering only one configuration's endpoint information because "claimed" will be clear after call usb_gadget_check_config(). The fix involves checking the private flags EP_CLAIMED instead of relying on the "claimed" flag. Fixes: dce49449e04f ("usb: cdns3: allocate TX FIFO size according to composite EP number") Cc: stable Reported-by: Ravi Gunasekaran Signed-off-by: Frank Li Acked-by: Peter Chen Tested-by: Ravi Gunasekaran Link: https://lore.kernel.org/r/20230707230015.494999-2-Frank.Li@nxp.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/cdns3/cdns3-gadget.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/usb/cdns3/cdns3-gadget.c b/drivers/usb/cdns3/cdns3-gadget.c index 1dcadef933e3..69a44bd7e5d0 100644 --- a/drivers/usb/cdns3/cdns3-gadget.c +++ b/drivers/usb/cdns3/cdns3-gadget.c @@ -3012,12 +3012,14 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget) static int cdns3_gadget_check_config(struct usb_gadget *gadget) { struct cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); + struct cdns3_endpoint *priv_ep; struct usb_ep *ep; int n_in = 0; int total; list_for_each_entry(ep, &gadget->ep_list, ep_list) { - if (ep->claimed && (ep->address & USB_DIR_IN)) + priv_ep = ep_to_cdns3_ep(ep); + if ((priv_ep->flags & EP_CLAIMED) && (ep->address & USB_DIR_IN)) n_in++; } From 2eaa43508a0e227fa442bb91e1afc3422ddad064 Mon Sep 17 00:00:00 2001 From: Ricardo Ribalda Date: Wed, 19 Jul 2023 13:01:04 +0000 Subject: [PATCH 118/513] usb: xhci-mtk: set the dma max_seg_size commit 9fd10829a9eb482e192a845675ecc5480e0bfa10 upstream. 
Allow devices to have dma operations beyond 64K, and avoid warnings such as: DMA-API: xhci-mtk 11200000.usb: mapping sg segment longer than device claims to support [len=98304] [max=65536] Fixes: 0cbd4b34cda9 ("xhci: mediatek: support MTK xHCI host controller") Cc: stable Tested-by: Zubin Mithra Reported-by: Zubin Mithra Signed-off-by: Ricardo Ribalda Link: https://lore.kernel.org/r/20230628-mtk-usb-v2-1-c8c34eb9f229@chromium.org Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-mtk.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index f8a63c143492..b55ddc1156cc 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -570,6 +570,7 @@ static int xhci_mtk_probe(struct platform_device *pdev) } device_init_wakeup(dev, true); + dma_set_max_seg_size(dev, UINT_MAX); xhci = hcd_to_xhci(hcd); xhci->main_hcd = hcd; From 98a118840b71e3dbad62229f1b00b4ab6f73e27b Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 4 Jul 2023 17:08:27 +0300 Subject: [PATCH 119/513] Revert "usb: xhci: tegra: Fix error check" commit 288b4fa1798e3637a9304c6e90a93d900e02369c upstream. This reverts commit 18fc7c435be3f17ea26a21b2e2312fcb9088e01f. The reverted commit was based on static analysis and a misunderstanding of how PTR_ERR() and NULLs are supposed to work. When a function returns both pointer errors and NULL then normally the NULL means "continue operating without a feature because it was deliberately turned off". The NULL should not be treated as a failure. If a driver cannot work when that feature is disabled then the KConfig should enforce that the function cannot return NULL. We should not need to test for it. In this code, the patch means that certain tegra_xusb_probe() will fail if the firmware supports power-domains but CONFIG_PM is disabled. Signed-off-by: Dan Carpenter Fixes: 18fc7c435be3 ("usb: xhci: tegra: Fix error check") Cc: stable Link: https://lore.kernel.org/r/8baace8d-fb4b-41a4-ad5f-848ae643a23b@moroto.mountain Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-tegra.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 32df571bb233..51eabc5e8770 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1010,15 +1010,15 @@ static int tegra_xusb_powerdomain_init(struct device *dev, int err; tegra->genpd_dev_host = dev_pm_domain_attach_by_name(dev, "xusb_host"); - if (IS_ERR_OR_NULL(tegra->genpd_dev_host)) { - err = PTR_ERR(tegra->genpd_dev_host) ? : -ENODATA; + if (IS_ERR(tegra->genpd_dev_host)) { + err = PTR_ERR(tegra->genpd_dev_host); dev_err(dev, "failed to get host pm-domain: %d\n", err); return err; } tegra->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "xusb_ss"); - if (IS_ERR_OR_NULL(tegra->genpd_dev_ss)) { - err = PTR_ERR(tegra->genpd_dev_ss) ? : -ENODATA; + if (IS_ERR(tegra->genpd_dev_ss)) { + err = PTR_ERR(tegra->genpd_dev_ss); dev_err(dev, "failed to get superspeed pm-domain: %d\n", err); return err; } From 28ae486f8e361c1c64a5a8c8a6a31e55b65e4a2a Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 30 Jun 2023 09:14:20 +0200 Subject: [PATCH 120/513] Documentation: security-bugs.rst: update preferences when dealing with the linux-distros group commit 4fee0915e649bd0cea56dece6d96f8f4643df33c upstream. 
Because the linux-distros group forces reporters to release information about reported bugs, and they impose arbitrary deadlines in having those bugs fixed despite not actually being kernel developers, the kernel security team recommends not interacting with them at all as this just causes confusion and the early-release of reported security problems. Reviewed-by: Kees Cook Link: https://lore.kernel.org/r/2023063020-throat-pantyhose-f110@gregkh Signed-off-by: Greg Kroah-Hartman --- Documentation/admin-guide/security-bugs.rst | 26 ++++++++++----------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/Documentation/admin-guide/security-bugs.rst b/Documentation/admin-guide/security-bugs.rst index 82e29837d589..f12ac2316ce7 100644 --- a/Documentation/admin-guide/security-bugs.rst +++ b/Documentation/admin-guide/security-bugs.rst @@ -63,20 +63,18 @@ information submitted to the security list and any followup discussions of the report are treated confidentially even after the embargo has been lifted, in perpetuity. -Coordination ------------- - -Fixes for sensitive bugs, such as those that might lead to privilege -escalations, may need to be coordinated with the private - mailing list so that distribution vendors -are well prepared to issue a fixed kernel upon public disclosure of the -upstream fix. Distros will need some time to test the proposed patch and -will generally request at least a few days of embargo, and vendor update -publication prefers to happen Tuesday through Thursday. When appropriate, -the security team can assist with this coordination, or the reporter can -include linux-distros from the start. In this case, remember to prefix -the email Subject line with "[vs]" as described in the linux-distros wiki: - +Coordination with other groups +------------------------------ + +The kernel security team strongly recommends that reporters of potential +security issues NEVER contact the "linux-distros" mailing list until +AFTER discussing it with the kernel security team. Do not Cc: both +lists at once. You may contact the linux-distros mailing list after a +fix has been agreed on and you fully understand the requirements that +doing so will impose on you and the kernel community. + +The different lists have different goals and the linux-distros rules do +not contribute to actually fixing any potential security problems. CVE assignment -------------- From ba2975efe979e0167ca83a0f37e034d2a3b10c47 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 30 Jun 2023 09:14:21 +0200 Subject: [PATCH 121/513] Documentation: security-bugs.rst: clarify CVE handling commit 3c1897ae4b6bc7cc586eda2feaa2cd68325ec29c upstream. The kernel security team does NOT assign CVEs, so document that properly and provide the "if you want one, ask MITRE for it" response that we give on a weekly basis in the document, so we don't have to constantly say it to everyone who asks. Link: https://lore.kernel.org/r/2023063022-retouch-kerosene-7e4a@gregkh Signed-off-by: Greg Kroah-Hartman --- Documentation/admin-guide/security-bugs.rst | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/Documentation/admin-guide/security-bugs.rst b/Documentation/admin-guide/security-bugs.rst index f12ac2316ce7..5a6993795bd2 100644 --- a/Documentation/admin-guide/security-bugs.rst +++ b/Documentation/admin-guide/security-bugs.rst @@ -79,13 +79,12 @@ not contribute to actually fixing any potential security problems. 
CVE assignment -------------- -The security team does not normally assign CVEs, nor do we require them -for reports or fixes, as this can needlessly complicate the process and -may delay the bug handling. If a reporter wishes to have a CVE identifier -assigned ahead of public disclosure, they will need to contact the private -linux-distros list, described above. When such a CVE identifier is known -before a patch is provided, it is desirable to mention it in the commit -message if the reporter agrees. +The security team does not assign CVEs, nor do we require them for +reports or fixes, as this can needlessly complicate the process and may +delay the bug handling. If a reporter wishes to have a CVE identifier +assigned, they should find one by themselves, for example by contacting +MITRE directly. However under no circumstances will a patch inclusion +be delayed to wait for a CVE identifier to arrive. Non-disclosure agreements ------------------------- From acacdbe0f740ca8c5d5da73d50870903a3ded677 Mon Sep 17 00:00:00 2001 From: Larry Finger Date: Fri, 14 Jul 2023 12:54:17 -0500 Subject: [PATCH 122/513] staging: r8712: Fix memory leak in _r8712_init_xmit_priv() commit ac83631230f77dda94154ed0ebfd368fc81c70a3 upstream. In the above mentioned routine, memory is allocated in several places. If the first succeeds and a later one fails, the routine will leak memory. This patch fixes commit 2865d42c78a9 ("staging: r8712u: Add the new driver to the mainline kernel"). A potential memory leak in r8712_xmit_resource_alloc() is also addressed. Fixes: 2865d42c78a9 ("staging: r8712u: Add the new driver to the mainline kernel") Reported-by: syzbot+cf71097ffb6755df8251@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/x/log.txt?x=11ac3fa0a80000 Cc: stable@vger.kernel.org Cc: Nam Cao Signed-off-by: Larry Finger Reviewed-by: Nam Cao Link: https://lore.kernel.org/r/20230714175417.18578-1-Larry.Finger@lwfinger.net Signed-off-by: Greg Kroah-Hartman --- drivers/staging/rtl8712/rtl871x_xmit.c | 43 ++++++++++++++++++++------ drivers/staging/rtl8712/xmit_linux.c | 6 ++++ 2 files changed, 40 insertions(+), 9 deletions(-) diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c index 090345bad223..6353dbe554d3 100644 --- a/drivers/staging/rtl8712/rtl871x_xmit.c +++ b/drivers/staging/rtl8712/rtl871x_xmit.c @@ -21,6 +21,7 @@ #include "osdep_intf.h" #include "usb_ops.h" +#include #include static const u8 P802_1H_OUI[P80211_OUI_LEN] = {0x00, 0x00, 0xf8}; @@ -55,6 +56,7 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv, sint i; struct xmit_buf *pxmitbuf; struct xmit_frame *pxframe; + int j; memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv)); spin_lock_init(&pxmitpriv->lock); @@ -117,11 +119,8 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv, _init_queue(&pxmitpriv->pending_xmitbuf_queue); pxmitpriv->pallocated_xmitbuf = kmalloc(NR_XMITBUFF * sizeof(struct xmit_buf) + 4, GFP_ATOMIC); - if (!pxmitpriv->pallocated_xmitbuf) { - kfree(pxmitpriv->pallocated_frame_buf); - pxmitpriv->pallocated_frame_buf = NULL; - return -ENOMEM; - } + if (!pxmitpriv->pallocated_xmitbuf) + goto clean_up_frame_buf; pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 - ((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3); pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf; @@ -129,13 +128,17 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv, INIT_LIST_HEAD(&pxmitbuf->list); pxmitbuf->pallocated_buf = kmalloc(MAX_XMITBUF_SZ + XMITBUF_ALIGN_SZ, GFP_ATOMIC); - if 
(!pxmitbuf->pallocated_buf) - return -ENOMEM; + if (!pxmitbuf->pallocated_buf) { + j = 0; + goto clean_up_alloc_buf; + } pxmitbuf->pbuf = pxmitbuf->pallocated_buf + XMITBUF_ALIGN_SZ - ((addr_t) (pxmitbuf->pallocated_buf) & (XMITBUF_ALIGN_SZ - 1)); - if (r8712_xmit_resource_alloc(padapter, pxmitbuf)) - return -ENOMEM; + if (r8712_xmit_resource_alloc(padapter, pxmitbuf)) { + j = 1; + goto clean_up_alloc_buf; + } list_add_tail(&pxmitbuf->list, &(pxmitpriv->free_xmitbuf_queue.queue)); pxmitbuf++; @@ -146,6 +149,28 @@ int _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv, init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry); tasklet_setup(&pxmitpriv->xmit_tasklet, r8712_xmit_bh); return 0; + +clean_up_alloc_buf: + if (j) { + /* failure happened in r8712_xmit_resource_alloc() + * delete extra pxmitbuf->pallocated_buf + */ + kfree(pxmitbuf->pallocated_buf); + } + for (j = 0; j < i; j++) { + int k; + + pxmitbuf--; /* reset pointer */ + kfree(pxmitbuf->pallocated_buf); + for (k = 0; k < 8; k++) /* delete xmit urb's */ + usb_free_urb(pxmitbuf->pxmit_urb[k]); + } + kfree(pxmitpriv->pallocated_xmitbuf); + pxmitpriv->pallocated_xmitbuf = NULL; +clean_up_frame_buf: + kfree(pxmitpriv->pallocated_frame_buf); + pxmitpriv->pallocated_frame_buf = NULL; + return -ENOMEM; } void _free_xmit_priv(struct xmit_priv *pxmitpriv) diff --git a/drivers/staging/rtl8712/xmit_linux.c b/drivers/staging/rtl8712/xmit_linux.c index 90d34cf9d2ff..a820ce7cce71 100644 --- a/drivers/staging/rtl8712/xmit_linux.c +++ b/drivers/staging/rtl8712/xmit_linux.c @@ -118,6 +118,12 @@ int r8712_xmit_resource_alloc(struct _adapter *padapter, for (i = 0; i < 8; i++) { pxmitbuf->pxmit_urb[i] = usb_alloc_urb(0, GFP_KERNEL); if (!pxmitbuf->pxmit_urb[i]) { + int k; + + for (k = i - 1; k >= 0; k--) { + /* handle allocation errors part way through loop */ + usb_free_urb(pxmitbuf->pxmit_urb[k]); + } netdev_err(padapter->pnetdev, "pxmitbuf->pxmit_urb[i] == NULL\n"); return -ENOMEM; } From baf420e30364ef9efe3e29a5c0e01e612aebf3fe Mon Sep 17 00:00:00 2001 From: Zhang Shurong Date: Sun, 9 Jul 2023 13:50:07 +0800 Subject: [PATCH 123/513] staging: ks7010: potential buffer overflow in ks_wlan_set_encode_ext() commit 5f1c7031e044cb2fba82836d55cc235e2ad619dc upstream. The "exc->key_len" is a u16 that comes from the user. If it's over IW_ENCODING_TOKEN_MAX (64) that could lead to memory corruption. 
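The underlying rule is generic: a length copied from userspace must be clamped to the destination buffer's capacity before memcpy(). A minimal sketch of that pattern, with made-up names standing in for key->key_val and IW_ENCODING_TOKEN_MAX:

  #include <linux/minmax.h>
  #include <linux/string.h>
  #include <linux/types.h>

  #define KEY_MAX 64      /* stands in for IW_ENCODING_TOKEN_MAX */

  /* illustrative only: never trust a user-controlled length */
  static void copy_user_key(u8 *dst, const u8 *src, u16 user_len)
  {
          size_t len = clamp_val(user_len, 0, KEY_MAX);

          memcpy(dst, src, len);
  }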
Fixes: b121d84882b9 ("staging: ks7010: simplify calls to memcpy()") Cc: stable Signed-off-by: Zhang Shurong Reviewed-by: Dan Carpenter Link: https://lore.kernel.org/r/tencent_5153B668C0283CAA15AA518325346E026A09@qq.com Signed-off-by: Greg Kroah-Hartman --- drivers/staging/ks7010/ks_wlan_net.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/staging/ks7010/ks_wlan_net.c b/drivers/staging/ks7010/ks_wlan_net.c index 631ad769c3d5..8d8822c60a2b 100644 --- a/drivers/staging/ks7010/ks_wlan_net.c +++ b/drivers/staging/ks7010/ks_wlan_net.c @@ -1584,8 +1584,10 @@ static int ks_wlan_set_encode_ext(struct net_device *dev, commit |= SME_WEP_FLAG; } if (enc->key_len) { - memcpy(&key->key_val[0], &enc->key[0], enc->key_len); - key->key_len = enc->key_len; + int key_len = clamp_val(enc->key_len, 0, IW_ENCODING_TOKEN_MAX); + + memcpy(&key->key_val[0], &enc->key[0], key_len); + key->key_len = key_len; commit |= (SME_WEP_VAL1 << index); } break; From 5138c228311a863c3cf937b94a3ab4c87f1f70c4 Mon Sep 17 00:00:00 2001 From: Chaoyuan Peng Date: Tue, 18 Jul 2023 04:39:43 +0000 Subject: [PATCH 124/513] tty: n_gsm: fix UAF in gsm_cleanup_mux commit 9b9c8195f3f0d74a826077fc1c01b9ee74907239 upstream. In gsm_cleanup_mux() the 'gsm->dlci' pointer was not cleaned properly, leaving it a dangling pointer after gsm_dlci_release. This leads to use-after-free where 'gsm->dlci[0]' are freed and accessed by the subsequent gsm_cleanup_mux(). Such is the case in the following call trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0x1e3/0x2cb lib/dump_stack.c:106 print_address_description+0x63/0x3b0 mm/kasan/report.c:248 __kasan_report mm/kasan/report.c:434 [inline] kasan_report+0x16b/0x1c0 mm/kasan/report.c:451 gsm_cleanup_mux+0x76a/0x850 drivers/tty/n_gsm.c:2397 gsm_config drivers/tty/n_gsm.c:2653 [inline] gsmld_ioctl+0xaae/0x15b0 drivers/tty/n_gsm.c:2986 tty_ioctl+0x8ff/0xc50 drivers/tty/tty_io.c:2816 vfs_ioctl fs/ioctl.c:51 [inline] __do_sys_ioctl fs/ioctl.c:874 [inline] __se_sys_ioctl+0xf1/0x160 fs/ioctl.c:860 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x61/0xcb Allocated by task 3501: kasan_save_stack mm/kasan/common.c:38 [inline] kasan_set_track mm/kasan/common.c:46 [inline] set_alloc_info mm/kasan/common.c:434 [inline] ____kasan_kmalloc+0xba/0xf0 mm/kasan/common.c:513 kasan_kmalloc include/linux/kasan.h:264 [inline] kmem_cache_alloc_trace+0x143/0x290 mm/slub.c:3247 kmalloc include/linux/slab.h:591 [inline] kzalloc include/linux/slab.h:721 [inline] gsm_dlci_alloc+0x53/0x3a0 drivers/tty/n_gsm.c:1932 gsm_activate_mux+0x1c/0x330 drivers/tty/n_gsm.c:2438 gsm_config drivers/tty/n_gsm.c:2677 [inline] gsmld_ioctl+0xd46/0x15b0 drivers/tty/n_gsm.c:2986 tty_ioctl+0x8ff/0xc50 drivers/tty/tty_io.c:2816 vfs_ioctl fs/ioctl.c:51 [inline] __do_sys_ioctl fs/ioctl.c:874 [inline] __se_sys_ioctl+0xf1/0x160 fs/ioctl.c:860 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x61/0xcb Freed by task 3501: kasan_save_stack mm/kasan/common.c:38 [inline] kasan_set_track+0x4b/0x80 mm/kasan/common.c:46 kasan_set_free_info+0x1f/0x40 mm/kasan/generic.c:360 ____kasan_slab_free+0xd8/0x120 mm/kasan/common.c:366 kasan_slab_free include/linux/kasan.h:230 [inline] slab_free_hook mm/slub.c:1705 [inline] slab_free_freelist_hook+0xdd/0x160 mm/slub.c:1731 slab_free mm/slub.c:3499 [inline] kfree+0xf1/0x270 mm/slub.c:4559 dlci_put 
drivers/tty/n_gsm.c:1988 [inline] gsm_dlci_release drivers/tty/n_gsm.c:2021 [inline] gsm_cleanup_mux+0x574/0x850 drivers/tty/n_gsm.c:2415 gsm_config drivers/tty/n_gsm.c:2653 [inline] gsmld_ioctl+0xaae/0x15b0 drivers/tty/n_gsm.c:2986 tty_ioctl+0x8ff/0xc50 drivers/tty/tty_io.c:2816 vfs_ioctl fs/ioctl.c:51 [inline] __do_sys_ioctl fs/ioctl.c:874 [inline] __se_sys_ioctl+0xf1/0x160 fs/ioctl.c:860 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x61/0xcb Fixes: aa371e96f05d ("tty: n_gsm: fix restart handling via CLD command") Signed-off-by: Chaoyuan Peng Cc: stable Signed-off-by: Greg Kroah-Hartman --- drivers/tty/n_gsm.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 813a45887171..54173c23263c 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -2411,8 +2411,10 @@ static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) gsm->has_devices = false; } for (i = NUM_DLCI - 1; i >= 0; i--) - if (gsm->dlci[i]) + if (gsm->dlci[i]) { gsm_dlci_release(gsm->dlci[i]); + gsm->dlci[i] = NULL; + } mutex_unlock(&gsm->mutex); /* Now wipe the queues */ tty_ldisc_flush(gsm->tty); From dd125fcd580acc2414a27d8626718e649385b266 Mon Sep 17 00:00:00 2001 From: Oliver Neukum Date: Thu, 13 Jul 2023 13:28:10 +0200 Subject: [PATCH 125/513] Revert "xhci: add quirk for host controllers that don't update endpoint DCS" commit 5bef4b3cb95a5b883dfec8b3ffc0d671323d55bb upstream. This reverts commit 5255660b208aebfdb71d574f3952cf48392f4306. This quirk breaks at least the following hardware: 0b:00.0 0c03: 1106:3483 (rev 01) (prog-if 30 [XHCI]) Subsystem: 1106:3483 Control: I/O+ Mem+ BusMaster+ SpecCycle- MemWINV- VGASnoop- ParErr- Stepping- SERR- FastB2B- DisINTx+ Status: Cap+ 66MHz- UDF- FastB2B- ParErr- DEVSEL=fast >TAbort- SERR- Fixes: 5255660b208a ("xhci: add quirk for host controllers that don't update endpoint DCS") Cc: stable Link: https://lore.kernel.org/r/20230713112830.21773-1-oneukum@suse.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/host/xhci-pci.c | 4 +--- drivers/usb/host/xhci-ring.c | 25 +------------------------ 2 files changed, 2 insertions(+), 27 deletions(-) diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 6c23b5156448..29a442b62118 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -294,10 +294,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) pdev->device == 0x3432) xhci->quirks |= XHCI_BROKEN_STREAMS; - if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) { + if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) xhci->quirks |= XHCI_LPM_SUPPORT; - xhci->quirks |= XHCI_EP_CTX_BROKEN_DCS; - } if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == PCI_DEVICE_ID_ASMEDIA_1042_XHCI) { diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 73f2ff83e1ad..15e44045230e 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -592,11 +592,8 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, struct xhci_ring *ep_ring; struct xhci_command *cmd; struct xhci_segment *new_seg; - struct xhci_segment *halted_seg = NULL; union xhci_trb *new_deq; int new_cycle; - union xhci_trb *halted_trb; - int index = 0; dma_addr_t addr; u64 hw_dequeue; bool cycle_found = false; @@ -634,27 +631,7 @@ static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci, hw_dequeue = xhci_get_hw_deq(xhci, dev, 
ep_index, stream_id); new_seg = ep_ring->deq_seg; new_deq = ep_ring->dequeue; - - /* - * Quirk: xHC write-back of the DCS field in the hardware dequeue - * pointer is wrong - use the cycle state of the TRB pointed to by - * the dequeue pointer. - */ - if (xhci->quirks & XHCI_EP_CTX_BROKEN_DCS && - !(ep->ep_state & EP_HAS_STREAMS)) - halted_seg = trb_in_td(xhci, td->start_seg, - td->first_trb, td->last_trb, - hw_dequeue & ~0xf, false); - if (halted_seg) { - index = ((dma_addr_t)(hw_dequeue & ~0xf) - halted_seg->dma) / - sizeof(*halted_trb); - halted_trb = &halted_seg->trbs[index]; - new_cycle = halted_trb->generic.field[3] & 0x1; - xhci_dbg(xhci, "Endpoint DCS = %d TRB index = %d cycle = %d\n", - (u8)(hw_dequeue & 0x1), index, new_cycle); - } else { - new_cycle = hw_dequeue & 0x1; - } + new_cycle = hw_dequeue & 0x1; /* * We want to find the pointer, segment and cycle state of the new trb From a676ddc4ca96512309c80fd323a3df36c57bb639 Mon Sep 17 00:00:00 2001 From: Luka Guzenko Date: Tue, 25 Jul 2023 13:15:09 +0200 Subject: [PATCH 126/513] ALSA: hda/relatek: Enable Mute LED on HP 250 G8 commit d510acb610e6aa07a04b688236868b2a5fd60deb upstream. This HP Notebook used ALC236 codec with COEF 0x07 idx 1 controlling the mute LED. Enable already existing quirk for this device. Signed-off-by: Luka Guzenko Cc: Link: https://lore.kernel.org/r/20230725111509.623773-1-l.guzenko@web.de Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 0bc3f3f1b8a7..965720b1d1b1 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -9079,6 +9079,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x880d, "HP EliteBook 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8811, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), SND_PCI_QUIRK(0x103c, 0x8812, "HP Spectre x360 15-eb1xxx", ALC285_FIXUP_HP_SPECTRE_X360_EB1), + SND_PCI_QUIRK(0x103c, 0x881d, "HP 250 G8 Notebook PC", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2), SND_PCI_QUIRK(0x103c, 0x8846, "HP EliteBook 850 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8847, "HP EliteBook x360 830 G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x884b, "HP EliteBook 840 Aero G8 Notebook PC", ALC285_FIXUP_HP_GPIO_LED), From 3f3cdca84432eccba2447df20ddf0e8e95bafdc5 Mon Sep 17 00:00:00 2001 From: Baskaran Kannan Date: Thu, 27 Jul 2023 11:21:59 -0500 Subject: [PATCH 127/513] hwmon: (k10temp) Enable AMD3255 Proc to show negative temperature commit e146503ac68418859fb063a3a0cd9ec93bc52238 upstream. Industrial processor i3255 supports temperatures -40 deg celcius to 105 deg Celcius. The current implementation of k10temp_read_temp rounds off any negative temperatures to '0'. To fix this, the following changes have been made. A flag 'disp_negative' is added to struct k10temp_data to support AMD i3255 processors. Flag 'disp_negative' is set if 3255 processor is found during k10temp_probe. Flag 'disp_negative' is used to determine whether to round off negative temperatures to '0' in k10temp_read_temp. 
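Put differently, only the clamping rule in the read path changes. A rough sketch of the intended behaviour, with values in millidegrees Celsius and an illustrative function name (this is not the driver code itself):

  /* clamp negative readings to 0 only for parts that cannot go below zero */
  static long clamp_reported_temp(long temp_mc, bool disp_negative)
  {
          if (temp_mc < 0 && !disp_negative)
                  return 0;       /* legacy behaviour for ordinary CPUs */
          return temp_mc;         /* i3255: e.g. -40000 for -40 degC */
  }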
Signed-off-by: Baskaran Kannan Link: https://lore.kernel.org/r/20230727162159.1056136-1-Baski.Kannan@amd.com Fixes: aef17ca12719 ("hwmon: (k10temp) Only apply temperature offset if result is positive") Cc: stable@vger.kernel.org [groeck: Fixed multi-line comment] Signed-off-by: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- drivers/hwmon/k10temp.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/drivers/hwmon/k10temp.c b/drivers/hwmon/k10temp.c index e7ba05c50e61..f3cff6c9f745 100644 --- a/drivers/hwmon/k10temp.c +++ b/drivers/hwmon/k10temp.c @@ -97,6 +97,13 @@ static DEFINE_MUTEX(nb_smu_ind_mutex); #define F19H_M01H_CFACTOR_ICORE 1000000 /* 1A / LSB */ #define F19H_M01H_CFACTOR_ISOC 310000 /* 0.31A / LSB */ +/* + * AMD's Industrial processor 3255 supports temperature from -40 deg to 105 deg Celsius. + * Use the model name to identify 3255 CPUs and set a flag to display negative temperature. + * Do not round off to zero for negative Tctl or Tdie values if the flag is set + */ +#define AMD_I3255_STR "3255" + struct k10temp_data { struct pci_dev *pdev; void (*read_htcreg)(struct pci_dev *pdev, u32 *regval); @@ -106,6 +113,7 @@ struct k10temp_data { u32 show_temp; bool is_zen; u32 ccd_offset; + bool disp_negative; }; #define TCTL_BIT 0 @@ -220,12 +228,12 @@ static int k10temp_read_temp(struct device *dev, u32 attr, int channel, switch (channel) { case 0: /* Tctl */ *val = get_raw_temp(data); - if (*val < 0) + if (*val < 0 && !data->disp_negative) *val = 0; break; case 1: /* Tdie */ *val = get_raw_temp(data) - data->temp_offset; - if (*val < 0) + if (*val < 0 && !data->disp_negative) *val = 0; break; case 2 ... 9: /* Tccd{1-8} */ @@ -417,6 +425,11 @@ static int k10temp_probe(struct pci_dev *pdev, const struct pci_device_id *id) data->pdev = pdev; data->show_temp |= BIT(TCTL_BIT); /* Always show Tctl */ + if (boot_cpu_data.x86 == 0x17 && + strstr(boot_cpu_data.x86_model_id, AMD_I3255_STR)) { + data->disp_negative = true; + } + if (boot_cpu_data.x86 == 0x15 && ((boot_cpu_data.x86_model & 0xf0) == 0x60 || (boot_cpu_data.x86_model & 0xf0) == 0x70)) { From a7abb1690fe1ace47070ade58e93350166baa7f8 Mon Sep 17 00:00:00 2001 From: Gilles Buloz Date: Mon, 24 Jul 2023 08:04:44 +0000 Subject: [PATCH 128/513] hwmon: (nct7802) Fix for temp6 (PECI1) processed even if PECI1 disabled commit 54685abe660a59402344d5045ce08c43c6a5ac42 upstream. 
Because of hex value 0x46 used instead of decimal 46, the temp6 (PECI1) temperature is always declared visible and then displayed even if disabled in the chip Signed-off-by: Gilles Buloz Link: https://lore.kernel.org/r/DU0PR10MB62526435ADBC6A85243B90E08002A@DU0PR10MB6252.EURPRD10.PROD.OUTLOOK.COM Fixes: fcdc5739dce03 ("hwmon: (nct7802) add temperature sensor type attribute") Cc: stable@vger.kernel.org Signed-off-by: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- drivers/hwmon/nct7802.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c index 604af2f6103a..88eddb8d61d3 100644 --- a/drivers/hwmon/nct7802.c +++ b/drivers/hwmon/nct7802.c @@ -708,7 +708,7 @@ static umode_t nct7802_temp_is_visible(struct kobject *kobj, if (index >= 38 && index < 46 && !(reg & 0x01)) /* PECI 0 */ return 0; - if (index >= 0x46 && (!(reg & 0x02))) /* PECI 1 */ + if (index >= 46 && !(reg & 0x02)) /* PECI 1 */ return 0; return attr->mode; From 883c3ed9a16abae1b8bc2dfdb206cf314d96a4f6 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 21 Jul 2023 10:49:20 +0100 Subject: [PATCH 129/513] btrfs: check if the transaction was aborted at btrfs_wait_for_commit() commit bf7ecbe9875061bf3fce1883e3b26b77f847d1e8 upstream. At btrfs_wait_for_commit() we wait for a transaction to finish and then always return 0 (success) without checking if it was aborted, in which case the transaction didn't happen due to some critical error. Fix this by checking if the transaction was aborted. Fixes: 462045928bda ("Btrfs: add START_SYNC, WAIT_SYNC ioctls") CC: stable@vger.kernel.org # 4.19+ Reviewed-by: Qu Wenruo Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/transaction.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 642cd2b55fa0..538b34347a8e 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -936,6 +936,7 @@ int btrfs_wait_for_commit(struct btrfs_fs_info *fs_info, u64 transid) } wait_for_commit(cur_trans, TRANS_STATE_COMPLETED); + ret = cur_trans->aborted; btrfs_put_transaction(cur_trans); out: return ret; From 1f5ea62a0f4297770204134bbb371b82b39e2455 Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 21 Jul 2023 10:49:21 +0100 Subject: [PATCH 130/513] btrfs: check for commit error at btrfs_attach_transaction_barrier() commit b28ff3a7d7e97456fd86b68d24caa32e1cfa7064 upstream. btrfs_attach_transaction_barrier() is used to get a handle pointing to the current running transaction if the transaction has not started its commit yet (its state is < TRANS_STATE_COMMIT_START). If the transaction commit has started, then we wait for the transaction to commit and finish before returning - however we completely ignore if the transaction was aborted due to some error during its commit, we simply return ERR_PT(-ENOENT), which makes the caller assume everything is fine and no errors happened. This could make an fsync return success (0) to user space when in fact we had a transaction abort and the target inode changes were therefore not persisted. Fix this by checking for the return value from btrfs_wait_for_commit(), and if it returned an error, return it back to the caller. 
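Together with the previous patch this establishes a simple rule: a helper that hands back either a handle or an ERR_PTR() must not turn a failed wait into apparent success. A stripped-down sketch of that shape, using entirely hypothetical names (struct ctx, try_attach(), wait_for_running_commit() do not exist in btrfs):

  #include <linux/err.h>

  struct ctx;
  struct handle;
  struct handle *try_attach(struct ctx *c);
  int wait_for_running_commit(struct ctx *c);

  /* hypothetical illustration of the attach-or-wait pattern */
  static struct handle *attach_or_wait(struct ctx *c)
  {
          struct handle *h = try_attach(c);       /* may be ERR_PTR(-ENOENT) */

          if (h == ERR_PTR(-ENOENT)) {
                  int ret = wait_for_running_commit(c);

                  if (ret)                        /* e.g. the commit aborted */
                          return ERR_PTR(ret);    /* propagate, don't claim success */
          }
          return h;
  }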
Fixes: d4edf39bd5db ("Btrfs: fix uncompleted transaction") CC: stable@vger.kernel.org # 4.19+ Reviewed-by: Qu Wenruo Signed-off-by: Filipe Manana Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/transaction.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 538b34347a8e..daaed37bba9e 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -840,8 +840,13 @@ btrfs_attach_transaction_barrier(struct btrfs_root *root) trans = start_transaction(root, 0, TRANS_ATTACH, BTRFS_RESERVE_NO_FLUSH, true); - if (trans == ERR_PTR(-ENOENT)) - btrfs_wait_for_commit(root->fs_info, 0); + if (trans == ERR_PTR(-ENOENT)) { + int ret; + + ret = btrfs_wait_for_commit(root->fs_info, 0); + if (ret) + return ERR_PTR(ret); + } return trans; } From 9b8a31a2315202694339192b7a4d47e5df44a8f2 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Mon, 24 Jul 2023 17:00:49 +0200 Subject: [PATCH 131/513] file: always lock position for FMODE_ATOMIC_POS commit 20ea1e7d13c1b544fe67c4a8dc3943bb1ab33e6f upstream. The pidfd_getfd() system call allows a caller with ptrace_may_access() abilities on another process to steal a file descriptor from this process. This system call is used by debuggers, container runtimes, system call supervisors, networking proxies etc. So while it is a special interest system call it is used in common tools. That ability ends up breaking our long-time optimization in fdget_pos(), which "knew" that if we had exclusive access to the file descriptor nobody else could access it, and we didn't need the lock for the file position. That check for file_count(file) was always fairly subtle - it depended on __fdget() not incrementing the file count for single-threaded processes and thus included that as part of the rule - but it did mean that we didn't need to take the lock in all those traditional unix process contexts. So it's sad to see this go, and I'd love to have some way to re-instate the optimization. At the same time, the lock obviously isn't ever contended in the case we optimized, so all we were optimizing away is the atomics and the cacheline dirtying. Let's see if anybody even notices that the optimization is gone. Link: https://lore.kernel.org/linux-fsdevel/20230724-vfs-fdget_pos-v1-1-a4abfd7103f3@kernel.org/ Fixes: 8649c322f75c ("pid: Implement pidfd_getfd syscall") Cc: stable@kernel.org Signed-off-by: Christian Brauner Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/file.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/fs/file.c b/fs/file.c index ee1c350ec58a..1501bbf6306e 100644 --- a/fs/file.c +++ b/fs/file.c @@ -1068,10 +1068,8 @@ unsigned long __fdget_pos(unsigned int fd) struct file *file = (struct file *)(v & ~3); if (file && (file->f_mode & FMODE_ATOMIC_POS)) { - if (file_count(file) > 1) { - v |= FDPUT_POS_UNLOCK; - mutex_lock(&file->f_pos_lock); - } + v |= FDPUT_POS_UNLOCK; + mutex_lock(&file->f_pos_lock); } return v; } From 8130c32b4ac124df268db71f649cfbf3d046db1e Mon Sep 17 00:00:00 2001 From: Trond Myklebust Date: Tue, 18 Jul 2023 08:38:37 -0400 Subject: [PATCH 132/513] nfsd: Remove incorrect check in nfsd4_validate_stateid commit f75546f58a70da5cfdcec5a45ffc377885ccbee8 upstream. If the client is calling TEST_STATEID, then it is because some event occurred that requires it to check all the stateids for validity and call FREE_STATEID on the ones that have been revoked. 
In this case, either the stateid exists in the list of stateids associated with that nfs4_client, in which case it should be tested, or it does not. There are no additional conditions to be considered. Reported-by: "Frank Ch. Eigler" Fixes: 7df302f75ee2 ("NFSD: TEST_STATEID should not return NFS4ERR_STALE_STATEID") Cc: stable@vger.kernel.org # v5.7+ Signed-off-by: Trond Myklebust Reviewed-by: Jeff Layton Signed-off-by: Chuck Lever Signed-off-by: Greg Kroah-Hartman --- fs/nfsd/nfs4state.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index c062728034ad..c8729493df5c 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -5833,8 +5833,6 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid) if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) || CLOSE_STATEID(stateid)) return status; - if (!same_clid(&stateid->si_opaque.so_clid, &cl->cl_clientid)) - return status; spin_lock(&cl->cl_lock); s = find_stateid_locked(cl, stateid); if (!s) From 354e8bd5f53214794f76f2c69aa4e18ceebb435e Mon Sep 17 00:00:00 2001 From: Alexander Steffen Date: Tue, 13 Jun 2023 20:02:56 +0200 Subject: [PATCH 133/513] tpm_tis: Explicitly check for error code commit 513253f8c293c0c8bd46d09d337fc892bf8f9f48 upstream. recv_data either returns the number of received bytes, or a negative value representing an error code. Adding the return value directly to the total number of received bytes therefore looks a little weird, since it might add a negative error code to a sum of bytes. The following check for size < expected usually makes the function return ETIME in that case, so it does not cause too many problems in practice. But to make the code look cleaner and because the caller might still be interested in the original error code, explicitly check for the presence of an error code and pass that through. Cc: stable@vger.kernel.org Fixes: cb5354253af2 ("[PATCH] tpm: spacing cleanups 2") Signed-off-by: Alexander Steffen Reviewed-by: Jarkko Sakkinen Signed-off-by: Jarkko Sakkinen Signed-off-by: Greg Kroah-Hartman --- drivers/char/tpm/tpm_tis_core.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/char/tpm/tpm_tis_core.c b/drivers/char/tpm/tpm_tis_core.c index 365761055df3..d7c440ac465f 100644 --- a/drivers/char/tpm/tpm_tis_core.c +++ b/drivers/char/tpm/tpm_tis_core.c @@ -314,6 +314,7 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) int size = 0; int status; u32 expected; + int rc; if (count < TPM_HEADER_SIZE) { size = -EIO; @@ -333,8 +334,13 @@ static int tpm_tis_recv(struct tpm_chip *chip, u8 *buf, size_t count) goto out; } - size += recv_data(chip, &buf[TPM_HEADER_SIZE], - expected - TPM_HEADER_SIZE); + rc = recv_data(chip, &buf[TPM_HEADER_SIZE], + expected - TPM_HEADER_SIZE); + if (rc < 0) { + size = rc; + goto out; + } + size += rc; if (size < expected) { dev_err(&chip->dev, "Unable to read remainder of result\n"); size = -ETIME; From 6cb3c511afcb0e8a4ee34d8ab73d166b976b24c4 Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Thu, 29 Jun 2023 09:26:20 +0200 Subject: [PATCH 134/513] irq-bcm6345-l1: Do not assume a fixed block to cpu mapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 55ad24857341c36616ecc1d9580af5626c226cf1 ] The irq to block mapping is fixed, and interrupts from the first block will always be routed to the first parent IRQ. But the parent interrupts themselves can be routed to any available CPU. 
This is used by the bootloader to map the first parent interrupt to the boot CPU, regardless wether the boot CPU is the first one or the second one. When booting from the second CPU, the assumption that the first block's IRQ is mapped to the first CPU breaks, and the system hangs because interrupts do not get routed correctly. Fix this by passing the appropriate bcm6434_l1_cpu to the interrupt handler instead of the chip itself, so the handler always has the right block. Fixes: c7c42ec2baa1 ("irqchips/bmips: Add bcm6345-l1 interrupt controller") Signed-off-by: Jonas Gorski Reviewed-by: Philippe Mathieu-Daudé Reviewed-by: Florian Fainelli Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20230629072620.62527-1-jonas.gorski@gmail.com Signed-off-by: Sasha Levin --- drivers/irqchip/irq-bcm6345-l1.c | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/irqchip/irq-bcm6345-l1.c b/drivers/irqchip/irq-bcm6345-l1.c index ebc3a253f735..7c5d8b791592 100644 --- a/drivers/irqchip/irq-bcm6345-l1.c +++ b/drivers/irqchip/irq-bcm6345-l1.c @@ -82,6 +82,7 @@ struct bcm6345_l1_chip { }; struct bcm6345_l1_cpu { + struct bcm6345_l1_chip *intc; void __iomem *map_base; unsigned int parent_irq; u32 enable_cache[]; @@ -115,17 +116,11 @@ static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc, static void bcm6345_l1_irq_handle(struct irq_desc *desc) { - struct bcm6345_l1_chip *intc = irq_desc_get_handler_data(desc); - struct bcm6345_l1_cpu *cpu; + struct bcm6345_l1_cpu *cpu = irq_desc_get_handler_data(desc); + struct bcm6345_l1_chip *intc = cpu->intc; struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int idx; -#ifdef CONFIG_SMP - cpu = intc->cpus[cpu_logical_map(smp_processor_id())]; -#else - cpu = intc->cpus[0]; -#endif - chained_irq_enter(chip, desc); for (idx = 0; idx < intc->n_words; idx++) { @@ -257,6 +252,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn, if (!cpu) return -ENOMEM; + cpu->intc = intc; cpu->map_base = ioremap(res.start, sz); if (!cpu->map_base) return -ENOMEM; @@ -272,7 +268,7 @@ static int __init bcm6345_l1_init_one(struct device_node *dn, return -EINVAL; } irq_set_chained_handler_and_data(cpu->parent_irq, - bcm6345_l1_irq_handle, intc); + bcm6345_l1_irq_handle, cpu); return 0; } From c579caef7c4654079c4d47071e79b61d5da31c81 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Sat, 17 Jun 2023 08:32:42 +0100 Subject: [PATCH 135/513] irqchip/gic-v4.1: Properly lock VPEs when doing a directLPI invalidation [ Upstream commit 926846a703cbf5d0635cc06e67d34b228746554b ] We normally rely on the irq_to_cpuid_[un]lock() primitives to make sure nothing will change col->idx while performing a LPI invalidation. However, these primitives do not cover VPE doorbells, and we have some open-coded locking for that. Unfortunately, this locking is pretty bogus. Instead, extend the above primitives to cover VPE doorbells and convert the whole thing to it. 
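The end result is that every direct invalidation, plain LPI or VPE doorbell alike, sits inside the same locking bracket. Schematically, simplified from the new __direct_lpi_inv() helper in the hunk below:

  /* schematic only; see __direct_lpi_inv() in the diff for the real code */
  static void direct_inv_bracket(struct irq_data *d, u64 val)
  {
          void __iomem *rdbase;
          unsigned long flags;
          int cpu;

          cpu = irq_to_cpuid_lock(d, &flags);     /* now also pins a VPE's col_idx */
          raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);

          rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
          gic_write_lpir(val, rdbase + GICR_INVLPIR);
          wait_for_syncr(rdbase);

          raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
          irq_to_cpuid_unlock(d, flags);
  }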
Fixes: f3a059219bc7 ("irqchip/gic-v4.1: Ensure mutual exclusion between vPE affinity change and RD access") Reported-by: Kunkun Jiang Signed-off-by: Marc Zyngier Cc: Zenghui Yu Cc: wanghaibin.wang@huawei.com Tested-by: Kunkun Jiang Reviewed-by: Zenghui Yu Link: https://lore.kernel.org/r/20230617073242.3199746-1-maz@kernel.org Signed-off-by: Sasha Levin --- drivers/irqchip/irq-gic-v3-its.c | 75 ++++++++++++++++++++------------ 1 file changed, 46 insertions(+), 29 deletions(-) diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 59a5d06b2d3e..490e6cfe510e 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -267,13 +267,23 @@ static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags) raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); } +static struct irq_chip its_vpe_irq_chip; + static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags) { - struct its_vlpi_map *map = get_vlpi_map(d); + struct its_vpe *vpe = NULL; int cpu; - if (map) { - cpu = vpe_to_cpuid_lock(map->vpe, flags); + if (d->chip == &its_vpe_irq_chip) { + vpe = irq_data_get_irq_chip_data(d); + } else { + struct its_vlpi_map *map = get_vlpi_map(d); + if (map) + vpe = map->vpe; + } + + if (vpe) { + cpu = vpe_to_cpuid_lock(vpe, flags); } else { /* Physical LPIs are already locked via the irq_desc lock */ struct its_device *its_dev = irq_data_get_irq_chip_data(d); @@ -287,10 +297,18 @@ static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags) static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags) { - struct its_vlpi_map *map = get_vlpi_map(d); + struct its_vpe *vpe = NULL; + + if (d->chip == &its_vpe_irq_chip) { + vpe = irq_data_get_irq_chip_data(d); + } else { + struct its_vlpi_map *map = get_vlpi_map(d); + if (map) + vpe = map->vpe; + } - if (map) - vpe_to_cpuid_unlock(map->vpe, flags); + if (vpe) + vpe_to_cpuid_unlock(vpe, flags); } static struct its_collection *valid_col(struct its_collection *col) @@ -1427,14 +1445,29 @@ static void wait_for_syncr(void __iomem *rdbase) cpu_relax(); } -static void direct_lpi_inv(struct irq_data *d) +static void __direct_lpi_inv(struct irq_data *d, u64 val) { - struct its_vlpi_map *map = get_vlpi_map(d); void __iomem *rdbase; unsigned long flags; - u64 val; int cpu; + /* Target the redistributor this LPI is currently routed to */ + cpu = irq_to_cpuid_lock(d, &flags); + raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); + + rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; + gic_write_lpir(val, rdbase + GICR_INVLPIR); + wait_for_syncr(rdbase); + + raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); + irq_to_cpuid_unlock(d, flags); +} + +static void direct_lpi_inv(struct irq_data *d) +{ + struct its_vlpi_map *map = get_vlpi_map(d); + u64 val; + if (map) { struct its_device *its_dev = irq_data_get_irq_chip_data(d); @@ -1447,15 +1480,7 @@ static void direct_lpi_inv(struct irq_data *d) val = d->hwirq; } - /* Target the redistributor this LPI is currently routed to */ - cpu = irq_to_cpuid_lock(d, &flags); - raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); - rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; - gic_write_lpir(val, rdbase + GICR_INVLPIR); - - wait_for_syncr(rdbase); - raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); - irq_to_cpuid_unlock(d, flags); + __direct_lpi_inv(d, val); } static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) @@ -3936,18 +3961,10 @@ static void its_vpe_send_inv(struct irq_data *d) { struct its_vpe *vpe = 
irq_data_get_irq_chip_data(d); - if (gic_rdists->has_direct_lpi) { - void __iomem *rdbase; - - /* Target the redistributor this VPE is currently known on */ - raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); - rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; - gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR); - wait_for_syncr(rdbase); - raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock); - } else { + if (gic_rdists->has_direct_lpi) + __direct_lpi_inv(d, d->parent_data->hwirq); + else its_vpe_send_cmd(vpe, its_send_inv); - } } static void its_vpe_mask_irq(struct irq_data *d) From 4ed1549129f9c0388b89654b6bb1a81b553a8edf Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 7 Jul 2023 16:19:09 +0200 Subject: [PATCH 136/513] locking/rtmutex: Fix task->pi_waiters integrity [ Upstream commit f7853c34241807bb97673a5e97719123be39a09e ] Henry reported that rt_mutex_adjust_prio_check() has an ordering problem and puts the lie to the comment in [7]. Sharing the sort key between lock->waiters and owner->pi_waiters *does* create problems, since unlike what the comment claims, holding [L] is insufficient. Notably, consider: A / \ M1 M2 | | B C That is, task A owns both M1 and M2, B and C block on them. In this case a concurrent chain walk (B & C) will modify their resp. sort keys in [7] while holding M1->wait_lock and M2->wait_lock. So holding [L] is meaningless, they're different Ls. This then gives rise to a race condition between [7] and [11], where the requeue of pi_waiters will observe an inconsistent tree order. B C (holds M1->wait_lock, (holds M2->wait_lock, holds B->pi_lock) holds A->pi_lock) [7] waiter_update_prio(); ... [8] raw_spin_unlock(B->pi_lock); ... [10] raw_spin_lock(A->pi_lock); [11] rt_mutex_enqueue_pi(); // observes inconsistent A->pi_waiters // tree order Fixing this means either extending the range of the owner lock from [10-13] to [6-13], with the immediate problem that this means [6-8] hold both blocked and owner locks, or duplicating the sort key. Since the locking in chain walk is horrible enough without having to consider pi_lock nesting rules, duplicate the sort key instead. By giving each tree their own sort key, the above race becomes harmless, if C sees B at the old location, then B will correct things (if they need correcting) when it walks up the chain and reaches A. Fixes: fb00aca47440 ("rtmutex: Turn the plist into an rb-tree") Reported-by: Henry Wu Signed-off-by: Peter Zijlstra (Intel) Acked-by: Thomas Gleixner Tested-by: Henry Wu Link: https://lkml.kernel.org/r/20230707161052.GF2883469%40hirez.programming.kicks-ass.net Signed-off-by: Sasha Levin --- kernel/locking/rtmutex.c | 170 +++++++++++++++++++++----------- kernel/locking/rtmutex_api.c | 2 +- kernel/locking/rtmutex_common.h | 47 ++++++--- kernel/locking/ww_mutex.h | 12 +-- 4 files changed, 155 insertions(+), 76 deletions(-) diff --git a/kernel/locking/rtmutex.c b/kernel/locking/rtmutex.c index b7fa3ee3aa1d..ee5be1dda0c4 100644 --- a/kernel/locking/rtmutex.c +++ b/kernel/locking/rtmutex.c @@ -331,21 +331,43 @@ static __always_inline int __waiter_prio(struct task_struct *task) return prio; } +/* + * Update the waiter->tree copy of the sort keys. 
+ */ static __always_inline void waiter_update_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) { - waiter->prio = __waiter_prio(task); - waiter->deadline = task->dl.deadline; + lockdep_assert_held(&waiter->lock->wait_lock); + lockdep_assert(RB_EMPTY_NODE(&waiter->tree.entry)); + + waiter->tree.prio = __waiter_prio(task); + waiter->tree.deadline = task->dl.deadline; +} + +/* + * Update the waiter->pi_tree copy of the sort keys (from the tree copy). + */ +static __always_inline void +waiter_clone_prio(struct rt_mutex_waiter *waiter, struct task_struct *task) +{ + lockdep_assert_held(&waiter->lock->wait_lock); + lockdep_assert_held(&task->pi_lock); + lockdep_assert(RB_EMPTY_NODE(&waiter->pi_tree.entry)); + + waiter->pi_tree.prio = waiter->tree.prio; + waiter->pi_tree.deadline = waiter->tree.deadline; } /* - * Only use with rt_mutex_waiter_{less,equal}() + * Only use with rt_waiter_node_{less,equal}() */ +#define task_to_waiter_node(p) \ + &(struct rt_waiter_node){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline } #define task_to_waiter(p) \ - &(struct rt_mutex_waiter){ .prio = __waiter_prio(p), .deadline = (p)->dl.deadline } + &(struct rt_mutex_waiter){ .tree = *task_to_waiter_node(p) } -static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, - struct rt_mutex_waiter *right) +static __always_inline int rt_waiter_node_less(struct rt_waiter_node *left, + struct rt_waiter_node *right) { if (left->prio < right->prio) return 1; @@ -362,8 +384,8 @@ static __always_inline int rt_mutex_waiter_less(struct rt_mutex_waiter *left, return 0; } -static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left, - struct rt_mutex_waiter *right) +static __always_inline int rt_waiter_node_equal(struct rt_waiter_node *left, + struct rt_waiter_node *right) { if (left->prio != right->prio) return 0; @@ -383,7 +405,7 @@ static __always_inline int rt_mutex_waiter_equal(struct rt_mutex_waiter *left, static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter, struct rt_mutex_waiter *top_waiter) { - if (rt_mutex_waiter_less(waiter, top_waiter)) + if (rt_waiter_node_less(&waiter->tree, &top_waiter->tree)) return true; #ifdef RT_MUTEX_BUILD_SPINLOCKS @@ -391,30 +413,30 @@ static inline bool rt_mutex_steal(struct rt_mutex_waiter *waiter, * Note that RT tasks are excluded from same priority (lateral) * steals to prevent the introduction of an unbounded latency. 
*/ - if (rt_prio(waiter->prio) || dl_prio(waiter->prio)) + if (rt_prio(waiter->tree.prio) || dl_prio(waiter->tree.prio)) return false; - return rt_mutex_waiter_equal(waiter, top_waiter); + return rt_waiter_node_equal(&waiter->tree, &top_waiter->tree); #else return false; #endif } #define __node_2_waiter(node) \ - rb_entry((node), struct rt_mutex_waiter, tree_entry) + rb_entry((node), struct rt_mutex_waiter, tree.entry) static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_node *b) { struct rt_mutex_waiter *aw = __node_2_waiter(a); struct rt_mutex_waiter *bw = __node_2_waiter(b); - if (rt_mutex_waiter_less(aw, bw)) + if (rt_waiter_node_less(&aw->tree, &bw->tree)) return 1; if (!build_ww_mutex()) return 0; - if (rt_mutex_waiter_less(bw, aw)) + if (rt_waiter_node_less(&bw->tree, &aw->tree)) return 0; /* NOTE: relies on waiter->ww_ctx being set before insertion */ @@ -432,48 +454,58 @@ static __always_inline bool __waiter_less(struct rb_node *a, const struct rb_nod static __always_inline void rt_mutex_enqueue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) { - rb_add_cached(&waiter->tree_entry, &lock->waiters, __waiter_less); + lockdep_assert_held(&lock->wait_lock); + + rb_add_cached(&waiter->tree.entry, &lock->waiters, __waiter_less); } static __always_inline void rt_mutex_dequeue(struct rt_mutex_base *lock, struct rt_mutex_waiter *waiter) { - if (RB_EMPTY_NODE(&waiter->tree_entry)) + lockdep_assert_held(&lock->wait_lock); + + if (RB_EMPTY_NODE(&waiter->tree.entry)) return; - rb_erase_cached(&waiter->tree_entry, &lock->waiters); - RB_CLEAR_NODE(&waiter->tree_entry); + rb_erase_cached(&waiter->tree.entry, &lock->waiters); + RB_CLEAR_NODE(&waiter->tree.entry); } -#define __node_2_pi_waiter(node) \ - rb_entry((node), struct rt_mutex_waiter, pi_tree_entry) +#define __node_2_rt_node(node) \ + rb_entry((node), struct rt_waiter_node, entry) -static __always_inline bool -__pi_waiter_less(struct rb_node *a, const struct rb_node *b) +static __always_inline bool __pi_waiter_less(struct rb_node *a, const struct rb_node *b) { - return rt_mutex_waiter_less(__node_2_pi_waiter(a), __node_2_pi_waiter(b)); + return rt_waiter_node_less(__node_2_rt_node(a), __node_2_rt_node(b)); } static __always_inline void rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) { - rb_add_cached(&waiter->pi_tree_entry, &task->pi_waiters, __pi_waiter_less); + lockdep_assert_held(&task->pi_lock); + + rb_add_cached(&waiter->pi_tree.entry, &task->pi_waiters, __pi_waiter_less); } static __always_inline void rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter) { - if (RB_EMPTY_NODE(&waiter->pi_tree_entry)) + lockdep_assert_held(&task->pi_lock); + + if (RB_EMPTY_NODE(&waiter->pi_tree.entry)) return; - rb_erase_cached(&waiter->pi_tree_entry, &task->pi_waiters); - RB_CLEAR_NODE(&waiter->pi_tree_entry); + rb_erase_cached(&waiter->pi_tree.entry, &task->pi_waiters); + RB_CLEAR_NODE(&waiter->pi_tree.entry); } -static __always_inline void rt_mutex_adjust_prio(struct task_struct *p) +static __always_inline void rt_mutex_adjust_prio(struct rt_mutex_base *lock, + struct task_struct *p) { struct task_struct *pi_task = NULL; + lockdep_assert_held(&lock->wait_lock); + lockdep_assert(rt_mutex_owner(lock) == p); lockdep_assert_held(&p->pi_lock); if (task_has_pi_waiters(p)) @@ -562,9 +594,14 @@ static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_st * Chain walk basics and protection scope * * [R] refcount on task - * [P] task->pi_lock held + * 
[Pn] task->pi_lock held * [L] rtmutex->wait_lock held * + * Normal locking order: + * + * rtmutex->wait_lock + * task->pi_lock + * * Step Description Protected by * function arguments: * @task [R] @@ -579,27 +616,32 @@ static __always_inline struct rt_mutex_base *task_blocked_on_lock(struct task_st * again: * loop_sanity_check(); * retry: - * [1] lock(task->pi_lock); [R] acquire [P] - * [2] waiter = task->pi_blocked_on; [P] - * [3] check_exit_conditions_1(); [P] - * [4] lock = waiter->lock; [P] - * [5] if (!try_lock(lock->wait_lock)) { [P] try to acquire [L] - * unlock(task->pi_lock); release [P] + * [1] lock(task->pi_lock); [R] acquire [P1] + * [2] waiter = task->pi_blocked_on; [P1] + * [3] check_exit_conditions_1(); [P1] + * [4] lock = waiter->lock; [P1] + * [5] if (!try_lock(lock->wait_lock)) { [P1] try to acquire [L] + * unlock(task->pi_lock); release [P1] * goto retry; * } - * [6] check_exit_conditions_2(); [P] + [L] - * [7] requeue_lock_waiter(lock, waiter); [P] + [L] - * [8] unlock(task->pi_lock); release [P] + * [6] check_exit_conditions_2(); [P1] + [L] + * [7] requeue_lock_waiter(lock, waiter); [P1] + [L] + * [8] unlock(task->pi_lock); release [P1] * put_task_struct(task); release [R] * [9] check_exit_conditions_3(); [L] * [10] task = owner(lock); [L] * get_task_struct(task); [L] acquire [R] - * lock(task->pi_lock); [L] acquire [P] - * [11] requeue_pi_waiter(tsk, waiters(lock));[P] + [L] - * [12] check_exit_conditions_4(); [P] + [L] - * [13] unlock(task->pi_lock); release [P] + * lock(task->pi_lock); [L] acquire [P2] + * [11] requeue_pi_waiter(tsk, waiters(lock));[P2] + [L] + * [12] check_exit_conditions_4(); [P2] + [L] + * [13] unlock(task->pi_lock); release [P2] * unlock(lock->wait_lock); release [L] * goto again; + * + * Where P1 is the blocking task and P2 is the lock owner; going up one step + * the owner becomes the next blocked task etc.. + * +* */ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, enum rtmutex_chainwalk chwalk, @@ -747,7 +789,7 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, * enabled we continue, but stop the requeueing in the chain * walk. */ - if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { + if (rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) { if (!detect_deadlock) goto out_unlock_pi; else @@ -755,13 +797,18 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, } /* - * [4] Get the next lock + * [4] Get the next lock; per holding task->pi_lock we can't unblock + * and guarantee @lock's existence. */ lock = waiter->lock; /* * [5] We need to trylock here as we are holding task->pi_lock, * which is the reverse lock order versus the other rtmutex * operations. + * + * Per the above, holding task->pi_lock guarantees lock exists, so + * inverting this lock order is infeasible from a life-time + * perspective. */ if (!raw_spin_trylock(&lock->wait_lock)) { raw_spin_unlock_irq(&task->pi_lock); @@ -865,17 +912,18 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, * or * * DL CBS enforcement advancing the effective deadline. - * - * Even though pi_waiters also uses these fields, and that tree is only - * updated in [11], we can do this here, since we hold [L], which - * serializes all pi_waiters access and rb_erase() does not care about - * the values of the node being removed. 
*/ waiter_update_prio(waiter, task); rt_mutex_enqueue(lock, waiter); - /* [8] Release the task */ + /* + * [8] Release the (blocking) task in preparation for + * taking the owner task in [10]. + * + * Since we hold lock->waiter_lock, task cannot unblock, even if we + * release task->pi_lock. + */ raw_spin_unlock(&task->pi_lock); put_task_struct(task); @@ -899,7 +947,12 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, return 0; } - /* [10] Grab the next task, i.e. the owner of @lock */ + /* + * [10] Grab the next task, i.e. the owner of @lock + * + * Per holding lock->wait_lock and checking for !owner above, there + * must be an owner and it cannot go away. + */ task = get_task_struct(rt_mutex_owner(lock)); raw_spin_lock(&task->pi_lock); @@ -912,8 +965,9 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, * and adjust the priority of the owner. */ rt_mutex_dequeue_pi(task, prerequeue_top_waiter); + waiter_clone_prio(waiter, task); rt_mutex_enqueue_pi(task, waiter); - rt_mutex_adjust_prio(task); + rt_mutex_adjust_prio(lock, task); } else if (prerequeue_top_waiter == waiter) { /* @@ -928,8 +982,9 @@ static int __sched rt_mutex_adjust_prio_chain(struct task_struct *task, */ rt_mutex_dequeue_pi(task, waiter); waiter = rt_mutex_top_waiter(lock); + waiter_clone_prio(waiter, task); rt_mutex_enqueue_pi(task, waiter); - rt_mutex_adjust_prio(task); + rt_mutex_adjust_prio(lock, task); } else { /* * Nothing changed. No need to do any priority @@ -1142,6 +1197,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, waiter->task = task; waiter->lock = lock; waiter_update_prio(waiter, task); + waiter_clone_prio(waiter, task); /* Get the top priority waiter on the lock */ if (rt_mutex_has_waiters(lock)) @@ -1175,7 +1231,7 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock, rt_mutex_dequeue_pi(owner, top_waiter); rt_mutex_enqueue_pi(owner, waiter); - rt_mutex_adjust_prio(owner); + rt_mutex_adjust_prio(lock, owner); if (owner->pi_blocked_on) chain_walk = 1; } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { @@ -1222,6 +1278,8 @@ static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh, { struct rt_mutex_waiter *waiter; + lockdep_assert_held(&lock->wait_lock); + raw_spin_lock(¤t->pi_lock); waiter = rt_mutex_top_waiter(lock); @@ -1234,7 +1292,7 @@ static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh, * task unblocks. 
*/ rt_mutex_dequeue_pi(current, waiter); - rt_mutex_adjust_prio(current); + rt_mutex_adjust_prio(lock, current); /* * As we are waking up the top waiter, and the waiter stays @@ -1471,7 +1529,7 @@ static void __sched remove_waiter(struct rt_mutex_base *lock, if (rt_mutex_has_waiters(lock)) rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock)); - rt_mutex_adjust_prio(owner); + rt_mutex_adjust_prio(lock, owner); /* Store the lock on which owner is blocked or NULL */ next_lock = task_blocked_on_lock(owner); diff --git a/kernel/locking/rtmutex_api.c b/kernel/locking/rtmutex_api.c index a461be2f873d..56d1938cb52a 100644 --- a/kernel/locking/rtmutex_api.c +++ b/kernel/locking/rtmutex_api.c @@ -437,7 +437,7 @@ void __sched rt_mutex_adjust_pi(struct task_struct *task) raw_spin_lock_irqsave(&task->pi_lock, flags); waiter = task->pi_blocked_on; - if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) { + if (!waiter || rt_waiter_node_equal(&waiter->tree, task_to_waiter_node(task))) { raw_spin_unlock_irqrestore(&task->pi_lock, flags); return; } diff --git a/kernel/locking/rtmutex_common.h b/kernel/locking/rtmutex_common.h index c47e8361bfb5..1162e07cdaea 100644 --- a/kernel/locking/rtmutex_common.h +++ b/kernel/locking/rtmutex_common.h @@ -17,27 +17,44 @@ #include #include + +/* + * This is a helper for the struct rt_mutex_waiter below. A waiter goes in two + * separate trees and they need their own copy of the sort keys because of + * different locking requirements. + * + * @entry: rbtree node to enqueue into the waiters tree + * @prio: Priority of the waiter + * @deadline: Deadline of the waiter if applicable + * + * See rt_waiter_node_less() and waiter_*_prio(). + */ +struct rt_waiter_node { + struct rb_node entry; + int prio; + u64 deadline; +}; + /* * This is the control structure for tasks blocked on a rt_mutex, * which is allocated on the kernel stack on of the blocked task. 
* - * @tree_entry: pi node to enqueue into the mutex waiters tree - * @pi_tree_entry: pi node to enqueue into the mutex owner waiters tree + * @tree: node to enqueue into the mutex waiters tree + * @pi_tree: node to enqueue into the mutex owner waiters tree * @task: task reference to the blocked task * @lock: Pointer to the rt_mutex on which the waiter blocks * @wake_state: Wakeup state to use (TASK_NORMAL or TASK_RTLOCK_WAIT) - * @prio: Priority of the waiter - * @deadline: Deadline of the waiter if applicable * @ww_ctx: WW context pointer + * + * @tree is ordered by @lock->wait_lock + * @pi_tree is ordered by rt_mutex_owner(@lock)->pi_lock */ struct rt_mutex_waiter { - struct rb_node tree_entry; - struct rb_node pi_tree_entry; + struct rt_waiter_node tree; + struct rt_waiter_node pi_tree; struct task_struct *task; struct rt_mutex_base *lock; unsigned int wake_state; - int prio; - u64 deadline; struct ww_acquire_ctx *ww_ctx; }; @@ -105,7 +122,7 @@ static inline bool rt_mutex_waiter_is_top_waiter(struct rt_mutex_base *lock, { struct rb_node *leftmost = rb_first_cached(&lock->waiters); - return rb_entry(leftmost, struct rt_mutex_waiter, tree_entry) == waiter; + return rb_entry(leftmost, struct rt_mutex_waiter, tree.entry) == waiter; } static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base *lock) @@ -113,8 +130,10 @@ static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex_base * struct rb_node *leftmost = rb_first_cached(&lock->waiters); struct rt_mutex_waiter *w = NULL; + lockdep_assert_held(&lock->wait_lock); + if (leftmost) { - w = rb_entry(leftmost, struct rt_mutex_waiter, tree_entry); + w = rb_entry(leftmost, struct rt_mutex_waiter, tree.entry); BUG_ON(w->lock != lock); } return w; @@ -127,8 +146,10 @@ static inline int task_has_pi_waiters(struct task_struct *p) static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p) { + lockdep_assert_held(&p->pi_lock); + return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter, - pi_tree_entry); + pi_tree.entry); } #define RT_MUTEX_HAS_WAITERS 1UL @@ -190,8 +211,8 @@ static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) static inline void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) { debug_rt_mutex_init_waiter(waiter); - RB_CLEAR_NODE(&waiter->pi_tree_entry); - RB_CLEAR_NODE(&waiter->tree_entry); + RB_CLEAR_NODE(&waiter->pi_tree.entry); + RB_CLEAR_NODE(&waiter->tree.entry); waiter->wake_state = TASK_NORMAL; waiter->task = NULL; } diff --git a/kernel/locking/ww_mutex.h b/kernel/locking/ww_mutex.h index 56f139201f24..3ad2cc4823e5 100644 --- a/kernel/locking/ww_mutex.h +++ b/kernel/locking/ww_mutex.h @@ -96,25 +96,25 @@ __ww_waiter_first(struct rt_mutex *lock) struct rb_node *n = rb_first(&lock->rtmutex.waiters.rb_root); if (!n) return NULL; - return rb_entry(n, struct rt_mutex_waiter, tree_entry); + return rb_entry(n, struct rt_mutex_waiter, tree.entry); } static inline struct rt_mutex_waiter * __ww_waiter_next(struct rt_mutex *lock, struct rt_mutex_waiter *w) { - struct rb_node *n = rb_next(&w->tree_entry); + struct rb_node *n = rb_next(&w->tree.entry); if (!n) return NULL; - return rb_entry(n, struct rt_mutex_waiter, tree_entry); + return rb_entry(n, struct rt_mutex_waiter, tree.entry); } static inline struct rt_mutex_waiter * __ww_waiter_prev(struct rt_mutex *lock, struct rt_mutex_waiter *w) { - struct rb_node *n = rb_prev(&w->tree_entry); + struct rb_node *n = rb_prev(&w->tree.entry); if (!n) return NULL; - return rb_entry(n, struct 
rt_mutex_waiter, tree_entry); + return rb_entry(n, struct rt_mutex_waiter, tree.entry); } static inline struct rt_mutex_waiter * @@ -123,7 +123,7 @@ __ww_waiter_last(struct rt_mutex *lock) struct rb_node *n = rb_last(&lock->rtmutex.waiters.rb_root); if (!n) return NULL; - return rb_entry(n, struct rt_mutex_waiter, tree_entry); + return rb_entry(n, struct rt_mutex_waiter, tree.entry); } static inline void From 427d42838c16667ceec491c86bca60ba206225b9 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Tue, 13 Jun 2023 13:30:35 -0700 Subject: [PATCH 137/513] KVM: x86: Disallow KVM_SET_SREGS{2} if incoming CR0 is invalid [ Upstream commit 26a0652cb453c72f6aab0974bc4939e9b14f886b ] Reject KVM_SET_SREGS{2} with -EINVAL if the incoming CR0 is invalid, e.g. due to setting bits 63:32, illegal combinations, or to a value that isn't allowed in VMX (non-)root mode. The VMX checks in particular are "fun" as failure to disallow Real Mode for an L2 that is configured with unrestricted guest disabled, when KVM itself has unrestricted guest enabled, will result in KVM forcing VM86 mode to virtual Real Mode for L2, but then fail to unwind the related metadata when synthesizing a nested VM-Exit back to L1 (which has unrestricted guest enabled). Opportunistically fix a benign typo in the prototype for is_valid_cr4(). Cc: stable@vger.kernel.org Reported-by: syzbot+5feef0b9ee9c8e9e5689@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/000000000000f316b705fdf6e2b4@google.com Signed-off-by: Sean Christopherson Message-Id: <20230613203037.1968489-2-seanjc@google.com> Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin --- arch/x86/include/asm/kvm-x86-ops.h | 1 + arch/x86/include/asm/kvm_host.h | 3 ++- arch/x86/kvm/svm/svm.c | 6 ++++++ arch/x86/kvm/vmx/vmx.c | 28 ++++++++++++++++++------ arch/x86/kvm/x86.c | 34 +++++++++++++++++++----------- 5 files changed, 52 insertions(+), 20 deletions(-) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 23ea8a25cbbe..4bdcb91478a5 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -34,6 +34,7 @@ KVM_X86_OP(get_segment) KVM_X86_OP(get_cpl) KVM_X86_OP(set_segment) KVM_X86_OP_NULL(get_cs_db_l_bits) +KVM_X86_OP(is_valid_cr0) KVM_X86_OP(set_cr0) KVM_X86_OP(is_valid_cr4) KVM_X86_OP(set_cr4) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 9e800d4d323c..08cfc26ee7c6 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1333,8 +1333,9 @@ struct kvm_x86_ops { void (*set_segment)(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg); void (*get_cs_db_l_bits)(struct kvm_vcpu *vcpu, int *db, int *l); + bool (*is_valid_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); void (*set_cr0)(struct kvm_vcpu *vcpu, unsigned long cr0); - bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr0); + bool (*is_valid_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); void (*set_cr4)(struct kvm_vcpu *vcpu, unsigned long cr4); int (*set_efer)(struct kvm_vcpu *vcpu, u64 efer); void (*get_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 0611dac70c25..302a4669c5a1 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1734,6 +1734,11 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt) vmcb_mark_dirty(svm->vmcb, VMCB_DT); } +static bool svm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) +{ + return true; +} + void 
svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { struct vcpu_svm *svm = to_svm(vcpu); @@ -4596,6 +4601,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .set_segment = svm_set_segment, .get_cpl = svm_get_cpl, .get_cs_db_l_bits = kvm_get_cs_db_l_bits, + .is_valid_cr0 = svm_is_valid_cr0, .set_cr0 = svm_set_cr0, .is_valid_cr4 = svm_is_valid_cr4, .set_cr4 = svm_set_cr4, diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 0841f9a34d1c..89744ee06101 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -2894,6 +2894,15 @@ static void enter_rmode(struct kvm_vcpu *vcpu) struct vcpu_vmx *vmx = to_vmx(vcpu); struct kvm_vmx *kvm_vmx = to_kvm_vmx(vcpu->kvm); + /* + * KVM should never use VM86 to virtualize Real Mode when L2 is active, + * as using VM86 is unnecessary if unrestricted guest is enabled, and + * if unrestricted guest is disabled, VM-Enter (from L1) with CR0.PG=0 + * should VM-Fail and KVM should reject userspace attempts to stuff + * CR0.PG=0 when L2 is active. + */ + WARN_ON_ONCE(is_guest_mode(vcpu)); + vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_TR], VCPU_SREG_TR); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_ES], VCPU_SREG_ES); vmx_get_segment(vcpu, &vmx->rmode.segs[VCPU_SREG_DS], VCPU_SREG_DS); @@ -3084,6 +3093,17 @@ void ept_save_pdptrs(struct kvm_vcpu *vcpu) #define CR3_EXITING_BITS (CPU_BASED_CR3_LOAD_EXITING | \ CPU_BASED_CR3_STORE_EXITING) +static bool vmx_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) +{ + if (is_guest_mode(vcpu)) + return nested_guest_cr0_valid(vcpu, cr0); + + if (to_vmx(vcpu)->nested.vmxon) + return nested_host_cr0_valid(vcpu, cr0); + + return true; +} + void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) { struct vcpu_vmx *vmx = to_vmx(vcpu); @@ -5027,18 +5047,11 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val) val = (val & ~vmcs12->cr0_guest_host_mask) | (vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask); - if (!nested_guest_cr0_valid(vcpu, val)) - return 1; - if (kvm_set_cr0(vcpu, val)) return 1; vmcs_writel(CR0_READ_SHADOW, orig_val); return 0; } else { - if (to_vmx(vcpu)->nested.vmxon && - !nested_host_cr0_valid(vcpu, val)) - return 1; - return kvm_set_cr0(vcpu, val); } } @@ -7744,6 +7757,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .set_segment = vmx_set_segment, .get_cpl = vmx_get_cpl, .get_cs_db_l_bits = vmx_get_cs_db_l_bits, + .is_valid_cr0 = vmx_is_valid_cr0, .set_cr0 = vmx_set_cr0, .is_valid_cr4 = vmx_is_valid_cr4, .set_cr4 = vmx_set_cr4, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 7e1e3bc74562..285ba12be8ce 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -876,6 +876,22 @@ int load_pdptrs(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, unsigned long cr3) } EXPORT_SYMBOL_GPL(load_pdptrs); +static bool kvm_is_valid_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) +{ +#ifdef CONFIG_X86_64 + if (cr0 & 0xffffffff00000000UL) + return false; +#endif + + if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) + return false; + + if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) + return false; + + return static_call(kvm_x86_is_valid_cr0)(vcpu, cr0); +} + void kvm_post_set_cr0(struct kvm_vcpu *vcpu, unsigned long old_cr0, unsigned long cr0) { if ((cr0 ^ old_cr0) & X86_CR0_PG) { @@ -898,20 +914,13 @@ int kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0) unsigned long old_cr0 = kvm_read_cr0(vcpu); unsigned long pdptr_bits = X86_CR0_CD | X86_CR0_NW | X86_CR0_PG; - cr0 |= X86_CR0_ET; - -#ifdef CONFIG_X86_64 - if (cr0 & 0xffffffff00000000UL) + if 
(!kvm_is_valid_cr0(vcpu, cr0)) return 1; -#endif - - cr0 &= ~CR0_RESERVED_BITS; - if ((cr0 & X86_CR0_NW) && !(cr0 & X86_CR0_CD)) - return 1; + cr0 |= X86_CR0_ET; - if ((cr0 & X86_CR0_PG) && !(cr0 & X86_CR0_PE)) - return 1; + /* Write to CR0 reserved bits are ignored, even on Intel. */ + cr0 &= ~CR0_RESERVED_BITS; #ifdef CONFIG_X86_64 if ((vcpu->arch.efer & EFER_LME) && !is_paging(vcpu) && @@ -10643,7 +10652,8 @@ static bool kvm_is_valid_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) return false; } - return kvm_is_valid_cr4(vcpu, sregs->cr4); + return kvm_is_valid_cr4(vcpu, sregs->cr4) && + kvm_is_valid_cr0(vcpu, sregs->cr0); } static int __set_sregs_common(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs, From 0061453d6ea1e72adbc167190cf76c6b65656f70 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Tue, 25 Jul 2023 03:20:49 -0400 Subject: [PATCH 138/513] virtio-net: fix race between set queues and probe commit 25266128fe16d5632d43ada34c847d7b8daba539 upstream. A race were found where set_channels could be called after registering but before virtnet_set_queues() in virtnet_probe(). Fixing this by moving the virtnet_set_queues() before netdevice registering. While at it, use _virtnet_set_queues() to avoid holding rtnl as the device is not even registered at that time. Cc: stable@vger.kernel.org Fixes: a220871be66f ("virtio-net: correctly enable multiqueue") Signed-off-by: Jason Wang Acked-by: Michael S. Tsirkin Reviewed-by: Xuan Zhuo Link: https://lore.kernel.org/r/20230725072049.617289-1-jasowang@redhat.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- drivers/net/virtio_net.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index 0351f86494f1..af335f8266c2 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3319,6 +3319,8 @@ static int virtnet_probe(struct virtio_device *vdev) } } + _virtnet_set_queues(vi, vi->curr_queue_pairs); + /* serialize netdev register + virtio_device_ready() with ndo_open() */ rtnl_lock(); @@ -3339,8 +3341,6 @@ static int virtnet_probe(struct virtio_device *vdev) goto free_unregister_netdev; } - virtnet_set_queues(vi, vi->curr_queue_pairs); - /* Assume link up if device can't report link status, otherwise get link status from config. */ netif_carrier_off(dev); From 585355a76e052578498552d3f6764642a0b3ed00 Mon Sep 17 00:00:00 2001 From: Stefan Haberland Date: Fri, 21 Jul 2023 21:36:44 +0200 Subject: [PATCH 139/513] s390/dasd: fix hanging device after quiesce/resume commit 05f1d8ed03f547054efbc4d29bb7991c958ede95 upstream. Quiesce and resume are functions that tell the DASD driver to stop/resume issuing I/Os to a specific DASD. On resume dasd_schedule_block_bh() is called to kick handling of IO requests again. This does unfortunately not cover internal requests which are used for path verification for example. This could lead to a hanging device when a path event or anything else that triggers internal requests occurs on a quiesced device. Fix by also calling dasd_schedule_device_bh() which triggers handling of internal requests on resume. 
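As a simplified sketch of the resulting resume path (function and helper names taken from the diff below; locking and the surrounding code elided, and "base" is the underlying dasd_device already used earlier in the function):

  static int dasd_ioctl_resume(struct dasd_block *block)
  {
          ...
          /* restart normal block layer requests held back while quiesced */
          dasd_schedule_block_bh(block);
          /* also restart internal requests, e.g. path verification */
          dasd_schedule_device_bh(base);
          return 0;
  }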
Fixes: 8e09f21574ea ("[S390] dasd: add hyper PAV support to DASD device driver, part 1") Cc: stable@vger.kernel.org Signed-off-by: Stefan Haberland Reviewed-by: Jan Hoeppner Link: https://lore.kernel.org/r/20230721193647.3889634-2-sth@linux.ibm.com Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- drivers/s390/block/dasd_ioctl.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 7b76491f4fe9..7dad0428d73c 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c @@ -131,6 +131,7 @@ static int dasd_ioctl_resume(struct dasd_block *block) spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags); dasd_schedule_block_bh(block); + dasd_schedule_device_bh(base); return 0; } From e443b3a508b0bb228e4f5291442dbffbb18ce786 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Sun, 23 Jul 2023 00:27:22 +0100 Subject: [PATCH 140/513] ASoC: wm8904: Fill the cache for WM8904_ADC_TEST_0 register commit f061e2be8689057cb4ec0dbffa9f03e1a23cdcb2 upstream. The WM8904_ADC_TEST_0 register is modified as part of updating the OSR controls but does not have a cache default, leading to errors when we try to modify these controls in cache only mode with no prior read: wm8904 3-001a: ASoC: error at snd_soc_component_update_bits on wm8904.3-001a for register: [0x000000c6] -16 Add a read of the register to probe() to fill the cache and avoid both the error messages and the misconfiguration of the chip which will result. Acked-by: Charles Keepax Signed-off-by: Mark Brown Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230723-asoc-fix-wm8904-adc-test-read-v1-1-2cdf2edd83fd@kernel.org Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/codecs/wm8904.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c index 6759ce7e09ff..7aed412b19c7 100644 --- a/sound/soc/codecs/wm8904.c +++ b/sound/soc/codecs/wm8904.c @@ -2306,6 +2306,9 @@ static int wm8904_i2c_probe(struct i2c_client *i2c, regmap_update_bits(wm8904->regmap, WM8904_BIAS_CONTROL_0, WM8904_POBCTRL, 0); + /* Fill the cache for the ADC test register */ + regmap_read(wm8904->regmap, WM8904_ADC_TEST_0, &val); + /* Can leave the device powered off until we need it */ regcache_cache_only(wm8904->regmap, true); regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies), wm8904->supplies); From 7c9b8cca49176232f155ddbd78bb8066defec551 Mon Sep 17 00:00:00 2001 From: Xiubo Li Date: Thu, 20 Jul 2023 11:33:55 +0800 Subject: [PATCH 141/513] ceph: never send metrics if disable_send_metrics is set commit 50164507f6b7b7ed85d8c3ac0266849fbd908db7 upstream. Even the 'disable_send_metrics' is true so when the session is being opened it will always trigger to send the metric for the first time. 
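In other words, the periodic worker should bail out for a disabled-metrics mount just as it does for a stopping client; after this change the guard is simply (excerpt, names as in the one-line diff below):

  /* in metric_delayed_work() */
  if (mdsc->stopping || disable_send_metrics)
          return;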
Cc: stable@vger.kernel.org Signed-off-by: Xiubo Li Reviewed-by: Venky Shankar Reviewed-by: Jeff Layton Signed-off-by: Ilya Dryomov Signed-off-by: Greg Kroah-Hartman --- fs/ceph/metric.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c index 04d5df29bbbf..d21ff4354332 100644 --- a/fs/ceph/metric.c +++ b/fs/ceph/metric.c @@ -202,7 +202,7 @@ static void metric_delayed_work(struct work_struct *work) struct ceph_mds_client *mdsc = container_of(m, struct ceph_mds_client, metric); - if (mdsc->stopping) + if (mdsc->stopping || disable_send_metrics) return; if (!m->session || !check_session_state(m->session)) { From 098d0b9ba03c96ffbba1f236038bbea4933372f6 Mon Sep 17 00:00:00 2001 From: Joe Thornber Date: Tue, 25 Jul 2023 11:44:41 -0400 Subject: [PATCH 142/513] dm cache policy smq: ensure IO doesn't prevent cleaner policy progress commit 1e4ab7b4c881cf26c1c72b3f56519e03475486fb upstream. When using the cleaner policy to decommission the cache, there is never any writeback started from the cache as it is constantly delayed due to normal I/O keeping the device busy. Meaning @idle=false was always being passed to clean_target_met() Fix this by adding a specific 'cleaner' flag that is set when the cleaner policy is configured. This flag serves to always allow the cleaner's writeback work to be queued until the cache is decommissioned (even if the cache isn't idle). Reported-by: David Jeffery Fixes: b29d4986d0da ("dm cache: significant rework to leverage dm-bio-prison-v2") Cc: stable@vger.kernel.org Signed-off-by: Joe Thornber Signed-off-by: Mike Snitzer Signed-off-by: Greg Kroah-Hartman --- drivers/md/dm-cache-policy-smq.c | 28 ++++++++++++++++++---------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c index b61aac00ff40..859073193f5b 100644 --- a/drivers/md/dm-cache-policy-smq.c +++ b/drivers/md/dm-cache-policy-smq.c @@ -854,7 +854,13 @@ struct smq_policy { struct background_tracker *bg_work; - bool migrations_allowed; + bool migrations_allowed:1; + + /* + * If this is set the policy will try and clean the whole cache + * even if the device is not idle. + */ + bool cleaner:1; }; /*----------------------------------------------------------------*/ @@ -1133,7 +1139,7 @@ static bool clean_target_met(struct smq_policy *mq, bool idle) * Cache entries may not be populated. So we cannot rely on the * size of the clean queue. */ - if (idle) { + if (idle || mq->cleaner) { /* * We'd like to clean everything. 
*/ @@ -1716,11 +1722,9 @@ static void calc_hotspot_params(sector_t origin_size, *hotspot_block_size /= 2u; } -static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size, - sector_t origin_size, - sector_t cache_block_size, - bool mimic_mq, - bool migrations_allowed) +static struct dm_cache_policy * +__smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size, + bool mimic_mq, bool migrations_allowed, bool cleaner) { unsigned i; unsigned nr_sentinels_per_queue = 2u * NR_CACHE_LEVELS; @@ -1807,6 +1811,7 @@ static struct dm_cache_policy *__smq_create(dm_cblock_t cache_size, goto bad_btracker; mq->migrations_allowed = migrations_allowed; + mq->cleaner = cleaner; return &mq->policy; @@ -1830,21 +1835,24 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size) { - return __smq_create(cache_size, origin_size, cache_block_size, false, true); + return __smq_create(cache_size, origin_size, cache_block_size, + false, true, false); } static struct dm_cache_policy *mq_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size) { - return __smq_create(cache_size, origin_size, cache_block_size, true, true); + return __smq_create(cache_size, origin_size, cache_block_size, + true, true, false); } static struct dm_cache_policy *cleaner_create(dm_cblock_t cache_size, sector_t origin_size, sector_t cache_block_size) { - return __smq_create(cache_size, origin_size, cache_block_size, false, false); + return __smq_create(cache_size, origin_size, cache_block_size, + false, false, true); } /*----------------------------------------------------------------*/ From b223e9ffb64d74822d71d2db23052a713bea4bff Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Fri, 30 Jun 2023 13:52:13 +0200 Subject: [PATCH 143/513] rbd: make get_lock_owner_info() return a single locker or NULL commit f38cb9d9c2045dad16eead4a2e1aedfddd94603b upstream. Make the "num_lockers can be only 0 or 1" assumption explicit and simplify the API by getting rid of output parameters in preparation for calling get_lock_owner_info() twice before blocklisting. 
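With the single-locker API the caller pattern becomes roughly the following (simplified from rbd_try_lock() in the diff below; error unwinding trimmed):

  locker = get_lock_owner_info(rbd_dev);
  if (IS_ERR(locker))
          return PTR_ERR(locker);   /* lookup failed, or lock held by an external mechanism */
  if (!locker)
          goto again;               /* no owner recorded, retry rbd_lock() */

  /* exactly one exclusive locker: use locker->id and locker->info ... */

  free_locker(locker);              /* wrapper around ceph_free_lockers(locker, 1) */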
Signed-off-by: Ilya Dryomov Reviewed-by: Dongsheng Yang Signed-off-by: Greg Kroah-Hartman --- drivers/block/rbd.c | 84 +++++++++++++++++++++++++++------------------ 1 file changed, 51 insertions(+), 33 deletions(-) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index a4188825bd19..189e2f74b914 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3851,10 +3851,17 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result) list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list); } -static int get_lock_owner_info(struct rbd_device *rbd_dev, - struct ceph_locker **lockers, u32 *num_lockers) +static void free_locker(struct ceph_locker *locker) +{ + if (locker) + ceph_free_lockers(locker, 1); +} + +static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev) { struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc; + struct ceph_locker *lockers; + u32 num_lockers; u8 lock_type; char *lock_tag; int ret; @@ -3863,39 +3870,45 @@ static int get_lock_owner_info(struct rbd_device *rbd_dev, ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, RBD_LOCK_NAME, - &lock_type, &lock_tag, lockers, num_lockers); - if (ret) - return ret; + &lock_type, &lock_tag, &lockers, &num_lockers); + if (ret) { + rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret); + return ERR_PTR(ret); + } - if (*num_lockers == 0) { + if (num_lockers == 0) { dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev); + lockers = NULL; goto out; } if (strcmp(lock_tag, RBD_LOCK_TAG)) { rbd_warn(rbd_dev, "locked by external mechanism, tag %s", lock_tag); - ret = -EBUSY; - goto out; + goto err_busy; } if (lock_type == CEPH_CLS_LOCK_SHARED) { rbd_warn(rbd_dev, "shared lock type detected"); - ret = -EBUSY; - goto out; + goto err_busy; } - if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX, + WARN_ON(num_lockers != 1); + if (strncmp(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX, strlen(RBD_LOCK_COOKIE_PREFIX))) { rbd_warn(rbd_dev, "locked by external mechanism, cookie %s", - (*lockers)[0].id.cookie); - ret = -EBUSY; - goto out; + lockers[0].id.cookie); + goto err_busy; } out: kfree(lock_tag); - return ret; + return lockers; + +err_busy: + kfree(lock_tag); + ceph_free_lockers(lockers, num_lockers); + return ERR_PTR(-EBUSY); } static int find_watcher(struct rbd_device *rbd_dev, @@ -3949,51 +3962,56 @@ static int find_watcher(struct rbd_device *rbd_dev, static int rbd_try_lock(struct rbd_device *rbd_dev) { struct ceph_client *client = rbd_dev->rbd_client->client; - struct ceph_locker *lockers; - u32 num_lockers; + struct ceph_locker *locker; int ret; for (;;) { + locker = NULL; + ret = rbd_lock(rbd_dev); if (ret != -EBUSY) - return ret; + goto out; /* determine if the current lock holder is still alive */ - ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers); - if (ret) - return ret; - - if (num_lockers == 0) + locker = get_lock_owner_info(rbd_dev); + if (IS_ERR(locker)) { + ret = PTR_ERR(locker); + locker = NULL; + goto out; + } + if (!locker) goto again; - ret = find_watcher(rbd_dev, lockers); + ret = find_watcher(rbd_dev, locker); if (ret) goto out; /* request lock or error */ rbd_warn(rbd_dev, "breaking header lock owned by %s%llu", - ENTITY_NAME(lockers[0].id.name)); + ENTITY_NAME(locker->id.name)); ret = ceph_monc_blocklist_add(&client->monc, - &lockers[0].info.addr); + &locker->info.addr); if (ret) { - rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d", - ENTITY_NAME(lockers[0].id.name), ret); + rbd_warn(rbd_dev, "failed to 
blocklist %s%llu: %d", + ENTITY_NAME(locker->id.name), ret); goto out; } ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, RBD_LOCK_NAME, - lockers[0].id.cookie, - &lockers[0].id.name); - if (ret && ret != -ENOENT) + locker->id.cookie, &locker->id.name); + if (ret && ret != -ENOENT) { + rbd_warn(rbd_dev, "failed to break header lock: %d", + ret); goto out; + } again: - ceph_free_lockers(lockers, num_lockers); + free_locker(locker); } out: - ceph_free_lockers(lockers, num_lockers); + free_locker(locker); return ret; } From bb25c5c0e4ae164d631396bc4e8d9afe2121a4eb Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Sat, 8 Jul 2023 16:16:59 +0200 Subject: [PATCH 144/513] rbd: harden get_lock_owner_info() a bit commit 8ff2c64c9765446c3cef804fb99da04916603e27 upstream. - we want the exclusive lock type, so test for it directly - use sscanf() to actually parse the lock cookie and avoid admitting invalid handles - bail if locker has a blank address Signed-off-by: Ilya Dryomov Reviewed-by: Dongsheng Yang Signed-off-by: Greg Kroah-Hartman --- drivers/block/rbd.c | 21 +++++++++++++++------ net/ceph/messenger.c | 1 + 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 189e2f74b914..1d2ba739fb26 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3864,10 +3864,9 @@ static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev) u32 num_lockers; u8 lock_type; char *lock_tag; + u64 handle; int ret; - dout("%s rbd_dev %p\n", __func__, rbd_dev); - ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, RBD_LOCK_NAME, &lock_type, &lock_tag, &lockers, &num_lockers); @@ -3888,18 +3887,28 @@ static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev) goto err_busy; } - if (lock_type == CEPH_CLS_LOCK_SHARED) { - rbd_warn(rbd_dev, "shared lock type detected"); + if (lock_type != CEPH_CLS_LOCK_EXCLUSIVE) { + rbd_warn(rbd_dev, "incompatible lock type detected"); goto err_busy; } WARN_ON(num_lockers != 1); - if (strncmp(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX, - strlen(RBD_LOCK_COOKIE_PREFIX))) { + ret = sscanf(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", + &handle); + if (ret != 1) { rbd_warn(rbd_dev, "locked by external mechanism, cookie %s", lockers[0].id.cookie); goto err_busy; } + if (ceph_addr_is_blank(&lockers[0].info.addr)) { + rbd_warn(rbd_dev, "locker has a blank address"); + goto err_busy; + } + + dout("%s rbd_dev %p got locker %s%llu@%pISpc/%u handle %llu\n", + __func__, rbd_dev, ENTITY_NAME(lockers[0].id.name), + &lockers[0].info.addr.in_addr, + le32_to_cpu(lockers[0].info.addr.nonce), handle); out: kfree(lock_tag); diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c index 57d043b382ed..9bf085ddbe51 100644 --- a/net/ceph/messenger.c +++ b/net/ceph/messenger.c @@ -1144,6 +1144,7 @@ bool ceph_addr_is_blank(const struct ceph_entity_addr *addr) return true; } } +EXPORT_SYMBOL(ceph_addr_is_blank); int ceph_addr_port(const struct ceph_entity_addr *addr) { From f3b6e63004f6cd35456a97589e79fbc5826deaf0 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Sat, 22 Jul 2023 20:28:08 +0200 Subject: [PATCH 145/513] rbd: retrieve and check lock owner twice before blocklisting commit 588159009d5b7a09c3e5904cffddbe4a4e170301 upstream. An attempt to acquire exclusive lock can race with the current lock owner closing the image: 1. lock is held by client123, rbd_lock() returns -EBUSY 2. get_lock_owner_info() returns client123 instance details 3. 
client123 closes the image, lock is released 4. find_watcher() returns 0 as there is no matching watcher anymore 5. client123 instance gets erroneously blocklisted Particularly impacted is mirror snapshot scheduler in snapshot-based mirroring since it happens to open and close images a lot (images are opened only for as long as it takes to take the next mirror snapshot, the same client instance is used for all images). To reduce the potential for erroneous blocklisting, retrieve the lock owner again after find_watcher() returns 0. If it's still there, make sure it matches the previously detected lock owner. Cc: stable@vger.kernel.org # f38cb9d9c204: rbd: make get_lock_owner_info() return a single locker or NULL Cc: stable@vger.kernel.org # 8ff2c64c9765: rbd: harden get_lock_owner_info() a bit Cc: stable@vger.kernel.org Signed-off-by: Ilya Dryomov Reviewed-by: Dongsheng Yang Signed-off-by: Greg Kroah-Hartman --- drivers/block/rbd.c | 25 +++++++++++++++++++++++-- 1 file changed, 23 insertions(+), 2 deletions(-) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index 1d2ba739fb26..c269c552a43a 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3851,6 +3851,15 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result) list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list); } +static bool locker_equal(const struct ceph_locker *lhs, + const struct ceph_locker *rhs) +{ + return lhs->id.name.type == rhs->id.name.type && + lhs->id.name.num == rhs->id.name.num && + !strcmp(lhs->id.cookie, rhs->id.cookie) && + ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr); +} + static void free_locker(struct ceph_locker *locker) { if (locker) @@ -3971,11 +3980,11 @@ static int find_watcher(struct rbd_device *rbd_dev, static int rbd_try_lock(struct rbd_device *rbd_dev) { struct ceph_client *client = rbd_dev->rbd_client->client; - struct ceph_locker *locker; + struct ceph_locker *locker, *refreshed_locker; int ret; for (;;) { - locker = NULL; + locker = refreshed_locker = NULL; ret = rbd_lock(rbd_dev); if (ret != -EBUSY) @@ -3995,6 +4004,16 @@ static int rbd_try_lock(struct rbd_device *rbd_dev) if (ret) goto out; /* request lock or error */ + refreshed_locker = get_lock_owner_info(rbd_dev); + if (IS_ERR(refreshed_locker)) { + ret = PTR_ERR(refreshed_locker); + refreshed_locker = NULL; + goto out; + } + if (!refreshed_locker || + !locker_equal(locker, refreshed_locker)) + goto again; + rbd_warn(rbd_dev, "breaking header lock owned by %s%llu", ENTITY_NAME(locker->id.name)); @@ -4016,10 +4035,12 @@ static int rbd_try_lock(struct rbd_device *rbd_dev) } again: + free_locker(refreshed_locker); free_locker(locker); } out: + free_locker(refreshed_locker); free_locker(locker); return ret; } From e8e93e2f017e5467912d0746aee58a1f3cf501eb Mon Sep 17 00:00:00 2001 From: "Steven Rostedt (Google)" Date: Tue, 31 Jan 2023 09:52:37 -0500 Subject: [PATCH 146/513] tracing: Fix trace_event_raw_event_synth() if else statement commit 9971c3f944489ff7aacb9d25e0cde841a5f6018a upstream. The test to check if the field is a stack is to be done if it is not a string. But the code had: } if (event->fields[i]->is_stack) { and not } else if (event->fields[i]->is_stack) { which would cause it to always be tested. Worse yet, this also included an "else" statement that was only to be called if the field was not a string and a stack, but this code allows it to be called if it was a string (and not a stack). Also fixed some whitespace issues. 
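Schematically, the field handling is meant to be a mutually exclusive if/else-if chain (illustration only, condensed from the function in the diff below):

  if (event->fields[i]->is_string) {
          /* copy the (possibly dynamic) string value */
  } else if (event->fields[i]->is_stack) {   /* was "} if (...)", so it also ran for strings */
          /* copy the stacktrace */
  } else {
          /* plain numeric field; must not be reached for strings or stacks */
  }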
Link: https://lore.kernel.org/all/202301302110.mEtNwkBD-lkp@intel.com/ Link: https://lore.kernel.org/linux-trace-kernel/20230131095237.63e3ca8d@gandalf.local.home Cc: Tom Zanussi Fixes: 00cf3d672a9d ("tracing: Allow synthetic events to pass around stacktraces") Reported-by: kernel test robot Signed-off-by: Steven Rostedt (Google) Acked-by: Masami Hiramatsu (Google) Signed-off-by: Greg Kroah-Hartman --- kernel/trace/trace_events_synth.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/trace/trace_events_synth.c b/kernel/trace/trace_events_synth.c index ce8fad956b38..1e02bb431dcb 100644 --- a/kernel/trace/trace_events_synth.c +++ b/kernel/trace/trace_events_synth.c @@ -557,8 +557,8 @@ static notrace void trace_event_raw_event_synth(void *__data, event->fields[i]->is_dynamic, data_size, &n_u64); data_size += len; /* only dynamic string increments */ - } if (event->fields[i]->is_stack) { - long *stack = (long *)(long)var_ref_vals[val_idx]; + } else if (event->fields[i]->is_stack) { + long *stack = (long *)(long)var_ref_vals[val_idx]; len = trace_stack(entry, event, stack, data_size, &n_u64); From cd031669682ec0c4653cfd223644e1ec98a80179 Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 28 Dec 2022 22:21:49 +0100 Subject: [PATCH 147/513] ACPI: processor: perflib: Use the "no limit" frequency QoS commit c02d5feb6e2f60affc6ba8606d8d614c071e2ba6 upstream. When _PPC returns 0, it means that the CPU frequency is not limited by the platform firmware, so make acpi_processor_get_platform_limit() update the frequency QoS request used by it to "no limit" in that case. This addresses a problem with limiting CPU frequency artificially on some systems after CPU offline/online to the frequency that corresponds to the first entry in the _PSS return package. Reported-by: Pratyush Yadav Signed-off-by: Rafael J. Wysocki Reviewed-by: Pratyush Yadav Tested-by: Pratyush Yadav Tested-by: Hagar Hemdan Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/processor_perflib.c | 20 ++++++++++++++++---- 1 file changed, 16 insertions(+), 4 deletions(-) diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index 757a98f6d7a2..c1a815a47116 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -53,6 +53,8 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) { acpi_status status = 0; unsigned long long ppc = 0; + s32 qos_value; + int index; int ret; if (!pr) @@ -72,17 +74,27 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) } } + index = ppc; + pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, - (int)ppc, ppc ? "" : "not"); + index, index ? "is" : "is not"); - pr->performance_platform_limit = (int)ppc; + pr->performance_platform_limit = index; if (ppc >= pr->performance->state_count || unlikely(!freq_qos_request_active(&pr->perflib_req))) return 0; - ret = freq_qos_update_request(&pr->perflib_req, - pr->performance->states[ppc].core_frequency * 1000); + /* + * If _PPC returns 0, it means that all of the available states can be + * used ("no limit"). + */ + if (index == 0) + qos_value = FREQ_QOS_MAX_DEFAULT_VALUE; + else + qos_value = pr->performance->states[index].core_frequency * 1000; + + ret = freq_qos_update_request(&pr->perflib_req, qos_value); if (ret < 0) { pr_warn("Failed to update perflib freq constraint: CPU%d (%d)\n", pr->id, ret); From 73b4cbed9176a4009bb3cb9b04e46f8163f6e452 Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Wed, 28 Dec 2022 22:24:10 +0100 Subject: [PATCH 148/513] ACPI: processor: perflib: Avoid updating frequency QoS unnecessarily commit 99387b016022c29234c4ebf9abd34358c6e56532 upstream. Modify acpi_processor_get_platform_limit() to avoid updating its frequency QoS request when the _PPC return value has not changed by comparing that value to the previous _PPC return value stored in the performance_platform_limit field of the struct acpi_processor corresponding to the given CPU. While at it, do the _PPC return value check against the state count earlier, to avoid setting performance_platform_limit to an invalid value, and make acpi_processor_ppc_init() use FREQ_QOS_MAX_DEFAULT_VALUE as the "no limit" frequency QoS for consistency. Signed-off-by: Rafael J. Wysocki Tested-by: Hagar Hemdan Signed-off-by: Greg Kroah-Hartman --- drivers/acpi/processor_perflib.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c index c1a815a47116..1696700fd2fb 100644 --- a/drivers/acpi/processor_perflib.c +++ b/drivers/acpi/processor_perflib.c @@ -76,13 +76,16 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) index = ppc; + if (pr->performance_platform_limit == index || + ppc >= pr->performance->state_count) + return 0; + pr_debug("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, index, index ? "is" : "is not"); pr->performance_platform_limit = index; - if (ppc >= pr->performance->state_count || - unlikely(!freq_qos_request_active(&pr->perflib_req))) + if (unlikely(!freq_qos_request_active(&pr->perflib_req))) return 0; /* @@ -177,9 +180,16 @@ void acpi_processor_ppc_init(struct cpufreq_policy *policy) if (!pr) continue; + /* + * Reset performance_platform_limit in case there is a stale + * value in it, so as to make it match the "no limit" QoS value + * below. + */ + pr->performance_platform_limit = 0; + ret = freq_qos_add_request(&policy->constraints, - &pr->perflib_req, - FREQ_QOS_MAX, INT_MAX); + &pr->perflib_req, FREQ_QOS_MAX, + FREQ_QOS_MAX_DEFAULT_VALUE); if (ret < 0) pr_err("Failed to add freq constraint for CPU%d (%d)\n", cpu, ret); From 43bbe1a091e0d125789bf5ba15f297c92cb367fa Mon Sep 17 00:00:00 2001 From: "Rafael J. Wysocki" Date: Wed, 28 Dec 2022 22:26:04 +0100 Subject: [PATCH 149/513] cpufreq: intel_pstate: Drop ACPI _PSS states table patching commit e8a0e30b742f76ebd0f3b196973df4bf65d8fbbb upstream. After making acpi_processor_get_platform_limit() use the "no limit" value for its frequency QoS request when _PPC returns 0, it is not necessary to replace the frequency corresponding to the first _PSS return package entry with the maximum turbo frequency of the given CPU in intel_pstate_init_acpi_perf_limits() any more, so drop the code doing that along with the comment explaining it. Signed-off-by: Rafael J. Wysocki Tested-by: Hagar Hemdan Signed-off-by: Greg Kroah-Hartman --- drivers/cpufreq/intel_pstate.c | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index 2f5063db2f1f..736cb2cfcbb0 100644 --- a/drivers/cpufreq/intel_pstate.c +++ b/drivers/cpufreq/intel_pstate.c @@ -448,20 +448,6 @@ static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy) (u32) cpu->acpi_perf_data.states[i].control); } - /* - * The _PSS table doesn't contain whole turbo frequency range. 
- * This just contains +1 MHZ above the max non turbo frequency, - * with control value corresponding to max turbo ratio. But - * when cpufreq set policy is called, it will call with this - * max frequency, which will cause a reduced performance as - * this driver uses real max turbo frequency as the max - * frequency. So correct this frequency in _PSS table to - * correct max turbo frequency based on the turbo state. - * Also need to convert to MHz as _PSS freq is in MHz. - */ - if (!global.turbo_disabled) - cpu->acpi_perf_data.states[0].core_frequency = - policy->cpuinfo.max_freq / 1000; cpu->valid_pss_table = true; pr_debug("_PPC limits will be enforced\n"); From 374edda0db7069816508c7c1ffaede45b18c37e7 Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Tue, 4 Jul 2023 22:44:36 +0200 Subject: [PATCH 150/513] selftests: mptcp: sockopt: use 'iptables-legacy' if available commit a5a5990c099dd354e05e89ee77cd2dbf6655d4a1 upstream. IPTables commands using 'iptables-nft' fail on old kernels, at least on v5.15 because it doesn't see the default IPTables chains: $ iptables -L iptables/1.8.2 Failed to initialize nft: Protocol not supported As a first step before switching to NFTables, we can use iptables-legacy if available. Link: https://github.com/multipath-tcp/mptcp_net-next/issues/368 Fixes: dc65fe82fb07 ("selftests: mptcp: add packet mark test case") Cc: stable@vger.kernel.org Acked-by: Paolo Abeni Signed-off-by: Matthieu Baerts Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- .../selftests/net/mptcp/mptcp_sockopt.sh | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh index d5c79e4a8f1e..3432d11e0a03 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_sockopt.sh @@ -13,13 +13,15 @@ timeout_poll=30 timeout_test=$((timeout_poll * 2 + 1)) mptcp_connect="" do_all_tests=1 +iptables="iptables" +ip6tables="ip6tables" add_mark_rules() { local ns=$1 local m=$2 - for t in iptables ip6tables; do + for t in ${iptables} ${ip6tables}; do # just to debug: check we have multiple subflows connection requests ip netns exec $ns $t -A OUTPUT -p tcp --syn -m mark --mark $m -j ACCEPT @@ -90,14 +92,14 @@ if [ $? -ne 0 ];then exit $ksft_skip fi -iptables -V > /dev/null 2>&1 -if [ $? -ne 0 ];then +# Use the legacy version if available to support old kernel versions +if iptables-legacy -V &> /dev/null; then + iptables="iptables-legacy" + ip6tables="ip6tables-legacy" +elif ! iptables -V &> /dev/null; then echo "SKIP: Could not run all tests without iptables tool" exit $ksft_skip -fi - -ip6tables -V > /dev/null 2>&1 -if [ $? -ne 0 ];then +elif ! ip6tables -V &> /dev/null; then echo "SKIP: Could not run all tests without ip6tables tool" exit $ksft_skip fi @@ -107,10 +109,10 @@ check_mark() local ns=$1 local af=$2 - tables=iptables + tables=${iptables} if [ $af -eq 6 ];then - tables=ip6tables + tables=${ip6tables} fi counters=$(ip netns exec $ns $tables -v -L OUTPUT | grep DROP) From 3359fdf49de48aa7ef898d0e52132d9ec561b741 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Thu, 20 Jul 2023 13:16:53 -0600 Subject: [PATCH 151/513] io_uring: treat -EAGAIN for REQ_F_NOWAIT as final for io-wq commit a9be202269580ca611c6cebac90eaf1795497800 upstream. io-wq assumes that an issue is blocking, but it may not be if the request type has asked for a non-blocking attempt. 
If we get -EAGAIN for that case, then we need to treat it as a final result and not retry or arm poll for it. Cc: stable@vger.kernel.org # 5.10+ Link: https://github.com/axboe/liburing/issues/897 Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- io_uring/io_uring.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index f65c6811ede8..774b1ae8adf7 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -7066,6 +7066,14 @@ static void io_wq_submit_work(struct io_wq_work *work) */ if (ret != -EAGAIN || !(req->ctx->flags & IORING_SETUP_IOPOLL)) break; + + /* + * If REQ_F_NOWAIT is set, then don't wait or retry with + * poll. -EAGAIN is final for that case. + */ + if (req->flags & REQ_F_NOWAIT) + break; + cond_resched(); } while (1); } From 66cf5f394abe33226834f5d3bcdff020abd6b586 Mon Sep 17 00:00:00 2001 From: Thomas Petazzoni Date: Thu, 13 Jul 2023 13:21:12 +0200 Subject: [PATCH 152/513] ASoC: cs42l51: fix driver to properly autoload with automatic module loading commit e51df4f81b02bcdd828a04de7c1eb6a92988b61e upstream. In commit 2cb1e0259f50 ("ASoC: cs42l51: re-hook of_match_table pointer"), 9 years ago, some random guy fixed the cs42l51 after it was split into a core part and an I2C part to properly match based on a Device Tree compatible string. However, the fix in this commit is wrong: the MODULE_DEVICE_TABLE(of, ....) is in the core part of the driver, not the I2C part. Therefore, automatic module loading based on module.alias, based on matching with the DT compatible string, loads the core part of the driver, but not the I2C part. And threfore, the i2c_driver is not registered, and the codec is not known to the system, nor matched with a DT node with the corresponding compatible string. In order to fix that, we move the MODULE_DEVICE_TABLE(of, ...) into the I2C part of the driver. The cs42l51_of_match[] array is also moved as well, as it is not possible to have this definition in one file, and the MODULE_DEVICE_TABLE(of, ...) invocation in another file, due to how MODULE_DEVICE_TABLE works. Thanks to this commit, the I2C part of the driver now properly autoloads, and thanks to its dependency on the core part, the core part gets autoloaded as well, resulting in a functional sound card without having to manually load kernel modules. 
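Concretely, the match table and its MODULE_DEVICE_TABLE() now sit next to the i2c_driver they describe (excerpt of the moved code, see the diff below):

  /* sound/soc/codecs/cs42l51-i2c.c */
  const struct of_device_id cs42l51_of_match[] = {
          { .compatible = "cirrus,cs42l51", },
          { }
  };
  MODULE_DEVICE_TABLE(of, cs42l51_of_match);

so the OF module alias ends up in the module that actually registers the I2C driver and automatic loading works again.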
Fixes: 2cb1e0259f50 ("ASoC: cs42l51: re-hook of_match_table pointer") Cc: stable@vger.kernel.org Signed-off-by: Thomas Petazzoni Link: https://lore.kernel.org/r/20230713112112.778576-1-thomas.petazzoni@bootlin.com Signed-off-by: Mark Brown Signed-off-by: Greg Kroah-Hartman --- sound/soc/codecs/cs42l51-i2c.c | 6 ++++++ sound/soc/codecs/cs42l51.c | 7 ------- sound/soc/codecs/cs42l51.h | 1 - 3 files changed, 6 insertions(+), 8 deletions(-) diff --git a/sound/soc/codecs/cs42l51-i2c.c b/sound/soc/codecs/cs42l51-i2c.c index 70260e0a8f09..3ff73367897d 100644 --- a/sound/soc/codecs/cs42l51-i2c.c +++ b/sound/soc/codecs/cs42l51-i2c.c @@ -19,6 +19,12 @@ static struct i2c_device_id cs42l51_i2c_id[] = { }; MODULE_DEVICE_TABLE(i2c, cs42l51_i2c_id); +const struct of_device_id cs42l51_of_match[] = { + { .compatible = "cirrus,cs42l51", }, + { } +}; +MODULE_DEVICE_TABLE(of, cs42l51_of_match); + static int cs42l51_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c index c61b17dc2af8..4b026e1c3fe3 100644 --- a/sound/soc/codecs/cs42l51.c +++ b/sound/soc/codecs/cs42l51.c @@ -825,13 +825,6 @@ int __maybe_unused cs42l51_resume(struct device *dev) } EXPORT_SYMBOL_GPL(cs42l51_resume); -const struct of_device_id cs42l51_of_match[] = { - { .compatible = "cirrus,cs42l51", }, - { } -}; -MODULE_DEVICE_TABLE(of, cs42l51_of_match); -EXPORT_SYMBOL_GPL(cs42l51_of_match); - MODULE_AUTHOR("Arnaud Patard "); MODULE_DESCRIPTION("Cirrus Logic CS42L51 ALSA SoC Codec Driver"); MODULE_LICENSE("GPL"); diff --git a/sound/soc/codecs/cs42l51.h b/sound/soc/codecs/cs42l51.h index 9d06cf7f8876..4f13c38484b7 100644 --- a/sound/soc/codecs/cs42l51.h +++ b/sound/soc/codecs/cs42l51.h @@ -16,7 +16,6 @@ int cs42l51_probe(struct device *dev, struct regmap *regmap); int cs42l51_remove(struct device *dev); int __maybe_unused cs42l51_suspend(struct device *dev); int __maybe_unused cs42l51_resume(struct device *dev); -extern const struct of_device_id cs42l51_of_match[]; #define CS42L51_CHIP_ID 0x1B #define CS42L51_CHIP_REV_A 0x00 From 78001ffa9bc48c214abff011b4d9a1e5800bf3b1 Mon Sep 17 00:00:00 2001 From: Matthieu Baerts Date: Tue, 25 Jul 2023 11:34:55 -0700 Subject: [PATCH 153/513] selftests: mptcp: join: only check for ip6tables if needed commit 016e7ba47f33064fbef8c4307a2485d2669dfd03 upstream. If 'iptables-legacy' is available, 'ip6tables-legacy' command will be used instead of 'ip6tables'. So no need to look if 'ip6tables' is available in this case. Cc: stable@vger.kernel.org Fixes: 0c4cd3f86a40 ("selftests: mptcp: join: use 'iptables-legacy' if available") Acked-by: Paolo Abeni Signed-off-by: Matthieu Baerts Signed-off-by: Mat Martineau Link: https://lore.kernel.org/r/20230725-send-net-20230725-v1-1-6f60fe7137a9@kernel.org Signed-off-by: Jakub Kicinski Signed-off-by: Matthieu Baerts Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/net/mptcp/mptcp_join.sh | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index 368fae525fea..a68048f1fc5a 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -179,10 +179,7 @@ if iptables-legacy -V &> /dev/null; then elif ! iptables -V &> /dev/null; then echo "SKIP: Could not run all tests without iptables tool" exit $ksft_skip -fi - -ip6tables -V > /dev/null 2>&1 -if [ $? -ne 0 ];then +elif ! 
ip6tables -V &> /dev/null; then echo "SKIP: Could not run all tests without ip6tables tool" exit $ksft_skip fi From 38d4ca22a5288c4bae7e6d62a1728b0718d51866 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 3 Aug 2023 10:22:47 +0200 Subject: [PATCH 154/513] Linux 5.15.124 Link: https://lore.kernel.org/r/20230801091910.165050260@linuxfoundation.org Tested-by: Jon Hunter Tested-by: SeongJae Park Tested-by: Florian Fainelli Tested-by: Shuah Khan Tested-by: Harshit Mogalapalli Link: https://lore.kernel.org/r/20230802065452.161574662@linuxfoundation.org Tested-by: Chris Paterson (CIP) Tested-by: SeongJae Park Tested-by: Ron Economos Tested-by: Florian Fainelli Tested-by: Guenter Roeck Tested-by: Linux Kernel Functional Testing Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 38ef4f11f530..6fb94face8d7 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 15 -SUBLEVEL = 123 +SUBLEVEL = 124 EXTRAVERSION = NAME = Trick or Treat From 0774fc2177c3a0576d9d06c5dd507be8efc686ac Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 155/513] init: Provide arch_cpu_finalize_init() commit 7725acaa4f0c04fbefb0e0d342635b967bb7d414 upstream check_bugs() has become a dumping ground for all sorts of activities to finalize the CPU initialization before running the rest of the init code. Most are empty, a few do actual bug checks, some do alternative patching and some cobble a CPU advertisement string together.... Aside of that the current implementation requires duplicated function declaration and mostly empty header files for them. Provide a new function arch_cpu_finalize_init(). Provide a generic declaration if CONFIG_ARCH_HAS_CPU_FINALIZE_INIT is selected and a stub inline otherwise. This requires a temporary #ifdef in start_kernel() which will be removed along with check_bugs() once the architectures are converted over. 
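The generic part reduces to an opt-in Kconfig symbol plus a declaration-or-stub pair and the temporary call site (condensed from the diff below):

  /* include/linux/cpu.h */
  #ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
  void arch_cpu_finalize_init(void);
  #else
  static inline void arch_cpu_finalize_init(void) { }
  #endif

  /* init/main.c, start_kernel() */
  arch_cpu_finalize_init();
  /* Temporary conditional until everything has been converted */
  #ifndef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT
  check_bugs();
  #endif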
Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20230613224544.957805717@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/Kconfig | 3 +++ include/linux/cpu.h | 6 ++++++ init/main.c | 5 +++++ 3 files changed, 14 insertions(+) diff --git a/arch/Kconfig b/arch/Kconfig index 5987363b41c2..b45c699c2bac 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -264,6 +264,9 @@ config ARCH_HAS_DMA_SET_UNCACHED config ARCH_HAS_DMA_CLEAR_UNCACHED bool +config ARCH_HAS_CPU_FINALIZE_INIT + bool + # Select if arch init_task must go in the __init_task_data section config ARCH_TASK_STRUCT_ON_STACK bool diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 6102a21a01d9..94d26089d52a 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -187,6 +187,12 @@ void arch_cpu_idle_enter(void); void arch_cpu_idle_exit(void); void arch_cpu_idle_dead(void); +#ifdef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT +void arch_cpu_finalize_init(void); +#else +static inline void arch_cpu_finalize_init(void) { } +#endif + int cpu_report_state(int cpu); int cpu_check_up_prepare(int cpu); void cpu_set_state_online(int cpu); diff --git a/init/main.c b/init/main.c index 649d9e4201a8..b426fdce0da7 100644 --- a/init/main.c +++ b/init/main.c @@ -1134,7 +1134,12 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void) delayacct_init(); poking_init(); + + arch_cpu_finalize_init(); + /* Temporary conditional until everything has been converted */ +#ifndef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT check_bugs(); +#endif acpi_subsystem_init(); arch_post_acpi_subsys_init(); From 75da6209d3ba7a75d18fd4614198630f3c4dfab1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 156/513] x86/cpu: Switch to arch_cpu_finalize_init() commit 7c7077a72674402654f3291354720cd73cdf649e upstream check_bugs() is a dumping ground for finalizing the CPU bringup. Only parts of it has to do with actual CPU bugs. Split it apart into arch_cpu_finalize_init() and cpu_select_mitigations(). Fixup the bogus 32bit comments while at it. No functional change. 
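In outline, the x86 arch_cpu_finalize_init() keeps the bringup finalization and hands the actual bug handling to the split-out cpu_select_mitigations() (sketch only; the 32/64-bit specifics are summarized as comments, see the diff below):

  void __init arch_cpu_finalize_init(void)
  {
          identify_boot_cpu();
          cpu_smt_check_topology();
          cpu_select_mitigations();     /* formerly the core of check_bugs() */
          arch_smt_update();
          /* 32-bit: reject pre-i486 CPUs and fix up utsname */
          alternative_instructions();
          /* 64-bit: keep the first 2MB out of huge pages; 32-bit: fpu__init_check_bugs() */
  }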
Signed-off-by: Thomas Gleixner Reviewed-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230613224545.019583869@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/x86/Kconfig | 1 + arch/x86/include/asm/bugs.h | 2 -- arch/x86/kernel/cpu/bugs.c | 51 +--------------------------------- arch/x86/kernel/cpu/common.c | 53 ++++++++++++++++++++++++++++++++++++ arch/x86/kernel/cpu/cpu.h | 1 + 5 files changed, 56 insertions(+), 52 deletions(-) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a08ce6360382..e9e1ccaf4ff2 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -68,6 +68,7 @@ config X86 select ARCH_ENABLE_THP_MIGRATION if X86_64 && TRANSPARENT_HUGEPAGE select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI select ARCH_HAS_CACHE_LINE_SIZE + select ARCH_HAS_CPU_FINALIZE_INIT select ARCH_HAS_DEBUG_VIRTUAL select ARCH_HAS_DEBUG_VM_PGTABLE if !X86_PAE select ARCH_HAS_DEVMEM_IS_ALLOWED diff --git a/arch/x86/include/asm/bugs.h b/arch/x86/include/asm/bugs.h index 92ae28389940..f25ca2d709d4 100644 --- a/arch/x86/include/asm/bugs.h +++ b/arch/x86/include/asm/bugs.h @@ -4,8 +4,6 @@ #include -extern void check_bugs(void); - #if defined(CONFIG_CPU_SUP_INTEL) && defined(CONFIG_X86_32) int ppro_with_ram_bug(void); #else diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 2627e97e6e2e..a4a25ad949a1 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -9,7 +9,6 @@ * - Andrew D. Balsa (code cleanup). */ #include -#include #include #include #include @@ -27,8 +26,6 @@ #include #include #include -#include -#include #include #include #include @@ -124,21 +121,8 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush); DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear); EXPORT_SYMBOL_GPL(mmio_stale_data_clear); -void __init check_bugs(void) +void __init cpu_select_mitigations(void) { - identify_boot_cpu(); - - /* - * identify_boot_cpu() initialized SMT support information, let the - * core code know. - */ - cpu_smt_check_topology(); - - if (!IS_ENABLED(CONFIG_SMP)) { - pr_info("CPU: "); - print_cpu_info(&boot_cpu_data); - } - /* * Read the SPEC_CTRL MSR to account for reserved bits which may * have unknown values. AMD64_LS_CFG MSR is cached in the early AMD @@ -175,39 +159,6 @@ void __init check_bugs(void) md_clear_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); - - arch_smt_update(); - -#ifdef CONFIG_X86_32 - /* - * Check whether we are able to run this kernel safely on SMP. - * - * - i386 is no longer supported. - * - In order to run on anything without a TSC, we need to be - * compiled for a i486. - */ - if (boot_cpu_data.x86 < 4) - panic("Kernel requires i486+ for 'invlpg' and other features"); - - init_utsname()->machine[1] = - '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); - alternative_instructions(); - - fpu__init_check_bugs(); -#else /* CONFIG_X86_64 */ - alternative_instructions(); - - /* - * Make sure the first 2MB area is not mapped by huge pages - * There are typically fixed size MTRRs in there and overlapping - * MTRRs into large pages causes slow downs. - * - * Right now we don't do that with gbpages because there seems - * very little benefit for that case. 
- */ - if (!direct_gbpages) - set_memory_4k((unsigned long)__va(0), 1); -#endif } /* diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 69752745a5b1..f9909e7fe56b 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -19,10 +19,13 @@ #include #include #include +#include #include #include #include +#include +#include #include #include #include @@ -58,6 +61,7 @@ #include #include #include +#include #include #include "cpu.h" @@ -2208,3 +2212,52 @@ void arch_smt_update(void) /* Check whether IPI broadcasting can be enabled */ apic_smt_update(); } + +void __init arch_cpu_finalize_init(void) +{ + identify_boot_cpu(); + + /* + * identify_boot_cpu() initialized SMT support information, let the + * core code know. + */ + cpu_smt_check_topology(); + + if (!IS_ENABLED(CONFIG_SMP)) { + pr_info("CPU: "); + print_cpu_info(&boot_cpu_data); + } + + cpu_select_mitigations(); + + arch_smt_update(); + + if (IS_ENABLED(CONFIG_X86_32)) { + /* + * Check whether this is a real i386 which is not longer + * supported and fixup the utsname. + */ + if (boot_cpu_data.x86 < 4) + panic("Kernel requires i486+ for 'invlpg' and other features"); + + init_utsname()->machine[1] = + '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); + } + + alternative_instructions(); + + if (IS_ENABLED(CONFIG_X86_64)) { + /* + * Make sure the first 2MB area is not mapped by huge pages + * There are typically fixed size MTRRs in there and overlapping + * MTRRs into large pages causes slow downs. + * + * Right now we don't do that with gbpages because there seems + * very little benefit for that case. + */ + if (!direct_gbpages) + set_memory_4k((unsigned long)__va(0), 1); + } else { + fpu__init_check_bugs(); + } +} diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 7c9b5893c30a..61dbb9b216e6 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -79,6 +79,7 @@ extern void detect_ht(struct cpuinfo_x86 *c); extern void check_null_seg_clears_base(struct cpuinfo_x86 *c); unsigned int aperfmperf_get_khz(int cpu); +void cpu_select_mitigations(void); extern void x86_spec_ctrl_setup_ap(void); extern void update_srbds_msr(void); From de8c592cc5a1fa76b35d48916c4d372d4b4d3729 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 157/513] ARM: cpu: Switch to arch_cpu_finalize_init() commit ee31bb0524a2e7c99b03f50249a411cc1eaa411f upstream check_bugs() is about to be phased out. Switch over to the new arch_cpu_finalize_init() implementation. No functional change. 
Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20230613224545.078124882@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/arm/Kconfig | 1 + arch/arm/include/asm/bugs.h | 4 ---- arch/arm/kernel/bugs.c | 3 ++- 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index a8ae17f5740d..f2fbb170d813 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -4,6 +4,7 @@ config ARM default y select ARCH_32BIT_OFF_T select ARCH_HAS_BINFMT_FLAT + select ARCH_HAS_CPU_FINALIZE_INIT if MMU select ARCH_HAS_DEBUG_VIRTUAL if MMU select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE select ARCH_HAS_ELF_RANDOMIZE diff --git a/arch/arm/include/asm/bugs.h b/arch/arm/include/asm/bugs.h index 97a312ba0840..fe385551edec 100644 --- a/arch/arm/include/asm/bugs.h +++ b/arch/arm/include/asm/bugs.h @@ -1,7 +1,5 @@ /* SPDX-License-Identifier: GPL-2.0-only */ /* - * arch/arm/include/asm/bugs.h - * * Copyright (C) 1995-2003 Russell King */ #ifndef __ASM_BUGS_H @@ -10,10 +8,8 @@ extern void check_writebuffer_bugs(void); #ifdef CONFIG_MMU -extern void check_bugs(void); extern void check_other_bugs(void); #else -#define check_bugs() do { } while (0) #define check_other_bugs() do { } while (0) #endif diff --git a/arch/arm/kernel/bugs.c b/arch/arm/kernel/bugs.c index 14c8dbbb7d2d..087bce6ec8e9 100644 --- a/arch/arm/kernel/bugs.c +++ b/arch/arm/kernel/bugs.c @@ -1,5 +1,6 @@ // SPDX-License-Identifier: GPL-2.0 #include +#include #include #include @@ -11,7 +12,7 @@ void check_other_bugs(void) #endif } -void __init check_bugs(void) +void __init arch_cpu_finalize_init(void) { check_writebuffer_bugs(); check_other_bugs(); From 8c8165cd25cfd86e0aa1126fb89e47f53f29f02f Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 158/513] ia64/cpu: Switch to arch_cpu_finalize_init() commit 6c38e3005621800263f117fb00d6787a76e16de7 upstream check_bugs() is about to be phased out. Switch over to the new arch_cpu_finalize_init() implementation. No functional change. Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20230613224545.137045745@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/ia64/Kconfig | 1 + arch/ia64/include/asm/bugs.h | 20 -------------------- arch/ia64/kernel/setup.c | 3 +-- 3 files changed, 2 insertions(+), 22 deletions(-) delete mode 100644 arch/ia64/include/asm/bugs.h diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index b1f2b6ac9b1d..89869aff8ca2 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -8,6 +8,7 @@ menu "Processor type and features" config IA64 bool + select ARCH_HAS_CPU_FINALIZE_INIT select ARCH_HAS_DMA_MARK_CLEAN select ARCH_HAS_STRNCPY_FROM_USER select ARCH_HAS_STRNLEN_USER diff --git a/arch/ia64/include/asm/bugs.h b/arch/ia64/include/asm/bugs.h deleted file mode 100644 index 0d6b9bded56c..000000000000 --- a/arch/ia64/include/asm/bugs.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * This is included by init/main.c to check for architecture-dependent bugs. - * - * Needs: - * void check_bugs(void); - * - * Based on . - * - * Modified 1998, 1999, 2003 - * David Mosberger-Tang , Hewlett-Packard Co. 
- */ -#ifndef _ASM_IA64_BUGS_H -#define _ASM_IA64_BUGS_H - -#include - -extern void check_bugs (void); - -#endif /* _ASM_IA64_BUGS_H */ diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 31fb84de2d21..041681e5de47 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -1070,8 +1070,7 @@ cpu_init (void) } } -void __init -check_bugs (void) +void __init arch_cpu_finalize_init(void) { ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, (unsigned long) __end___mckinley_e9_bundles); From 4baf46a3ba00be6bff6d680544211eee5827ee76 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 159/513] m68k/cpu: Switch to arch_cpu_finalize_init() commit 9ceecc2589b9d7cef6b321339ed8de484eac4b20 upstream check_bugs() is about to be phased out. Switch over to the new arch_cpu_finalize_init() implementation. No functional change. Signed-off-by: Thomas Gleixner Acked-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/20230613224545.254342916@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/m68k/Kconfig | 1 + arch/m68k/include/asm/bugs.h | 21 --------------------- arch/m68k/kernel/setup_mm.c | 3 ++- 3 files changed, 3 insertions(+), 22 deletions(-) delete mode 100644 arch/m68k/include/asm/bugs.h diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 0b50da08a9c5..810056d8ea67 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -4,6 +4,7 @@ config M68K default y select ARCH_32BIT_OFF_T select ARCH_HAS_BINFMT_FLAT + select ARCH_HAS_CPU_FINALIZE_INIT if MMU select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS diff --git a/arch/m68k/include/asm/bugs.h b/arch/m68k/include/asm/bugs.h deleted file mode 100644 index 745530651e0b..000000000000 --- a/arch/m68k/include/asm/bugs.h +++ /dev/null @@ -1,21 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * include/asm-m68k/bugs.h - * - * Copyright (C) 1994 Linus Torvalds - */ - -/* - * This is included by init/main.c to check for architecture-dependent bugs. - * - * Needs: - * void check_bugs(void); - */ - -#ifdef CONFIG_MMU -extern void check_bugs(void); /* in arch/m68k/kernel/setup.c */ -#else -static void check_bugs(void) -{ -} -#endif diff --git a/arch/m68k/kernel/setup_mm.c b/arch/m68k/kernel/setup_mm.c index f24410a54dcb..868641a53623 100644 --- a/arch/m68k/kernel/setup_mm.c +++ b/arch/m68k/kernel/setup_mm.c @@ -10,6 +10,7 @@ */ #include +#include #include #include #include @@ -512,7 +513,7 @@ static int __init proc_hardware_init(void) module_init(proc_hardware_init); #endif -void check_bugs(void) +void __init arch_cpu_finalize_init(void) { #if defined(CONFIG_FPU) && !defined(CONFIG_M68KFPU_EMU) if (m68k_fputype == 0) { From dfeb371a2707c0a70dcf2058ce436f81ab7babb3 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 160/513] mips/cpu: Switch to arch_cpu_finalize_init() commit 7f066a22fe353a827a402ee2835e81f045b1574d upstream check_bugs() is about to be phased out. Switch over to the new arch_cpu_finalize_init() implementation. No functional change. 
Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20230613224545.312438573@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/mips/Kconfig | 1 + arch/mips/include/asm/bugs.h | 17 ----------------- arch/mips/kernel/setup.c | 13 +++++++++++++ 3 files changed, 14 insertions(+), 17 deletions(-) diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index 56c0f75e7a76..13b09c7516e9 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -4,6 +4,7 @@ config MIPS default y select ARCH_32BIT_OFF_T if !64BIT select ARCH_BINFMT_ELF_STATE if MIPS_FP_SUPPORT + select ARCH_HAS_CPU_FINALIZE_INIT select ARCH_HAS_DEBUG_VIRTUAL if !64BIT select ARCH_HAS_FORTIFY_SOURCE select ARCH_HAS_KCOV diff --git a/arch/mips/include/asm/bugs.h b/arch/mips/include/asm/bugs.h index d72dc6e1cf3c..8d4cf29861b8 100644 --- a/arch/mips/include/asm/bugs.h +++ b/arch/mips/include/asm/bugs.h @@ -1,17 +1,11 @@ /* SPDX-License-Identifier: GPL-2.0 */ /* - * This is included by init/main.c to check for architecture-dependent bugs. - * * Copyright (C) 2007 Maciej W. Rozycki - * - * Needs: - * void check_bugs(void); */ #ifndef _ASM_BUGS_H #define _ASM_BUGS_H #include -#include #include #include @@ -30,17 +24,6 @@ static inline void check_bugs_early(void) check_bugs64_early(); } -static inline void check_bugs(void) -{ - unsigned int cpu = smp_processor_id(); - - cpu_data[cpu].udelay_val = loops_per_jiffy; - check_bugs32(); - - if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64)) - check_bugs64(); -} - static inline int r4k_daddiu_bug(void) { if (!IS_ENABLED(CONFIG_CPU_R4X00_BUGS64)) diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c index c8d849d8a844..145f905fb362 100644 --- a/arch/mips/kernel/setup.c +++ b/arch/mips/kernel/setup.c @@ -11,6 +11,8 @@ * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki */ #include +#include +#include #include #include #include @@ -810,3 +812,14 @@ static int __init setnocoherentio(char *str) } early_param("nocoherentio", setnocoherentio); #endif + +void __init arch_cpu_finalize_init(void) +{ + unsigned int cpu = smp_processor_id(); + + cpu_data[cpu].udelay_val = loops_per_jiffy; + check_bugs32(); + + if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64)) + check_bugs64(); +} From 21a1fc8d13d8f4cda5211fab491f7c07ad02be16 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 161/513] sh/cpu: Switch to arch_cpu_finalize_init() commit 01eb454e9bfe593f320ecbc9aaec60bf87cd453d upstream check_bugs() is about to be phased out. Switch over to the new arch_cpu_finalize_init() implementation. No functional change. 
Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20230613224545.371697797@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/sh/Kconfig | 1 + arch/sh/include/asm/bugs.h | 74 --------------------------------- arch/sh/include/asm/processor.h | 2 + arch/sh/kernel/idle.c | 1 + arch/sh/kernel/setup.c | 55 ++++++++++++++++++++++++ 5 files changed, 59 insertions(+), 74 deletions(-) delete mode 100644 arch/sh/include/asm/bugs.h diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index 6904f4bdbf00..101b95f26a91 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -7,6 +7,7 @@ config SUPERH select ARCH_HAVE_CUSTOM_GPIO_H select ARCH_HAVE_NMI_SAFE_CMPXCHG if (GUSA_RB || CPU_SH4A) select ARCH_HAS_BINFMT_FLAT if !MMU + select ARCH_HAS_CPU_FINALIZE_INIT select ARCH_HAS_GIGANTIC_PAGE select ARCH_HAS_GCOV_PROFILE_ALL select ARCH_HAS_PTE_SPECIAL diff --git a/arch/sh/include/asm/bugs.h b/arch/sh/include/asm/bugs.h deleted file mode 100644 index fe52abb69cea..000000000000 --- a/arch/sh/include/asm/bugs.h +++ /dev/null @@ -1,74 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_SH_BUGS_H -#define __ASM_SH_BUGS_H - -/* - * This is included by init/main.c to check for architecture-dependent bugs. - * - * Needs: - * void check_bugs(void); - */ - -/* - * I don't know of any Super-H bugs yet. - */ - -#include - -extern void select_idle_routine(void); - -static void __init check_bugs(void) -{ - extern unsigned long loops_per_jiffy; - char *p = &init_utsname()->machine[2]; /* "sh" */ - - select_idle_routine(); - - current_cpu_data.loops_per_jiffy = loops_per_jiffy; - - switch (current_cpu_data.family) { - case CPU_FAMILY_SH2: - *p++ = '2'; - break; - case CPU_FAMILY_SH2A: - *p++ = '2'; - *p++ = 'a'; - break; - case CPU_FAMILY_SH3: - *p++ = '3'; - break; - case CPU_FAMILY_SH4: - *p++ = '4'; - break; - case CPU_FAMILY_SH4A: - *p++ = '4'; - *p++ = 'a'; - break; - case CPU_FAMILY_SH4AL_DSP: - *p++ = '4'; - *p++ = 'a'; - *p++ = 'l'; - *p++ = '-'; - *p++ = 'd'; - *p++ = 's'; - *p++ = 'p'; - break; - case CPU_FAMILY_UNKNOWN: - /* - * Specifically use CPU_FAMILY_UNKNOWN rather than - * default:, so we're able to have the compiler whine - * about unhandled enumerations. 
- */ - break; - } - - printk("CPU: %s\n", get_cpu_subtype(¤t_cpu_data)); - -#ifndef __LITTLE_ENDIAN__ - /* 'eb' means 'Endian Big' */ - *p++ = 'e'; - *p++ = 'b'; -#endif - *p = '\0'; -} -#endif /* __ASM_SH_BUGS_H */ diff --git a/arch/sh/include/asm/processor.h b/arch/sh/include/asm/processor.h index 3820d698846e..97af2d9b0269 100644 --- a/arch/sh/include/asm/processor.h +++ b/arch/sh/include/asm/processor.h @@ -167,6 +167,8 @@ extern unsigned int instruction_size(unsigned int insn); #define instruction_size(insn) (2) #endif +void select_idle_routine(void); + #endif /* __ASSEMBLY__ */ #include diff --git a/arch/sh/kernel/idle.c b/arch/sh/kernel/idle.c index f59814983bd5..a80b2a5b25c7 100644 --- a/arch/sh/kernel/idle.c +++ b/arch/sh/kernel/idle.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c index af977ec4ca5e..cf7c0f72f293 100644 --- a/arch/sh/kernel/setup.c +++ b/arch/sh/kernel/setup.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include @@ -354,3 +355,57 @@ int test_mode_pin(int pin) { return sh_mv.mv_mode_pins() & pin; } + +void __init arch_cpu_finalize_init(void) +{ + char *p = &init_utsname()->machine[2]; /* "sh" */ + + select_idle_routine(); + + current_cpu_data.loops_per_jiffy = loops_per_jiffy; + + switch (current_cpu_data.family) { + case CPU_FAMILY_SH2: + *p++ = '2'; + break; + case CPU_FAMILY_SH2A: + *p++ = '2'; + *p++ = 'a'; + break; + case CPU_FAMILY_SH3: + *p++ = '3'; + break; + case CPU_FAMILY_SH4: + *p++ = '4'; + break; + case CPU_FAMILY_SH4A: + *p++ = '4'; + *p++ = 'a'; + break; + case CPU_FAMILY_SH4AL_DSP: + *p++ = '4'; + *p++ = 'a'; + *p++ = 'l'; + *p++ = '-'; + *p++ = 'd'; + *p++ = 's'; + *p++ = 'p'; + break; + case CPU_FAMILY_UNKNOWN: + /* + * Specifically use CPU_FAMILY_UNKNOWN rather than + * default:, so we're able to have the compiler whine + * about unhandled enumerations. + */ + break; + } + + pr_info("CPU: %s\n", get_cpu_subtype(¤t_cpu_data)); + +#ifndef __LITTLE_ENDIAN__ + /* 'eb' means 'Endian Big' */ + *p++ = 'e'; + *p++ = 'b'; +#endif + *p = '\0'; +} From 6ea42178642654385e5394928fe5f69797e011c4 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 162/513] sparc/cpu: Switch to arch_cpu_finalize_init() commit 44ade508e3bfac45ae97864587de29eb1a881ec0 upstream check_bugs() is about to be phased out. Switch over to the new arch_cpu_finalize_init() implementation. No functional change. 
Signed-off-by: Thomas Gleixner Reviewed-by: Sam Ravnborg Link: https://lore.kernel.org/r/20230613224545.431995857@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/sparc/Kconfig | 1 + arch/sparc/include/asm/bugs.h | 18 ------------------ arch/sparc/kernel/setup_32.c | 7 +++++++ 3 files changed, 8 insertions(+), 18 deletions(-) delete mode 100644 arch/sparc/include/asm/bugs.h diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index eff9116bf7be..1176f0de6a0f 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -52,6 +52,7 @@ config SPARC config SPARC32 def_bool !64BIT select ARCH_32BIT_OFF_T + select ARCH_HAS_CPU_FINALIZE_INIT if !SMP select ARCH_HAS_SYNC_DMA_FOR_CPU select GENERIC_ATOMIC64 select CLZ_TAB diff --git a/arch/sparc/include/asm/bugs.h b/arch/sparc/include/asm/bugs.h deleted file mode 100644 index 02fa369b9c21..000000000000 --- a/arch/sparc/include/asm/bugs.h +++ /dev/null @@ -1,18 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* include/asm/bugs.h: Sparc probes for various bugs. - * - * Copyright (C) 1996, 2007 David S. Miller (davem@davemloft.net) - */ - -#ifdef CONFIG_SPARC32 -#include -#endif - -extern unsigned long loops_per_jiffy; - -static void __init check_bugs(void) -{ -#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP) - cpu_data(0).udelay_val = loops_per_jiffy; -#endif -} diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c index c8e0dd99f370..c9d1ba4f311b 100644 --- a/arch/sparc/kernel/setup_32.c +++ b/arch/sparc/kernel/setup_32.c @@ -412,3 +412,10 @@ static int __init topology_init(void) } subsys_initcall(topology_init); + +#if defined(CONFIG_SPARC32) && !defined(CONFIG_SMP) +void __init arch_cpu_finalize_init(void) +{ + cpu_data(0).udelay_val = loops_per_jiffy; +} +#endif From 285384ac24c38d775e7b0271c67f67165258f16a Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 163/513] um/cpu: Switch to arch_cpu_finalize_init() commit 9349b5cd0908f8afe95529fc7a8cbb1417df9b0c upstream check_bugs() is about to be phased out. Switch over to the new arch_cpu_finalize_init() implementation. No functional change. 
Signed-off-by: Thomas Gleixner Acked-by: Richard Weinberger Link: https://lore.kernel.org/r/20230613224545.493148694@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/um/Kconfig | 1 + arch/um/include/asm/bugs.h | 7 ------- arch/um/kernel/um_arch.c | 3 ++- 3 files changed, 3 insertions(+), 8 deletions(-) delete mode 100644 arch/um/include/asm/bugs.h diff --git a/arch/um/Kconfig b/arch/um/Kconfig index c18b45f75d41..b0584453d2a0 100644 --- a/arch/um/Kconfig +++ b/arch/um/Kconfig @@ -6,6 +6,7 @@ config UML bool default y select ARCH_EPHEMERAL_INODES + select ARCH_HAS_CPU_FINALIZE_INIT select ARCH_HAS_KCOV select ARCH_HAS_STRNCPY_FROM_USER select ARCH_HAS_STRNLEN_USER diff --git a/arch/um/include/asm/bugs.h b/arch/um/include/asm/bugs.h deleted file mode 100644 index 4473942a0839..000000000000 --- a/arch/um/include/asm/bugs.h +++ /dev/null @@ -1,7 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __UM_BUGS_H -#define __UM_BUGS_H - -void check_bugs(void); - -#endif diff --git a/arch/um/kernel/um_arch.c b/arch/um/kernel/um_arch.c index 4c8d2dc27084..748595b054c4 100644 --- a/arch/um/kernel/um_arch.c +++ b/arch/um/kernel/um_arch.c @@ -3,6 +3,7 @@ * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) */ +#include #include #include #include @@ -423,7 +424,7 @@ void __init setup_arch(char **cmdline_p) } } -void __init check_bugs(void) +void __init arch_cpu_finalize_init(void) { arch_check_bugs(); os_check_bugs(); From 7e270cebaffd12337e5851b0bb36a347d04ca528 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 164/513] init: Remove check_bugs() leftovers commit 61235b24b9cb37c13fcad5b9596d59a1afdcec30 upstream Everything is converted over to arch_cpu_finalize_init(). Remove the check_bugs() leftovers including the empty stubs in asm-generic, alpha, parisc, powerpc and xtensa. Signed-off-by: Thomas Gleixner Reviewed-by: Richard Henderson Link: https://lore.kernel.org/r/20230613224545.553215951@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/alpha/include/asm/bugs.h | 20 -------------------- arch/parisc/include/asm/bugs.h | 20 -------------------- arch/powerpc/include/asm/bugs.h | 15 --------------- arch/xtensa/include/asm/bugs.h | 18 ------------------ include/asm-generic/bugs.h | 11 ----------- init/main.c | 5 ----- 6 files changed, 89 deletions(-) delete mode 100644 arch/alpha/include/asm/bugs.h delete mode 100644 arch/parisc/include/asm/bugs.h delete mode 100644 arch/powerpc/include/asm/bugs.h delete mode 100644 arch/xtensa/include/asm/bugs.h delete mode 100644 include/asm-generic/bugs.h diff --git a/arch/alpha/include/asm/bugs.h b/arch/alpha/include/asm/bugs.h deleted file mode 100644 index 78030d1c7e7e..000000000000 --- a/arch/alpha/include/asm/bugs.h +++ /dev/null @@ -1,20 +0,0 @@ -/* - * include/asm-alpha/bugs.h - * - * Copyright (C) 1994 Linus Torvalds - */ - -/* - * This is included by init/main.c to check for architecture-dependent bugs. - * - * Needs: - * void check_bugs(void); - */ - -/* - * I don't know of any alpha bugs yet.. 
Nice chip - */ - -static void check_bugs(void) -{ -} diff --git a/arch/parisc/include/asm/bugs.h b/arch/parisc/include/asm/bugs.h deleted file mode 100644 index 0a7f9db6bd1c..000000000000 --- a/arch/parisc/include/asm/bugs.h +++ /dev/null @@ -1,20 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -/* - * include/asm-parisc/bugs.h - * - * Copyright (C) 1999 Mike Shaver - */ - -/* - * This is included by init/main.c to check for architecture-dependent bugs. - * - * Needs: - * void check_bugs(void); - */ - -#include - -static inline void check_bugs(void) -{ -// identify_cpu(&boot_cpu_data); -} diff --git a/arch/powerpc/include/asm/bugs.h b/arch/powerpc/include/asm/bugs.h deleted file mode 100644 index 01b8f6ca4dbb..000000000000 --- a/arch/powerpc/include/asm/bugs.h +++ /dev/null @@ -1,15 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-or-later */ -#ifndef _ASM_POWERPC_BUGS_H -#define _ASM_POWERPC_BUGS_H - -/* - */ - -/* - * This file is included by 'init/main.c' to check for - * architecture-dependent bugs. - */ - -static inline void check_bugs(void) { } - -#endif /* _ASM_POWERPC_BUGS_H */ diff --git a/arch/xtensa/include/asm/bugs.h b/arch/xtensa/include/asm/bugs.h deleted file mode 100644 index 69b29d198249..000000000000 --- a/arch/xtensa/include/asm/bugs.h +++ /dev/null @@ -1,18 +0,0 @@ -/* - * include/asm-xtensa/bugs.h - * - * This is included by init/main.c to check for architecture-dependent bugs. - * - * Xtensa processors don't have any bugs. :) - * - * This file is subject to the terms and conditions of the GNU General - * Public License. See the file "COPYING" in the main directory of - * this archive for more details. - */ - -#ifndef _XTENSA_BUGS_H -#define _XTENSA_BUGS_H - -static void check_bugs(void) { } - -#endif /* _XTENSA_BUGS_H */ diff --git a/include/asm-generic/bugs.h b/include/asm-generic/bugs.h deleted file mode 100644 index 69021830f078..000000000000 --- a/include/asm-generic/bugs.h +++ /dev/null @@ -1,11 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 */ -#ifndef __ASM_GENERIC_BUGS_H -#define __ASM_GENERIC_BUGS_H -/* - * This file is included by 'init/main.c' to check for - * architecture-dependent bugs. - */ - -static inline void check_bugs(void) { } - -#endif /* __ASM_GENERIC_BUGS_H */ diff --git a/init/main.c b/init/main.c index b426fdce0da7..242b1b6d61cc 100644 --- a/init/main.c +++ b/init/main.c @@ -104,7 +104,6 @@ #include #include -#include #include #include #include @@ -1136,10 +1135,6 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void) poking_init(); arch_cpu_finalize_init(); - /* Temporary conditional until everything has been converted */ -#ifndef CONFIG_ARCH_HAS_CPU_FINALIZE_INIT - check_bugs(); -#endif acpi_subsystem_init(); arch_post_acpi_subsys_init(); From 8ae795ed611538828858aaf98fe186ee7f6d5295 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 165/513] init: Invoke arch_cpu_finalize_init() earlier commit 9df9d2f0471b4c4702670380b8d8a45b40b23a7d upstream X86 is reworking the boot process so that initializations which are not required during early boot can be moved into the late boot process and out of the fragile and restricted initial boot phase. arch_cpu_finalize_init() is the obvious place to do such initializations, but arch_cpu_finalize_init() is invoked too late in start_kernel() e.g. for initializing the FPU completely. fork_init() requires that the FPU is initialized as the size of task_struct on X86 depends on the size of the required FPU register buffer. 
Fortunately none of the init calls between calibrate_delay() and arch_cpu_finalize_init() is relevant for the functionality of arch_cpu_finalize_init(). Invoke it right after calibrate_delay() where everything which is relevant for arch_cpu_finalize_init() has been set up already. No functional change intended. Signed-off-by: Thomas Gleixner Reviewed-by: Rick Edgecombe Link: https://lore.kernel.org/r/20230613224545.612182854@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- init/main.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/init/main.c b/init/main.c index 242b1b6d61cc..828f2547fa67 100644 --- a/init/main.c +++ b/init/main.c @@ -1106,6 +1106,9 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void) late_time_init(); sched_clock_init(); calibrate_delay(); + + arch_cpu_finalize_init(); + pid_idr_init(); anon_vma_init(); #ifdef CONFIG_X86 @@ -1134,8 +1137,6 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void) poking_init(); - arch_cpu_finalize_init(); - acpi_subsystem_init(); arch_post_acpi_subsys_init(); kcsan_init(); From 041d929233bb2cadee3116742f400ac7004c8569 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 166/513] init, x86: Move mem_encrypt_init() into arch_cpu_finalize_init() commit 439e17576eb47f26b78c5bbc72e344d4206d2327 upstream Invoke the X86ism mem_encrypt_init() from X86 arch_cpu_finalize_init() and remove the weak fallback from the core code. No functional change. Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20230613224545.670360645@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/mem_encrypt.h | 7 ++++--- arch/x86/kernel/cpu/common.c | 11 +++++++++++ init/main.c | 11 ----------- 3 files changed, 15 insertions(+), 14 deletions(-) diff --git a/arch/x86/include/asm/mem_encrypt.h b/arch/x86/include/asm/mem_encrypt.h index 3fb9f5ebefa4..2356fdddd3e6 100644 --- a/arch/x86/include/asm/mem_encrypt.h +++ b/arch/x86/include/asm/mem_encrypt.h @@ -47,14 +47,13 @@ int __init early_set_memory_encrypted(unsigned long vaddr, unsigned long size); void __init mem_encrypt_free_decrypted_mem(void); -/* Architecture __weak replacement functions */ -void __init mem_encrypt_init(void); - void __init sev_es_init_vc_handling(void); bool sme_active(void); bool sev_active(void); bool sev_es_active(void); +void __init mem_encrypt_init(void); + #define __bss_decrypted __section(".bss..decrypted") #else /* !CONFIG_AMD_MEM_ENCRYPT */ @@ -87,6 +86,8 @@ early_set_memory_encrypted(unsigned long vaddr, unsigned long size) { return 0; static inline void mem_encrypt_free_decrypted_mem(void) { } +static inline void mem_encrypt_init(void) { } + #define __bss_decrypted #endif /* CONFIG_AMD_MEM_ENCRYPT */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index f9909e7fe56b..6f4016f5f4f3 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -2260,4 +2261,14 @@ void __init arch_cpu_finalize_init(void) } else { fpu__init_check_bugs(); } + + /* + * This needs to be called before any devices perform DMA + * operations that might use the SWIOTLB bounce buffers. It will + * mark the bounce buffers as decrypted so that their usage will + * not cause "plain-text" data to be decrypted when accessed. 
It + * must be called after late_time_init() so that Hyper-V x86/x64 + * hypercalls work when the SWIOTLB bounce buffers are decrypted. + */ + mem_encrypt_init(); } diff --git a/init/main.c b/init/main.c index 828f2547fa67..1efd414cefeb 100644 --- a/init/main.c +++ b/init/main.c @@ -96,7 +96,6 @@ #include #include #include -#include #include #include #include @@ -787,8 +786,6 @@ void __init __weak thread_stack_cache_init(void) } #endif -void __init __weak mem_encrypt_init(void) { } - void __init __weak poking_init(void) { } void __init __weak pgtable_cache_init(void) { } @@ -1082,14 +1079,6 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void) */ locking_selftest(); - /* - * This needs to be called before any devices perform DMA - * operations that might use the SWIOTLB bounce buffers. It will - * mark the bounce buffers as decrypted so that their usage will - * not cause "plain-text" data to be decrypted when accessed. - */ - mem_encrypt_init(); - #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start && !initrd_below_start_ok && page_to_pfn(virt_to_page((void *)initrd_start)) < min_low_pfn) { From de8b7ce4c533f808105ae60e95d777a99eca2091 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 167/513] x86/init: Initialize signal frame size late commit 54d9a91a3d6713d1332e93be13b4eaf0fa54349d upstream No point in doing this during really early boot. Move it to an early initcall so that it is set up before possible user mode helpers are started during device initialization. Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20230613224545.727330699@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/sigframe.h | 2 -- arch/x86/kernel/cpu/common.c | 3 --- arch/x86/kernel/signal.c | 4 +++- 3 files changed, 3 insertions(+), 6 deletions(-) diff --git a/arch/x86/include/asm/sigframe.h b/arch/x86/include/asm/sigframe.h index 5b1ed650b124..84eab2724875 100644 --- a/arch/x86/include/asm/sigframe.h +++ b/arch/x86/include/asm/sigframe.h @@ -85,6 +85,4 @@ struct rt_sigframe_x32 { #endif /* CONFIG_X86_64 */ -void __init init_sigframe_size(void); - #endif /* _ASM_X86_SIGFRAME_H */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 6f4016f5f4f3..c51716bd0c9f 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -63,7 +63,6 @@ #include #include #include -#include #include "cpu.h" @@ -1428,8 +1427,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) fpu__init_system(c); - init_sigframe_size(); - #ifdef CONFIG_X86_32 /* * Regardless of whether PCID is enumerated, the SDM says diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c index f4d21e470083..bf10340a9b71 100644 --- a/arch/x86/kernel/signal.c +++ b/arch/x86/kernel/signal.c @@ -722,7 +722,7 @@ SYSCALL_DEFINE0(rt_sigreturn) /* max_frame_size tells userspace the worst case signal stack size. 
*/ static unsigned long __ro_after_init max_frame_size; -void __init init_sigframe_size(void) +static int __init init_sigframe_size(void) { max_frame_size = MAX_FRAME_SIGINFO_UCTXT_SIZE + MAX_FRAME_PADDING; @@ -732,7 +732,9 @@ void __init init_sigframe_size(void) max_frame_size = round_up(max_frame_size, FRAME_ALIGNMENT); pr_info("max sigframe size: %lu\n", max_frame_size); + return 0; } +early_initcall(init_sigframe_size); unsigned long get_sigframe_size(void) { From bb9c20d903f6f9d4bca812220ac8b2348aa6c1b6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 168/513] x86/fpu: Remove cpuinfo argument from init functions commit 1f34bb2a24643e0087652d81078e4f616562738d upstream Nothing in the call chain requires it Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20230613224545.783704297@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/fpu/internal.h | 2 +- arch/x86/kernel/cpu/common.c | 2 +- arch/x86/kernel/fpu/init.c | 6 +++--- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h index ce6fc4f8d1d1..d4201fb2c46d 100644 --- a/arch/x86/include/asm/fpu/internal.h +++ b/arch/x86/include/asm/fpu/internal.h @@ -43,7 +43,7 @@ extern void fpu_flush_thread(void); extern void fpu__init_cpu(void); extern void fpu__init_system_xstate(void); extern void fpu__init_cpu_xstate(void); -extern void fpu__init_system(struct cpuinfo_x86 *c); +extern void fpu__init_system(void); extern void fpu__init_check_bugs(void); extern void fpu__resume_cpu(void); diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index c51716bd0c9f..7bff7b5116d9 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1425,7 +1425,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) sld_setup(c); - fpu__init_system(c); + fpu__init_system(); #ifdef CONFIG_X86_32 /* diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index c949424a11c1..30980ca854cd 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -67,7 +67,7 @@ static bool fpu__probe_without_cpuid(void) return fsw == 0 && (fcw & 0x103f) == 0x003f; } -static void fpu__init_system_early_generic(struct cpuinfo_x86 *c) +static void fpu__init_system_early_generic(void) { if (!boot_cpu_has(X86_FEATURE_CPUID) && !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) { @@ -226,9 +226,9 @@ static void __init fpu__init_system_ctx_switch(void) * Called on the boot CPU once per system bootup, to set up the initial * FPU state that is later cloned into all processes: */ -void __init fpu__init_system(struct cpuinfo_x86 *c) +void __init fpu__init_system(void) { - fpu__init_system_early_generic(c); + fpu__init_system_early_generic(); /* * The FPU has to be operational for some of the From 59f2739111cad3c322870185c91ca42f31d66ee1 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 169/513] x86/fpu: Mark init functions __init commit 1703db2b90c91b2eb2d699519fc505fe431dde0e upstream No point in keeping them around. 
Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20230613224545.841685728@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/fpu/init.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c index 30980ca854cd..ddf65f1927e1 100644 --- a/arch/x86/kernel/fpu/init.c +++ b/arch/x86/kernel/fpu/init.c @@ -49,7 +49,7 @@ void fpu__init_cpu(void) fpu__init_cpu_xstate(); } -static bool fpu__probe_without_cpuid(void) +static bool __init fpu__probe_without_cpuid(void) { unsigned long cr0; u16 fsw, fcw; @@ -67,7 +67,7 @@ static bool fpu__probe_without_cpuid(void) return fsw == 0 && (fcw & 0x103f) == 0x003f; } -static void fpu__init_system_early_generic(void) +static void __init fpu__init_system_early_generic(void) { if (!boot_cpu_has(X86_FEATURE_CPUID) && !test_bit(X86_FEATURE_FPU, (unsigned long *)cpu_caps_cleared)) { From a094d3b309670961ed2432389c1193ed5ab2e564 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Tue, 1 Aug 2023 18:58:30 +0200 Subject: [PATCH 170/513] x86/fpu: Move FPU initialization into arch_cpu_finalize_init() commit b81fac906a8f9e682e513ddd95697ec7a20878d4 upstream Initializing the FPU during the early boot process is a pointless exercise. Early boot is convoluted and fragile enough. Nothing requires that the FPU is set up early. It has to be initialized before fork_init() because the task_struct size depends on the FPU register buffer size. Move the initialization to arch_cpu_finalize_init() which is the perfect place to do so. No functional change. This allows to remove quite some of the custom early command line parsing, but that's subject to the next installment. Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20230613224545.902376621@linutronix.de Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/common.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 7bff7b5116d9..78e9f2e422f0 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1425,8 +1425,6 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c) sld_setup(c); - fpu__init_system(); - #ifdef CONFIG_X86_32 /* * Regardless of whether PCID is enumerated, the SDM says @@ -2131,8 +2129,6 @@ void cpu_init(void) doublefault_init_cpu_tss(); - fpu__init_cpu(); - if (is_uv_system()) uv_cpu_init(); @@ -2148,6 +2144,7 @@ void cpu_init_secondary(void) */ cpu_init_exception_handling(); cpu_init(); + fpu__init_cpu(); } #endif @@ -2242,6 +2239,13 @@ void __init arch_cpu_finalize_init(void) '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86); } + /* + * Must be before alternatives because it might set or clear + * feature bits. + */ + fpu__init_system(); + fpu__init_cpu(); + alternative_instructions(); if (IS_ENABLED(CONFIG_X86_64)) { From 348a89e2018428c3e55a87cdd9ae3cbd6cc8248a Mon Sep 17 00:00:00 2001 From: Daniel Sneddon Date: Tue, 1 Aug 2023 18:58:31 +0200 Subject: [PATCH 171/513] x86/speculation: Add Gather Data Sampling mitigation commit 8974eb588283b7d44a7c91fa09fcbaf380339f3a upstream Gather Data Sampling (GDS) is a hardware vulnerability which allows unprivileged speculative access to data which was previously stored in vector registers. Intel processors that support AVX2 and AVX512 have gather instructions that fetch non-contiguous data elements from memory. 
On vulnerable hardware, when a gather instruction is transiently executed and encounters a fault, stale data from architectural or internal vector registers may get transiently stored to the destination vector register allowing an attacker to infer the stale data using typical side channel techniques like cache timing attacks. This mitigation is different from many earlier ones for two reasons. First, it is enabled by default and a bit must be set to *DISABLE* it. This is the opposite of normal mitigation polarity. This means GDS can be mitigated simply by updating microcode and leaving the new control bit alone. Second, GDS has a "lock" bit. This lock bit is there because the mitigation affects the hardware security features KeyLocker and SGX. It needs to be enabled and *STAY* enabled for these features to be mitigated against GDS. The mitigation is enabled in the microcode by default. Disable it by setting gather_data_sampling=off or by disabling all mitigations with mitigations=off. The mitigation status can be checked by reading: /sys/devices/system/cpu/vulnerabilities/gather_data_sampling Signed-off-by: Daniel Sneddon Signed-off-by: Dave Hansen Acked-by: Josh Poimboeuf Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- .../ABI/testing/sysfs-devices-system-cpu | 13 +- .../hw-vuln/gather_data_sampling.rst | 99 ++++++++++++++ Documentation/admin-guide/hw-vuln/index.rst | 1 + .../admin-guide/kernel-parameters.txt | 35 +++-- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/include/asm/msr-index.h | 11 ++ arch/x86/kernel/cpu/bugs.c | 129 ++++++++++++++++++ arch/x86/kernel/cpu/common.c | 34 +++-- arch/x86/kernel/cpu/cpu.h | 1 + drivers/base/cpu.c | 8 ++ 10 files changed, 307 insertions(+), 25 deletions(-) create mode 100644 Documentation/admin-guide/hw-vuln/gather_data_sampling.rst diff --git a/Documentation/ABI/testing/sysfs-devices-system-cpu b/Documentation/ABI/testing/sysfs-devices-system-cpu index a7362b1096c4..eecbd1603349 100644 --- a/Documentation/ABI/testing/sysfs-devices-system-cpu +++ b/Documentation/ABI/testing/sysfs-devices-system-cpu @@ -511,17 +511,18 @@ Description: information about CPUs heterogeneity. cpu_capacity: capacity of cpu#. 
What: /sys/devices/system/cpu/vulnerabilities + /sys/devices/system/cpu/vulnerabilities/gather_data_sampling + /sys/devices/system/cpu/vulnerabilities/itlb_multihit + /sys/devices/system/cpu/vulnerabilities/l1tf + /sys/devices/system/cpu/vulnerabilities/mds /sys/devices/system/cpu/vulnerabilities/meltdown + /sys/devices/system/cpu/vulnerabilities/mmio_stale_data + /sys/devices/system/cpu/vulnerabilities/retbleed + /sys/devices/system/cpu/vulnerabilities/spec_store_bypass /sys/devices/system/cpu/vulnerabilities/spectre_v1 /sys/devices/system/cpu/vulnerabilities/spectre_v2 - /sys/devices/system/cpu/vulnerabilities/spec_store_bypass - /sys/devices/system/cpu/vulnerabilities/l1tf - /sys/devices/system/cpu/vulnerabilities/mds /sys/devices/system/cpu/vulnerabilities/srbds /sys/devices/system/cpu/vulnerabilities/tsx_async_abort - /sys/devices/system/cpu/vulnerabilities/itlb_multihit - /sys/devices/system/cpu/vulnerabilities/mmio_stale_data - /sys/devices/system/cpu/vulnerabilities/retbleed Date: January 2018 Contact: Linux kernel mailing list Description: Information about CPU vulnerabilities diff --git a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst new file mode 100644 index 000000000000..74dab6af7fe1 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst @@ -0,0 +1,99 @@ +.. SPDX-License-Identifier: GPL-2.0 + +GDS - Gather Data Sampling +========================== + +Gather Data Sampling is a hardware vulnerability which allows unprivileged +speculative access to data which was previously stored in vector registers. + +Problem +------- +When a gather instruction performs loads from memory, different data elements +are merged into the destination vector register. However, when a gather +instruction that is transiently executed encounters a fault, stale data from +architectural or internal vector registers may get transiently forwarded to the +destination vector register instead. This will allow a malicious attacker to +infer stale data using typical side channel techniques like cache timing +attacks. GDS is a purely sampling-based attack. + +The attacker uses gather instructions to infer the stale vector register data. +The victim does not need to do anything special other than use the vector +registers. The victim does not need to use gather instructions to be +vulnerable. + +Because the buffers are shared between Hyper-Threads cross Hyper-Thread attacks +are possible. + +Attack scenarios +---------------- +Without mitigation, GDS can infer stale data across virtually all +permission boundaries: + + Non-enclaves can infer SGX enclave data + Userspace can infer kernel data + Guests can infer data from hosts + Guest can infer guest from other guests + Users can infer data from other users + +Because of this, it is important to ensure that the mitigation stays enabled in +lower-privilege contexts like guests and when running outside SGX enclaves. + +The hardware enforces the mitigation for SGX. Likewise, VMMs should ensure +that guests are not allowed to disable the GDS mitigation. If a host erred and +allowed this, a guest could theoretically disable GDS mitigation, mount an +attack, and re-enable it. + +Mitigation mechanism +-------------------- +This issue is mitigated in microcode. The microcode defines the following new +bits: + + ================================ === ============================ + IA32_ARCH_CAPABILITIES[GDS_CTRL] R/O Enumerates GDS vulnerability + and mitigation support. 
+ IA32_ARCH_CAPABILITIES[GDS_NO] R/O Processor is not vulnerable. + IA32_MCU_OPT_CTRL[GDS_MITG_DIS] R/W Disables the mitigation + 0 by default. + IA32_MCU_OPT_CTRL[GDS_MITG_LOCK] R/W Locks GDS_MITG_DIS=0. Writes + to GDS_MITG_DIS are ignored + Can't be cleared once set. + ================================ === ============================ + +GDS can also be mitigated on systems that don't have updated microcode by +disabling AVX. This can be done by setting "clearcpuid=avx" on the kernel +command-line. + +Mitigation control on the kernel command line +--------------------------------------------- +The mitigation can be disabled by setting "gather_data_sampling=off" or +"mitigations=off" on the kernel command line. Not specifying either will +default to the mitigation being enabled. + +GDS System Information +------------------------ +The kernel provides vulnerability status information through sysfs. For +GDS this can be accessed by the following sysfs file: + +/sys/devices/system/cpu/vulnerabilities/gather_data_sampling + +The possible values contained in this file are: + + ============================== ============================================= + Not affected Processor not vulnerable. + Vulnerable Processor vulnerable and mitigation disabled. + Vulnerable: No microcode Processor vulnerable and microcode is missing + mitigation. + Mitigation: Microcode Processor is vulnerable and mitigation is in + effect. + Mitigation: Microcode (locked) Processor is vulnerable and mitigation is in + effect and cannot be disabled. + Unknown: Dependent on + hypervisor status Running on a virtual guest processor that is + affected but with no way to know if host + processor is mitigated or vulnerable. + ============================== ============================================= + +GDS Default mitigation +---------------------- +The updated microcode will enable the mitigation by default. The kernel's +default action is to leave the mitigation enabled. diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst index e0614760a99e..436fac0bd9c3 100644 --- a/Documentation/admin-guide/hw-vuln/index.rst +++ b/Documentation/admin-guide/hw-vuln/index.rst @@ -19,3 +19,4 @@ are configurable at compile, boot or run time. l1d_flush.rst processor_mmio_stale_data.rst cross-thread-rsb.rst + gather_data_sampling.rst diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 962180711bd8..82d11504f0bb 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1501,6 +1501,20 @@ Format: off | on default: on + gather_data_sampling= + [X86,INTEL] Control the Gather Data Sampling (GDS) + mitigation. + + Gather Data Sampling is a hardware vulnerability which + allows unprivileged speculative access to data which was + previously stored in vector registers. + + This issue is mitigated by default in updated microcode. + The mitigation may have a performance impact but can be + disabled. + + off: Disable GDS mitigation. + gcov_persist= [GCOV] When non-zero (default), profiling data for kernel modules is saved and remains accessible via debugfs, even when the module is unloaded/reloaded. @@ -3034,22 +3048,23 @@ Disable all optional CPU mitigations. This improves system performance, but it may also expose users to several CPU vulnerabilities. 
- Equivalent to: nopti [X86,PPC] + Equivalent to: gather_data_sampling=off [X86] kpti=0 [ARM64] - nospectre_v1 [X86,PPC] - nobp=0 [S390] - nospectre_v2 [X86,PPC,S390,ARM64] - spectre_v2_user=off [X86] - spec_store_bypass_disable=off [X86,PPC] - ssbd=force-off [ARM64] + kvm.nx_huge_pages=off [X86] l1tf=off [X86] mds=off [X86] - tsx_async_abort=off [X86] - kvm.nx_huge_pages=off [X86] + mmio_stale_data=off [X86] no_entry_flush [PPC] no_uaccess_flush [PPC] - mmio_stale_data=off [X86] + nobp=0 [S390] + nopti [X86,PPC] + nospectre_v1 [X86,PPC] + nospectre_v2 [X86,PPC,S390,ARM64] retbleed=off [X86] + spec_store_bypass_disable=off [X86,PPC] + spectre_v2_user=off [X86] + ssbd=force-off [ARM64] + tsx_async_abort=off [X86] Exceptions: This does not have any effect on diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index e31c7e75d6b0..6afec5442a95 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -453,5 +453,6 @@ #define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */ #define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */ #define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */ +#define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index e78755ed82cf..bad29c2deecf 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -156,6 +156,15 @@ * Not susceptible to Post-Barrier * Return Stack Buffer Predictions. */ +#define ARCH_CAP_GDS_CTRL BIT(25) /* + * CPU is vulnerable to Gather + * Data Sampling (GDS) and + * has controls for mitigation. + */ +#define ARCH_CAP_GDS_NO BIT(26) /* + * CPU is not vulnerable to Gather + * Data Sampling (GDS). 
+ */ #define MSR_IA32_FLUSH_CMD 0x0000010b #define L1D_FLUSH BIT(0) /* @@ -174,6 +183,8 @@ #define RNGDS_MITG_DIS BIT(0) /* SRBDS support */ #define RTM_ALLOW BIT(1) /* TSX development mode */ #define FB_CLEAR_DIS BIT(3) /* CPU Fill buffer clear disable */ +#define GDS_MITG_DIS BIT(4) /* Disable GDS mitigation */ +#define GDS_MITG_LOCKED BIT(5) /* GDS mitigation locked */ #define MSR_IA32_SYSENTER_CS 0x00000174 #define MSR_IA32_SYSENTER_ESP 0x00000175 diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index a4a25ad949a1..b7e039a671eb 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -46,6 +46,7 @@ static void __init taa_select_mitigation(void); static void __init mmio_select_mitigation(void); static void __init srbds_select_mitigation(void); static void __init l1d_flush_select_mitigation(void); +static void __init gds_select_mitigation(void); /* The base value of the SPEC_CTRL MSR without task-specific bits set */ u64 x86_spec_ctrl_base; @@ -159,6 +160,7 @@ void __init cpu_select_mitigations(void) md_clear_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); + gds_select_mitigation(); } /* @@ -644,6 +646,120 @@ static int __init l1d_flush_parse_cmdline(char *str) } early_param("l1d_flush", l1d_flush_parse_cmdline); +#undef pr_fmt +#define pr_fmt(fmt) "GDS: " fmt + +enum gds_mitigations { + GDS_MITIGATION_OFF, + GDS_MITIGATION_UCODE_NEEDED, + GDS_MITIGATION_FULL, + GDS_MITIGATION_FULL_LOCKED, + GDS_MITIGATION_HYPERVISOR, +}; + +static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL; + +static const char * const gds_strings[] = { + [GDS_MITIGATION_OFF] = "Vulnerable", + [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", + [GDS_MITIGATION_FULL] = "Mitigation: Microcode", + [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)", + [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", +}; + +void update_gds_msr(void) +{ + u64 mcu_ctrl_after; + u64 mcu_ctrl; + + switch (gds_mitigation) { + case GDS_MITIGATION_OFF: + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + mcu_ctrl |= GDS_MITG_DIS; + break; + case GDS_MITIGATION_FULL_LOCKED: + /* + * The LOCKED state comes from the boot CPU. APs might not have + * the same state. Make sure the mitigation is enabled on all + * CPUs. + */ + case GDS_MITIGATION_FULL: + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + mcu_ctrl &= ~GDS_MITG_DIS; + break; + case GDS_MITIGATION_UCODE_NEEDED: + case GDS_MITIGATION_HYPERVISOR: + return; + }; + + wrmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + + /* + * Check to make sure that the WRMSR value was not ignored. Writes to + * GDS_MITG_DIS will be ignored if this processor is locked but the boot + * processor was not. + */ + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl_after); + WARN_ON_ONCE(mcu_ctrl != mcu_ctrl_after); +} + +static void __init gds_select_mitigation(void) +{ + u64 mcu_ctrl; + + if (!boot_cpu_has_bug(X86_BUG_GDS)) + return; + + if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) { + gds_mitigation = GDS_MITIGATION_HYPERVISOR; + goto out; + } + + if (cpu_mitigations_off()) + gds_mitigation = GDS_MITIGATION_OFF; + /* Will verify below that mitigation _can_ be disabled */ + + /* No microcode */ + if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) { + gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; + goto out; + } + + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); + if (mcu_ctrl & GDS_MITG_LOCKED) { + if (gds_mitigation == GDS_MITIGATION_OFF) + pr_warn("Mitigation locked. 
Disable failed.\n"); + + /* + * The mitigation is selected from the boot CPU. All other CPUs + * _should_ have the same state. If the boot CPU isn't locked + * but others are then update_gds_msr() will WARN() of the state + * mismatch. If the boot CPU is locked update_gds_msr() will + * ensure the other CPUs have the mitigation enabled. + */ + gds_mitigation = GDS_MITIGATION_FULL_LOCKED; + } + + update_gds_msr(); +out: + pr_info("%s\n", gds_strings[gds_mitigation]); +} + +static int __init gds_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!boot_cpu_has_bug(X86_BUG_GDS)) + return 0; + + if (!strcmp(str, "off")) + gds_mitigation = GDS_MITIGATION_OFF; + + return 0; +} +early_param("gather_data_sampling", gds_parse_cmdline); + #undef pr_fmt #define pr_fmt(fmt) "Spectre V1 : " fmt @@ -2356,6 +2472,11 @@ static ssize_t retbleed_show_state(char *buf) return sprintf(buf, "%s\n", retbleed_strings[retbleed_mitigation]); } +static ssize_t gds_show_state(char *buf) +{ + return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); +} + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { @@ -2405,6 +2526,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_RETBLEED: return retbleed_show_state(buf); + case X86_BUG_GDS: + return gds_show_state(buf); + default: break; } @@ -2469,4 +2593,9 @@ ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, cha { return cpu_show_common(dev, attr, buf, X86_BUG_RETBLEED); } + +ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_GDS); +} #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 78e9f2e422f0..96be8c03fd22 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1131,6 +1131,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { #define RETBLEED BIT(3) /* CPU is affected by SMT (cross-thread) return predictions */ #define SMT_RSB BIT(4) +/* CPU is affected by GDS */ +#define GDS BIT(5) static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), @@ -1143,19 +1145,21 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(BROADWELL_X, X86_STEPPING_ANY, MMIO), VULNBL_INTEL_STEPPINGS(BROADWELL, X86_STEPPING_ANY, SRBDS), VULNBL_INTEL_STEPPINGS(SKYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), - VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(SKYLAKE_X, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(SKYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), - VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), - VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(KABYLAKE_L, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(KABYLAKE, X86_STEPPING_ANY, SRBDS | MMIO | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(CANNONLAKE_L, X86_STEPPING_ANY, RETBLEED), - VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), - VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO), - VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO), - VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), + VULNBL_INTEL_STEPPINGS(ICELAKE_L, X86_STEPPING_ANY, MMIO | 
MMIO_SBDS | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_D, X86_STEPPING_ANY, MMIO | GDS), + VULNBL_INTEL_STEPPINGS(ICELAKE_X, X86_STEPPING_ANY, MMIO | GDS), + VULNBL_INTEL_STEPPINGS(COMETLAKE, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPINGS(0x0, 0x0), MMIO | RETBLEED), - VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), + VULNBL_INTEL_STEPPINGS(COMETLAKE_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED | GDS), + VULNBL_INTEL_STEPPINGS(TIGERLAKE_L, X86_STEPPING_ANY, GDS), + VULNBL_INTEL_STEPPINGS(TIGERLAKE, X86_STEPPING_ANY, GDS), VULNBL_INTEL_STEPPINGS(LAKEFIELD, X86_STEPPING_ANY, MMIO | MMIO_SBDS | RETBLEED), - VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED), + VULNBL_INTEL_STEPPINGS(ROCKETLAKE, X86_STEPPING_ANY, MMIO | RETBLEED | GDS), VULNBL_INTEL_STEPPINGS(ATOM_TREMONT, X86_STEPPING_ANY, MMIO | MMIO_SBDS), VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D, X86_STEPPING_ANY, MMIO), VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L, X86_STEPPING_ANY, MMIO | MMIO_SBDS), @@ -1284,6 +1288,16 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) if (cpu_matches(cpu_vuln_blacklist, SMT_RSB)) setup_force_cpu_bug(X86_BUG_SMT_RSB); + /* + * Check if CPU is vulnerable to GDS. If running in a virtual machine on + * an affected processor, the VMM may have disabled the use of GATHER by + * disabling AVX2. The only way to do this in HW is to clear XCR0[2], + * which means that AVX will be disabled. + */ + if (cpu_matches(cpu_vuln_blacklist, GDS) && !(ia32_cap & ARCH_CAP_GDS_NO) && + boot_cpu_has(X86_FEATURE_AVX)) + setup_force_cpu_bug(X86_BUG_GDS); + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; @@ -1802,6 +1816,8 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c) validate_apic_and_package_id(c); x86_spec_ctrl_setup_ap(); update_srbds_msr(); + if (boot_cpu_has_bug(X86_BUG_GDS)) + update_gds_msr(); tsx_ap_init(); } diff --git a/arch/x86/kernel/cpu/cpu.h b/arch/x86/kernel/cpu/cpu.h index 61dbb9b216e6..d9aeb335002d 100644 --- a/arch/x86/kernel/cpu/cpu.h +++ b/arch/x86/kernel/cpu/cpu.h @@ -83,6 +83,7 @@ void cpu_select_mitigations(void); extern void x86_spec_ctrl_setup_ap(void); extern void update_srbds_msr(void); +extern void update_gds_msr(void); extern u64 x86_read_arch_cap_msr(void); diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 450dca235a2f..2d376438ffad 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -577,6 +577,12 @@ ssize_t __weak cpu_show_retbleed(struct device *dev, return sysfs_emit(buf, "Not affected\n"); } +ssize_t __weak cpu_show_gds(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -588,6 +594,7 @@ static DEVICE_ATTR(itlb_multihit, 0444, cpu_show_itlb_multihit, NULL); static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); +static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -601,6 +608,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_srbds.attr, &dev_attr_mmio_stale_data.attr, &dev_attr_retbleed.attr, + 
&dev_attr_gather_data_sampling.attr, NULL }; From 0cc5643b63aef90165488dabaeff92697925baa6 Mon Sep 17 00:00:00 2001 From: Daniel Sneddon Date: Tue, 1 Aug 2023 18:58:31 +0200 Subject: [PATCH 172/513] x86/speculation: Add force option to GDS mitigation commit 553a5c03e90a6087e88f8ff878335ef0621536fb upstream The Gather Data Sampling (GDS) vulnerability allows malicious software to infer stale data previously stored in vector registers. This may include sensitive data such as cryptographic keys. GDS is mitigated in microcode, and systems with up-to-date microcode are protected by default. However, any affected system that is running with older microcode will still be vulnerable to GDS attacks. Since the gather instructions used by the attacker are part of the AVX2 and AVX512 extensions, disabling these extensions prevents gather instructions from being executed, thereby mitigating the system from GDS. Disabling AVX2 is sufficient, but we don't have the granularity to do this. The XCR0[2] disables AVX, with no option to just disable AVX2. Add a kernel parameter gather_data_sampling=force that will enable the microcode mitigation if available, otherwise it will disable AVX on affected systems. This option will be ignored if cmdline mitigations=off. This is a *big* hammer. It is known to break buggy userspace that uses incomplete, buggy AVX enumeration. Unfortunately, such userspace does exist in the wild: https://www.mail-archive.com/bug-coreutils@gnu.org/msg33046.html [ dhansen: add some more ominous warnings about disabling AVX ] Signed-off-by: Daniel Sneddon Signed-off-by: Dave Hansen Acked-by: Josh Poimboeuf Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- .../hw-vuln/gather_data_sampling.rst | 18 +++++++++++++---- .../admin-guide/kernel-parameters.txt | 8 +++++++- arch/x86/kernel/cpu/bugs.c | 20 ++++++++++++++++++- 3 files changed, 40 insertions(+), 6 deletions(-) diff --git a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst index 74dab6af7fe1..40b7a6260010 100644 --- a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst +++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst @@ -60,14 +60,21 @@ bits: ================================ === ============================ GDS can also be mitigated on systems that don't have updated microcode by -disabling AVX. This can be done by setting "clearcpuid=avx" on the kernel -command-line. +disabling AVX. This can be done by setting gather_data_sampling="force" or +"clearcpuid=avx" on the kernel command-line. + +If used, these options will disable AVX use by turning on XSAVE YMM support. +However, the processor will still enumerate AVX support. Userspace that +does not follow proper AVX enumeration to check both AVX *and* XSAVE YMM +support will break. Mitigation control on the kernel command line --------------------------------------------- The mitigation can be disabled by setting "gather_data_sampling=off" or -"mitigations=off" on the kernel command line. Not specifying either will -default to the mitigation being enabled. +"mitigations=off" on the kernel command line. Not specifying either will default +to the mitigation being enabled. Specifying "gather_data_sampling=force" will +use the microcode mitigation when available or disable AVX on affected systems +where the microcode hasn't been updated to include the mitigation. 
GDS System Information ------------------------ @@ -83,6 +90,9 @@ The possible values contained in this file are: Vulnerable Processor vulnerable and mitigation disabled. Vulnerable: No microcode Processor vulnerable and microcode is missing mitigation. + Mitigation: AVX disabled, + no microcode Processor is vulnerable and microcode is missing + mitigation. AVX disabled as mitigation. Mitigation: Microcode Processor is vulnerable and mitigation is in effect. Mitigation: Microcode (locked) Processor is vulnerable and mitigation is in diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 82d11504f0bb..495cd57138ab 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -1511,7 +1511,13 @@ This issue is mitigated by default in updated microcode. The mitigation may have a performance impact but can be - disabled. + disabled. On systems without the microcode mitigation + disabling AVX serves as a mitigation. + + force: Disable AVX to mitigate systems without + microcode mitigation. No effect if the microcode + mitigation is present. Known to cause crashes in + userspace with buggy AVX enumeration. off: Disable GDS mitigation. diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index b7e039a671eb..9b047051fb11 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -652,6 +652,7 @@ early_param("l1d_flush", l1d_flush_parse_cmdline); enum gds_mitigations { GDS_MITIGATION_OFF, GDS_MITIGATION_UCODE_NEEDED, + GDS_MITIGATION_FORCE, GDS_MITIGATION_FULL, GDS_MITIGATION_FULL_LOCKED, GDS_MITIGATION_HYPERVISOR, @@ -662,6 +663,7 @@ static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL static const char * const gds_strings[] = { [GDS_MITIGATION_OFF] = "Vulnerable", [GDS_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode", + [GDS_MITIGATION_FORCE] = "Mitigation: AVX disabled, no microcode", [GDS_MITIGATION_FULL] = "Mitigation: Microcode", [GDS_MITIGATION_FULL_LOCKED] = "Mitigation: Microcode (locked)", [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", @@ -687,6 +689,7 @@ void update_gds_msr(void) rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); mcu_ctrl &= ~GDS_MITG_DIS; break; + case GDS_MITIGATION_FORCE: case GDS_MITIGATION_UCODE_NEEDED: case GDS_MITIGATION_HYPERVISOR: return; @@ -721,10 +724,23 @@ static void __init gds_select_mitigation(void) /* No microcode */ if (!(x86_read_arch_cap_msr() & ARCH_CAP_GDS_CTRL)) { - gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; + if (gds_mitigation == GDS_MITIGATION_FORCE) { + /* + * This only needs to be done on the boot CPU so do it + * here rather than in update_gds_msr() + */ + setup_clear_cpu_cap(X86_FEATURE_AVX); + pr_warn("Microcode update needed! 
Disabling AVX as mitigation.\n"); + } else { + gds_mitigation = GDS_MITIGATION_UCODE_NEEDED; + } goto out; } + /* Microcode has mitigation, use it */ + if (gds_mitigation == GDS_MITIGATION_FORCE) + gds_mitigation = GDS_MITIGATION_FULL; + rdmsrl(MSR_IA32_MCU_OPT_CTRL, mcu_ctrl); if (mcu_ctrl & GDS_MITG_LOCKED) { if (gds_mitigation == GDS_MITIGATION_OFF) @@ -755,6 +771,8 @@ static int __init gds_parse_cmdline(char *str) if (!strcmp(str, "off")) gds_mitigation = GDS_MITIGATION_OFF; + else if (!strcmp(str, "force")) + gds_mitigation = GDS_MITIGATION_FORCE; return 0; } From 59d78655f808eb82a103f0fb79df9aa20ee2c7c1 Mon Sep 17 00:00:00 2001 From: Daniel Sneddon Date: Tue, 1 Aug 2023 18:58:31 +0200 Subject: [PATCH 173/513] x86/speculation: Add Kconfig option for GDS commit 53cf5797f114ba2bd86d23a862302119848eff19 upstream Gather Data Sampling (GDS) is mitigated in microcode. However, on systems that haven't received the updated microcode, disabling AVX can act as a mitigation. Add a Kconfig option that uses the microcode mitigation if available and disables AVX otherwise. Setting this option has no effect on systems not affected by GDS. This is the equivalent of setting gather_data_sampling=force. Signed-off-by: Daniel Sneddon Signed-off-by: Dave Hansen Acked-by: Josh Poimboeuf Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/x86/Kconfig | 19 +++++++++++++++++++ arch/x86/kernel/cpu/bugs.c | 4 ++++ 2 files changed, 23 insertions(+) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index e9e1ccaf4ff2..c62c2b9c3773 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2466,6 +2466,25 @@ config SLS against straight line speculation. The kernel image might be slightly larger. +config GDS_FORCE_MITIGATION + bool "Force GDS Mitigation" + depends on CPU_SUP_INTEL + default n + help + Gather Data Sampling (GDS) is a hardware vulnerability which allows + unprivileged speculative access to data which was previously stored in + vector registers. + + This option is equivalent to setting gather_data_sampling=force on the + command line. The microcode mitigation is used if present, otherwise + AVX is disabled as a mitigation. On affected systems that are missing + the microcode any userspace code that unconditionally uses AVX will + break with this option set. + + Setting this option on systems not vulnerable to GDS has no effect. + + If in doubt, say N. + endif config ARCH_HAS_ADD_PAGES diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 9b047051fb11..f9a1293e9920 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -658,7 +658,11 @@ enum gds_mitigations { GDS_MITIGATION_HYPERVISOR, }; +#if IS_ENABLED(CONFIG_GDS_FORCE_MITIGATION) +static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE; +#else static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL; +#endif static const char * const gds_strings[] = { [GDS_MITIGATION_OFF] = "Vulnerable", From 348741a9e4d37d83024b45acce9b65233127f8b2 Mon Sep 17 00:00:00 2001 From: Daniel Sneddon Date: Tue, 1 Aug 2023 18:58:31 +0200 Subject: [PATCH 174/513] KVM: Add GDS_NO support to KVM commit 81ac7e5d741742d650b4ed6186c4826c1a0631a7 upstream Gather Data Sampling (GDS) is a transient execution attack using gather instructions from the AVX2 and AVX512 extensions. This attack allows malicious code to infer data that was previously stored in vector registers. Systems that are not vulnerable to GDS will set the GDS_NO bit of the IA32_ARCH_CAPABILITIES MSR. 
This is useful for VM guests that may think they are on vulnerable systems that are, in fact, not affected. Guests that are running on affected hosts where the mitigation is enabled are protected as if they were running on an unaffected system. On all hosts that are not affected or that are mitigated, set the GDS_NO bit. Signed-off-by: Daniel Sneddon Signed-off-by: Dave Hansen Acked-by: Josh Poimboeuf Signed-off-by: Daniel Sneddon Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/bugs.c | 7 +++++++ arch/x86/kvm/x86.c | 7 ++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index f9a1293e9920..322d1a421d8c 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -673,6 +673,13 @@ static const char * const gds_strings[] = { [GDS_MITIGATION_HYPERVISOR] = "Unknown: Dependent on hypervisor status", }; +bool gds_ucode_mitigated(void) +{ + return (gds_mitigation == GDS_MITIGATION_FULL || + gds_mitigation == GDS_MITIGATION_FULL_LOCKED); +} +EXPORT_SYMBOL_GPL(gds_ucode_mitigated); + void update_gds_msr(void) { u64 mcu_ctrl_after; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 285ba12be8ce..2686c4dcdb1a 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -303,6 +303,8 @@ static struct kmem_cache *x86_fpu_cache; static struct kmem_cache *x86_emulator_cache; +extern bool gds_ucode_mitigated(void); + /* * When called, it means the previous get/set msr reached an invalid msr. * Return true if we want to ignore/silent this failed msr access. @@ -1498,7 +1500,7 @@ static unsigned int num_msr_based_features; ARCH_CAP_SKIP_VMENTRY_L1DFLUSH | ARCH_CAP_SSB_NO | ARCH_CAP_MDS_NO | \ ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \ ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \ - ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO) + ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO) static u64 kvm_get_arch_capabilities(void) { @@ -1555,6 +1557,9 @@ static u64 kvm_get_arch_capabilities(void) */ } + if (!boot_cpu_has_bug(X86_BUG_GDS) || gds_ucode_mitigated()) + data |= ARCH_CAP_GDS_NO; + return data; } From 3e90080d56652be6cc9e5355ea65f569bf005fba Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 3 Jul 2023 15:00:32 +0200 Subject: [PATCH 175/513] x86/xen: Fix secondary processors' FPU initialization commit fe3e0a13e597c1c8617814bf9b42ab732db5c26e upstream. Moving the call of fpu__init_cpu() from cpu_init() to start_secondary() broke Xen PV guests, as those don't call start_secondary() for APs. Call fpu__init_cpu() in Xen's cpu_bringup(), which is the Xen PV replacement of start_secondary(). Fixes: b81fac906a8f ("x86/fpu: Move FPU initialization into arch_cpu_finalize_init()") Signed-off-by: Juergen Gross Signed-off-by: Borislav Petkov (AMD) Reviewed-by: Boris Ostrovsky Acked-by: Thomas Gleixner Link: https://lore.kernel.org/r/20230703130032.22916-1-jgross@suse.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/xen/smp_pv.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c index b47b5111397a..a1f974309b1c 100644 --- a/arch/x86/xen/smp_pv.c +++ b/arch/x86/xen/smp_pv.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -63,6 +64,7 @@ static void cpu_bringup(void) cr4_init(); cpu_init(); + fpu__init_cpu(); touch_softlockup_watchdog(); /* PVH runs in ring 0 and allows us to do native syscalls. Yay! 
*/ From 13ec5cb4c113d9159b5138d45f588f40ba66ca56 Mon Sep 17 00:00:00 2001 From: Juergen Gross Date: Mon, 9 Jan 2023 16:09:22 +0100 Subject: [PATCH 176/513] x86/mm: fix poking_init() for Xen PV guests commit 26ce6ec364f18d2915923bc05784084e54a5c4cc upstream. Commit 3f4c8211d982 ("x86/mm: Use mm_alloc() in poking_init()") broke the kernel for running as Xen PV guest. It seems as if the new address space is never activated before being used, resulting in Xen rejecting to accept the new CR3 value (the PGD isn't pinned). Fix that by adding the now missing call of paravirt_arch_dup_mmap() to poking_init(). That call was previously done by dup_mm()->dup_mmap() and it is a NOP for all cases but for Xen PV, where it is just doing the pinning of the PGD. Fixes: 3f4c8211d982 ("x86/mm: Use mm_alloc() in poking_init()") Signed-off-by: Juergen Gross Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20230109150922.10578-1-jgross@suse.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/init.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 34a08f6a528e..5bb3cc66cccc 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -27,6 +27,7 @@ #include #include #include +#include /* * We need to define the tracepoints somewhere, and tlb.c @@ -825,6 +826,9 @@ void __init poking_init(void) poking_mm = copy_init_mm(); BUG_ON(!poking_mm); + /* Xen PV guests need the PGD to be pinned. */ + paravirt_arch_dup_mmap(NULL, poking_mm); + /* * Randomize the poking address, but make sure that the following page * will be mapped at the same PMD. We need 2 pages, so find space for 3, From 8e4c2530879d7ac1eecc1815b1e34e597c7d4f22 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 25 Oct 2022 21:38:21 +0200 Subject: [PATCH 177/513] x86/mm: Use mm_alloc() in poking_init() commit 3f4c8211d982099be693be9aa7d6fc4607dff290 upstream. Instead of duplicating init_mm, allocate a fresh mm. The advantage is that mm_alloc() has much simpler dependencies. Additionally it makes more conceptual sense, init_mm has no (and must not have) user state to duplicate. Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20221025201057.816175235@infradead.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/mm/init.c | 2 +- include/linux/sched/task.h | 1 - kernel/fork.c | 5 ----- 3 files changed, 1 insertion(+), 7 deletions(-) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 5bb3cc66cccc..56d5ab70bfa1 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -823,7 +823,7 @@ void __init poking_init(void) spinlock_t *ptl; pte_t *ptep; - poking_mm = copy_init_mm(); + poking_mm = mm_alloc(); BUG_ON(!poking_mm); /* Xen PV guests need the PGD to be pinned. 
*/ diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index d351f1b362ef..a702d4d1a495 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -87,7 +87,6 @@ extern void exit_itimers(struct task_struct *); extern pid_t kernel_clone(struct kernel_clone_args *kargs); struct task_struct *create_io_thread(int (*fn)(void *), void *arg, int node); struct task_struct *fork_idle(int); -struct mm_struct *copy_init_mm(void); extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags); extern long kernel_wait4(pid_t, int __user *, int, struct rusage *); int kernel_wait(pid_t pid, int *stat); diff --git a/kernel/fork.c b/kernel/fork.c index 1906230a000e..0835c3e89311 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2522,11 +2522,6 @@ struct task_struct * __init fork_idle(int cpu) return task; } -struct mm_struct *copy_init_mm(void) -{ - return dup_mm(NULL, &init_mm); -} - /* * This is like kernel_clone(), but shaved down and tailored to just * creating io_uring workers. It returns a created task, or an error pointer. From d0317b9502ea8a8f66cd568e90f19b595b5d4a07 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 25 Oct 2022 21:38:18 +0200 Subject: [PATCH 178/513] mm: Move mm_cachep initialization to mm_init() commit af80602799681c78f14fbe20b6185a56020dedee upstream. In order to allow using mm_alloc() much earlier, move initializing mm_cachep into mm_init(). Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20221025201057.751153381@infradead.org Signed-off-by: Greg Kroah-Hartman --- include/linux/sched/task.h | 1 + init/main.c | 1 + kernel/fork.c | 32 ++++++++++++++++++-------------- 3 files changed, 20 insertions(+), 14 deletions(-) diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h index a702d4d1a495..d23977e9035d 100644 --- a/include/linux/sched/task.h +++ b/include/linux/sched/task.h @@ -61,6 +61,7 @@ extern void sched_dead(struct task_struct *p); void __noreturn do_task_dead(void); void __noreturn make_task_dead(int signr); +extern void mm_cache_init(void); extern void proc_caches_init(void); extern void fork_init(void); diff --git a/init/main.c b/init/main.c index 1efd414cefeb..45b3a2beb383 100644 --- a/init/main.c +++ b/init/main.c @@ -855,6 +855,7 @@ static void __init mm_init(void) init_espfix_bsp(); /* Should be run after espfix64 is set up. */ pti_init(); + mm_cache_init(); } #ifdef CONFIG_HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET diff --git a/kernel/fork.c b/kernel/fork.c index 0835c3e89311..ace0717c71e2 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -2916,10 +2916,27 @@ static void sighand_ctor(void *data) init_waitqueue_head(&sighand->signalfd_wqh); } -void __init proc_caches_init(void) +void __init mm_cache_init(void) { unsigned int mm_size; + /* + * The mm_cpumask is located at the end of mm_struct, and is + * dynamically sized based on the maximum CPU number this system + * can have, taking hotplug into account (nr_cpu_ids). 
+ */ + mm_size = sizeof(struct mm_struct) + cpumask_size(); + + mm_cachep = kmem_cache_create_usercopy("mm_struct", + mm_size, ARCH_MIN_MMSTRUCT_ALIGN, + SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, + offsetof(struct mm_struct, saved_auxv), + sizeof_field(struct mm_struct, saved_auxv), + NULL); +} + +void __init proc_caches_init(void) +{ sighand_cachep = kmem_cache_create("sighand_cache", sizeof(struct sighand_struct), 0, SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_TYPESAFE_BY_RCU| @@ -2937,19 +2954,6 @@ void __init proc_caches_init(void) SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, NULL); - /* - * The mm_cpumask is located at the end of mm_struct, and is - * dynamically sized based on the maximum CPU number this system - * can have, taking hotplug into account (nr_cpu_ids). - */ - mm_size = sizeof(struct mm_struct) + cpumask_size(); - - mm_cachep = kmem_cache_create_usercopy("mm_struct", - mm_size, ARCH_MIN_MMSTRUCT_ALIGN, - SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_ACCOUNT, - offsetof(struct mm_struct, saved_auxv), - sizeof_field(struct mm_struct, saved_auxv), - NULL); vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC|SLAB_ACCOUNT); mmap_init(); nsproxy_cache_init(); From 27a72e350869d5b06cc44d882cd34fb0f88f3f8b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 25 Oct 2022 21:38:25 +0200 Subject: [PATCH 179/513] x86/mm: Initialize text poking earlier commit 5b93a83649c7cba3a15eb7e8959b250841acb1b1 upstream. Move poking_init() up a bunch; specifically move it right after mm_init() which is right before ftrace_init(). This will allow simplifying ftrace text poking which currently has a bunch of exceptions for early boot. Signed-off-by: Peter Zijlstra (Intel) Link: https://lkml.kernel.org/r/20221025201057.881703081@infradead.org Signed-off-by: Greg Kroah-Hartman --- init/main.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/init/main.c b/init/main.c index 45b3a2beb383..63737af8de51 100644 --- a/init/main.c +++ b/init/main.c @@ -988,7 +988,7 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void) sort_main_extable(); trap_init(); mm_init(); - + poking_init(); ftrace_init(); /* trace_printk can be enabled here */ @@ -1125,8 +1125,6 @@ asmlinkage __visible void __init __no_sanitize_address start_kernel(void) taskstats_init_early(); delayacct_init(); - poking_init(); - acpi_subsystem_init(); arch_post_acpi_subsys_init(); kcsan_init(); From 0242a8bdef560523b5d81f5979854266092ab0a6 Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Tue, 1 Aug 2023 07:31:07 -0700 Subject: [PATCH 180/513] Documentation/x86: Fix backwards on/off logic about YMM support commit 1b0fc0345f2852ffe54fb9ae0e12e2ee69ad6a20 upstream These options clearly turn *off* XSAVE YMM support. Correct the typo. Reported-by: Ben Hutchings Fixes: 553a5c03e90a ("x86/speculation: Add force option to GDS mitigation") Signed-off-by: Dave Hansen Signed-off-by: Greg Kroah-Hartman --- Documentation/admin-guide/hw-vuln/gather_data_sampling.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst index 40b7a6260010..264bfa937f7d 100644 --- a/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst +++ b/Documentation/admin-guide/hw-vuln/gather_data_sampling.rst @@ -63,7 +63,7 @@ GDS can also be mitigated on systems that don't have updated microcode by disabling AVX. This can be done by setting gather_data_sampling="force" or "clearcpuid=avx" on the kernel command-line. 
-If used, these options will disable AVX use by turning on XSAVE YMM support. +If used, these options will disable AVX use by turning off XSAVE YMM support. However, the processor will still enumerate AVX support. Userspace that does not follow proper AVX enumeration to check both AVX *and* XSAVE YMM support will break. From 236dd7133394bfe30275191e3aefcc6b3b09962b Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Sat, 8 Jul 2023 10:21:35 +0200 Subject: [PATCH 181/513] x86/bugs: Increase the x86 bugs vector size to two u32s Upstream commit: 0e52740ffd10c6c316837c6c128f460f1aaba1ea There was never a doubt in my mind that they would not fit into a single u32 eventually. Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 2 +- tools/arch/x86/include/asm/cpufeatures.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 6afec5442a95..1dd57d0151f7 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -14,7 +14,7 @@ * Defines x86 CPU feature bits */ #define NCAPINTS 20 /* N 32-bit words worth of info */ -#define NBUGINTS 1 /* N 32-bit bug flags */ +#define NBUGINTS 2 /* N 32-bit bug flags */ /* * Note: If the comment begins with a quoted string, that string is used diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index bcaedfe60572..861451839cf2 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -14,7 +14,7 @@ * Defines x86 CPU feature bits */ #define NCAPINTS 20 /* N 32-bit words worth of info */ -#define NBUGINTS 1 /* N 32-bit bug flags */ +#define NBUGINTS 2 /* N 32-bit bug flags */ /* * Note: If the comment begins with a quoted string, that string is used From c3b4c644525e1ffa291794dce5567ae2ca9c9d37 Mon Sep 17 00:00:00 2001 From: Kim Phillips Date: Tue, 10 Jan 2023 16:46:37 -0600 Subject: [PATCH 182/513] x86/cpu, kvm: Add support for CPUID_80000021_EAX commit 8415a74852d7c24795007ee9862d25feb519007c upstream. Add support for CPUID leaf 80000021, EAX. The majority of the features will be used in the kernel and thus a separate leaf is appropriate. Include KVM's reverse_cpuid entry because features are used by VM guests, too. [ bp: Massage commit message. 
] Signed-off-by: Kim Phillips Signed-off-by: Borislav Petkov (AMD) Acked-by: Sean Christopherson Link: https://lore.kernel.org/r/20230124163319.2277355-2-kim.phillips@amd.com [bwh: Backported to 6.1: adjust context] Signed-off-by: Ben Hutchings Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeature.h | 7 +++++-- arch/x86/include/asm/cpufeatures.h | 2 +- arch/x86/include/asm/disabled-features.h | 3 ++- arch/x86/include/asm/required-features.h | 3 ++- arch/x86/kernel/cpu/common.c | 3 +++ arch/x86/kvm/reverse_cpuid.h | 1 + 6 files changed, 14 insertions(+), 5 deletions(-) diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h index b0f206681fde..cc3f62f5d551 100644 --- a/arch/x86/include/asm/cpufeature.h +++ b/arch/x86/include/asm/cpufeature.h @@ -32,6 +32,7 @@ enum cpuid_leafs CPUID_8000_0007_EBX, CPUID_7_EDX, CPUID_8000_001F_EAX, + CPUID_8000_0021_EAX, }; #ifdef CONFIG_X86_FEATURE_NAMES @@ -91,8 +92,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 17, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 18, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 19, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(REQUIRED_MASK, 20, feature_bit) || \ REQUIRED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 20)) + BUILD_BUG_ON_ZERO(NCAPINTS != 21)) #define DISABLED_MASK_BIT_SET(feature_bit) \ ( CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 0, feature_bit) || \ @@ -115,8 +117,9 @@ extern const char * const x86_bug_flags[NBUGINTS*32]; CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 17, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 18, feature_bit) || \ CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 19, feature_bit) || \ + CHECK_BIT_IN_MASK_WORD(DISABLED_MASK, 20, feature_bit) || \ DISABLED_MASK_CHECK || \ - BUILD_BUG_ON_ZERO(NCAPINTS != 20)) + BUILD_BUG_ON_ZERO(NCAPINTS != 21)) #define cpu_has(c, bit) \ (__builtin_constant_p(bit) && REQUIRED_MASK_BIT_SET(bit) ? 
1 : \ diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 1dd57d0151f7..0a23f7d4aff7 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -13,7 +13,7 @@ /* * Defines x86 CPU feature bits */ -#define NCAPINTS 20 /* N 32-bit words worth of info */ +#define NCAPINTS 21 /* N 32-bit words worth of info */ #define NBUGINTS 2 /* N 32-bit bug flags */ /* diff --git a/arch/x86/include/asm/disabled-features.h b/arch/x86/include/asm/disabled-features.h index 834a3b6d81e1..99a12012c66e 100644 --- a/arch/x86/include/asm/disabled-features.h +++ b/arch/x86/include/asm/disabled-features.h @@ -108,6 +108,7 @@ #define DISABLED_MASK17 0 #define DISABLED_MASK18 0 #define DISABLED_MASK19 0 -#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20) +#define DISABLED_MASK20 0 +#define DISABLED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) #endif /* _ASM_X86_DISABLED_FEATURES_H */ diff --git a/arch/x86/include/asm/required-features.h b/arch/x86/include/asm/required-features.h index b2d504f11937..9bf60a8b9e9c 100644 --- a/arch/x86/include/asm/required-features.h +++ b/arch/x86/include/asm/required-features.h @@ -102,6 +102,7 @@ #define REQUIRED_MASK17 0 #define REQUIRED_MASK18 0 #define REQUIRED_MASK19 0 -#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 20) +#define REQUIRED_MASK20 0 +#define REQUIRED_MASK_CHECK BUILD_BUG_ON_ZERO(NCAPINTS != 21) #endif /* _ASM_X86_REQUIRED_FEATURES_H */ diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 96be8c03fd22..6555c0c83e79 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -968,6 +968,9 @@ void get_cpu_cap(struct cpuinfo_x86 *c) if (c->extended_cpuid_level >= 0x8000001f) c->x86_capability[CPUID_8000_001F_EAX] = cpuid_eax(0x8000001f); + if (c->extended_cpuid_level >= 0x80000021) + c->x86_capability[CPUID_8000_0021_EAX] = cpuid_eax(0x80000021); + init_scattered_cpuid_features(c); init_speculation_control(c); diff --git a/arch/x86/kvm/reverse_cpuid.h b/arch/x86/kvm/reverse_cpuid.h index a19d473d0184..7eeade35a425 100644 --- a/arch/x86/kvm/reverse_cpuid.h +++ b/arch/x86/kvm/reverse_cpuid.h @@ -48,6 +48,7 @@ static const struct cpuid_reg reverse_cpuid[] = { [CPUID_7_1_EAX] = { 7, 1, CPUID_EAX}, [CPUID_12_EAX] = {0x00000012, 0, CPUID_EAX}, [CPUID_8000_001F_EAX] = {0x8000001f, 0, CPUID_EAX}, + [CPUID_8000_0021_EAX] = {0x80000021, 0, CPUID_EAX}, }; /* From b35087763a44d1eb45857f799579a351332be505 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Wed, 28 Jun 2023 11:02:39 +0200 Subject: [PATCH 183/513] x86/srso: Add a Speculative RAS Overflow mitigation Upstream commit: fb3bd914b3ec28f5fb697ac55c4846ac2d542855 Add a mitigation for the speculative return address stack overflow vulnerability found on AMD processors. The mitigation works by ensuring all RET instructions speculate to a controlled location, similar to how speculation is controlled in the retpoline sequence. To accomplish this, the __x86_return_thunk forces the CPU to mispredict every function return using a 'safe return' sequence. To ensure the safety of this mitigation, the kernel must ensure that the safe return sequence is itself free from attacker interference. In Zen3 and Zen4, this is accomplished by creating a BTB alias between the untraining function srso_untrain_ret_alias() and the safe return function srso_safe_ret_alias() which results in evicting a potentially poisoned BTB entry and using that safe one for all function returns. 
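As a rough illustration of the aliasing rule described above, the user-space sketch below (illustrative only; the two addresses are made-up 2M-aligned values, not the real symbol addresses) reproduces the check that the vmlinux.lds.S assertion in this patch performs, written as (A | B) - (A & B) because GNU ld cannot express XOR:

/* Standalone demo of the SRSO alias-pair invariant; not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define SRSO_ALIAS_MASK ((1ULL << 2) | (1ULL << 8) | (1ULL << 14) | (1ULL << 20))

static int addrs_alias(uint64_t untrain, uint64_t safe)
{
	/* Same trick as the linker script: OR minus AND equals XOR. */
	return ((untrain | safe) - (untrain & safe)) == SRSO_ALIAS_MASK;
}

int main(void)
{
	uint64_t untrain = 0xffffffff82200000ULL;	/* hypothetical, 2M aligned */
	uint64_t safe    = untrain | SRSO_ALIAS_MASK;	/* bits 2, 8, 14 and 20 set */

	printf("alias pair ok: %d\n", addrs_alias(untrain, safe));
	return 0;
}

With a 2M-aligned untraining address, setting bits 2, 8, 14 and 20 produces the aliasing partner in the same 2M page, which is exactly what the ASSERT added to vmlinux.lds.S further down verifies at link time.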
In older Zen1 and Zen2, this is accomplished using a reinterpretation technique similar to Retbleed one: srso_untrain_ret() and srso_safe_ret(). Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Greg Kroah-Hartman --- Documentation/admin-guide/hw-vuln/index.rst | 1 + Documentation/admin-guide/hw-vuln/srso.rst | 133 ++++++++++++++++++ .../admin-guide/kernel-parameters.txt | 11 ++ arch/x86/Kconfig | 7 + arch/x86/include/asm/cpufeatures.h | 5 + arch/x86/include/asm/nospec-branch.h | 9 +- arch/x86/include/asm/processor.h | 2 + arch/x86/kernel/alternative.c | 4 +- arch/x86/kernel/cpu/amd.c | 14 ++ arch/x86/kernel/cpu/bugs.c | 106 ++++++++++++++ arch/x86/kernel/cpu/common.c | 8 +- arch/x86/kernel/vmlinux.lds.S | 32 ++++- arch/x86/lib/retpoline.S | 81 ++++++++++- drivers/base/cpu.c | 8 ++ include/linux/cpu.h | 2 + tools/objtool/arch/x86/decode.c | 5 +- 16 files changed, 419 insertions(+), 9 deletions(-) create mode 100644 Documentation/admin-guide/hw-vuln/srso.rst diff --git a/Documentation/admin-guide/hw-vuln/index.rst b/Documentation/admin-guide/hw-vuln/index.rst index 436fac0bd9c3..6828102baaa7 100644 --- a/Documentation/admin-guide/hw-vuln/index.rst +++ b/Documentation/admin-guide/hw-vuln/index.rst @@ -20,3 +20,4 @@ are configurable at compile, boot or run time. processor_mmio_stale_data.rst cross-thread-rsb.rst gather_data_sampling.rst + srso diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst new file mode 100644 index 000000000000..2f923c805802 --- /dev/null +++ b/Documentation/admin-guide/hw-vuln/srso.rst @@ -0,0 +1,133 @@ +.. SPDX-License-Identifier: GPL-2.0 + +Speculative Return Stack Overflow (SRSO) +======================================== + +This is a mitigation for the speculative return stack overflow (SRSO) +vulnerability found on AMD processors. The mechanism is by now the well +known scenario of poisoning CPU functional units - the Branch Target +Buffer (BTB) and Return Address Predictor (RAP) in this case - and then +tricking the elevated privilege domain (the kernel) into leaking +sensitive data. + +AMD CPUs predict RET instructions using a Return Address Predictor (aka +Return Address Stack/Return Stack Buffer). In some cases, a non-architectural +CALL instruction (i.e., an instruction predicted to be a CALL but is +not actually a CALL) can create an entry in the RAP which may be used +to predict the target of a subsequent RET instruction. + +The specific circumstances that lead to this varies by microarchitecture +but the concern is that an attacker can mis-train the CPU BTB to predict +non-architectural CALL instructions in kernel space and use this to +control the speculative target of a subsequent kernel RET, potentially +leading to information disclosure via a speculative side-channel. + +The issue is tracked under CVE-2023-20569. + +Affected processors +------------------- + +AMD Zen, generations 1-4. That is, all families 0x17 and 0x19. Older +processors have not been investigated. + +System information and options +------------------------------ + +First of all, it is required that the latest microcode be loaded for +mitigations to be effective. + +The sysfs file showing SRSO mitigation status is: + + /sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow + +The possible values in this file are: + + - 'Not affected' The processor is not vulnerable + + - 'Vulnerable: no microcode' The processor is vulnerable, no + microcode extending IBPB functionality + to address the vulnerability has been + applied. 
+ + - 'Mitigation: microcode' Extended IBPB functionality microcode + patch has been applied. It does not + address User->Kernel and Guest->Host + transitions protection but it does + address User->User and VM->VM attack + vectors. + + (spec_rstack_overflow=microcode) + + - 'Mitigation: safe RET' Software-only mitigation. It complements + the extended IBPB microcode patch + functionality by addressing User->Kernel + and Guest->Host transitions protection. + + Selected by default or by + spec_rstack_overflow=safe-ret + + - 'Mitigation: IBPB' Similar protection as "safe RET" above + but employs an IBPB barrier on privilege + domain crossings (User->Kernel, + Guest->Host). + + (spec_rstack_overflow=ibpb) + + - 'Mitigation: IBPB on VMEXIT' Mitigation addressing the cloud provider + scenario - the Guest->Host transitions + only. + + (spec_rstack_overflow=ibpb-vmexit) + +In order to exploit vulnerability, an attacker needs to: + + - gain local access on the machine + + - break kASLR + + - find gadgets in the running kernel in order to use them in the exploit + + - potentially create and pin an additional workload on the sibling + thread, depending on the microarchitecture (not necessary on fam 0x19) + + - run the exploit + +Considering the performance implications of each mitigation type, the +default one is 'Mitigation: safe RET' which should take care of most +attack vectors, including the local User->Kernel one. + +As always, the user is advised to keep her/his system up-to-date by +applying software updates regularly. + +The default setting will be reevaluated when needed and especially when +new attack vectors appear. + +As one can surmise, 'Mitigation: safe RET' does come at the cost of some +performance depending on the workload. If one trusts her/his userspace +and does not want to suffer the performance impact, one can always +disable the mitigation with spec_rstack_overflow=off. + +Similarly, 'Mitigation: IBPB' is another full mitigation type employing +an indrect branch prediction barrier after having applied the required +microcode patch for one's system. This mitigation comes also at +a performance cost. + +Mitigation: safe RET +-------------------- + +The mitigation works by ensuring all RET instructions speculate to +a controlled location, similar to how speculation is controlled in the +retpoline sequence. To accomplish this, the __x86_return_thunk forces +the CPU to mispredict every function return using a 'safe return' +sequence. + +To ensure the safety of this mitigation, the kernel must ensure that the +safe return sequence is itself free from attacker interference. In Zen3 +and Zen4, this is accomplished by creating a BTB alias between the +untraining function srso_untrain_ret_alias() and the safe return +function srso_safe_ret_alias() which results in evicting a potentially +poisoned BTB entry and using that safe one for all function returns. + +In older Zen1 and Zen2, this is accomplished using a reinterpretation +technique similar to Retbleed one: srso_untrain_ret() and +srso_safe_ret(). diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt index 495cd57138ab..bb78526120f3 100644 --- a/Documentation/admin-guide/kernel-parameters.txt +++ b/Documentation/admin-guide/kernel-parameters.txt @@ -5450,6 +5450,17 @@ Not specifying this option is equivalent to spectre_v2_user=auto. 
+ spec_rstack_overflow= + [X86] Control RAS overflow mitigation on AMD Zen CPUs + + off - Disable mitigation + microcode - Enable microcode mitigation only + safe-ret - Enable sw-only safe RET mitigation (default) + ibpb - Enable mitigation by issuing IBPB on + kernel entry + ibpb-vmexit - Issue IBPB only on VMEXIT + (cloud-specific mitigation) + spec_store_bypass_disable= [HW] Control Speculative Store Bypass (SSB) Disable mitigation (Speculative Store Bypass vulnerability) diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index c62c2b9c3773..cfb1edd25437 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -2457,6 +2457,13 @@ config CPU_IBRS_ENTRY This mitigates both spectre_v2 and retbleed at great cost to performance. +config CPU_SRSO + bool "Mitigate speculative RAS overflow on AMD" + depends on CPU_SUP_AMD && X86_64 && RETHUNK + default y + help + Enable the SRSO mitigation needed on AMD Zen1-4 machines. + config SLS bool "Mitigate Straight-Line-Speculation" depends on CC_HAS_SLS && X86_64 diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 0a23f7d4aff7..737332740dba 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -306,6 +306,9 @@ #define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */ +#define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */ +#define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */ + /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ @@ -455,4 +458,6 @@ #define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */ #define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */ +/* BUG word 2 */ +#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 06c9f0eaa9ed..2dd7ad162e6f 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -112,7 +112,7 @@ * eventually turn into it's own annotation. 
*/ .macro ANNOTATE_UNRET_END -#ifdef CONFIG_DEBUG_ENTRY +#if (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)) ANNOTATE_RETPOLINE_SAFE nop #endif @@ -179,6 +179,11 @@ CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB #endif + +#ifdef CONFIG_CPU_SRSO + ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ + "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS +#endif .endm #else /* __ASSEMBLY__ */ @@ -191,6 +196,8 @@ extern void __x86_return_thunk(void); extern void zen_untrain_ret(void); +extern void srso_untrain_ret(void); +extern void srso_untrain_ret_alias(void); extern void entry_ibpb(void); #ifdef CONFIG_RETPOLINE diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 3e3bd5b7d5db..747ccc2ae383 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -803,9 +803,11 @@ extern u16 get_llc_id(unsigned int cpu); #ifdef CONFIG_CPU_SUP_AMD extern u32 amd_get_nodes_per_socket(void); extern u32 amd_get_highest_perf(void); +extern bool cpu_has_ibpb_brtype_microcode(void); #else static inline u32 amd_get_nodes_per_socket(void) { return 0; } static inline u32 amd_get_highest_perf(void) { return 0; } +static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; } #endif static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 43dd7f281a21..2c7d97c45796 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -523,7 +523,9 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes) { int i = 0; - if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) + if (cpu_feature_enabled(X86_FEATURE_RETHUNK) || + cpu_feature_enabled(X86_FEATURE_SRSO) || + cpu_feature_enabled(X86_FEATURE_SRSO_ALIAS)) return -1; bytes[i++] = RET_INSN_OPCODE; diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 3daceadf5d1f..d984fb03e287 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1270,6 +1270,20 @@ u32 amd_get_highest_perf(void) } EXPORT_SYMBOL_GPL(amd_get_highest_perf); +bool cpu_has_ibpb_brtype_microcode(void) +{ + u8 fam = boot_cpu_data.x86; + + if (fam == 0x17) { + /* Zen1/2 IBPB flushes branch type predictions too. 
*/ + return boot_cpu_has(X86_FEATURE_AMD_IBPB); + } else if (fam == 0x19) { + return false; + } + + return false; +} + static void zenbleed_check_cpu(void *unused) { struct cpuinfo_x86 *c = &cpu_data(smp_processor_id()); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 322d1a421d8c..758d53cd6bdc 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -47,6 +47,7 @@ static void __init mmio_select_mitigation(void); static void __init srbds_select_mitigation(void); static void __init l1d_flush_select_mitigation(void); static void __init gds_select_mitigation(void); +static void __init srso_select_mitigation(void); /* The base value of the SPEC_CTRL MSR without task-specific bits set */ u64 x86_spec_ctrl_base; @@ -161,6 +162,7 @@ void __init cpu_select_mitigations(void) srbds_select_mitigation(); l1d_flush_select_mitigation(); gds_select_mitigation(); + srso_select_mitigation(); } /* @@ -2303,6 +2305,95 @@ static int __init l1tf_cmdline(char *str) } early_param("l1tf", l1tf_cmdline); +#undef pr_fmt +#define pr_fmt(fmt) "Speculative Return Stack Overflow: " fmt + +enum srso_mitigation { + SRSO_MITIGATION_NONE, + SRSO_MITIGATION_MICROCODE, + SRSO_MITIGATION_SAFE_RET, +}; + +enum srso_mitigation_cmd { + SRSO_CMD_OFF, + SRSO_CMD_MICROCODE, + SRSO_CMD_SAFE_RET, +}; + +static const char * const srso_strings[] = { + [SRSO_MITIGATION_NONE] = "Vulnerable", + [SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode", + [SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET", +}; + +static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE; +static enum srso_mitigation_cmd srso_cmd __ro_after_init = SRSO_CMD_SAFE_RET; + +static int __init srso_parse_cmdline(char *str) +{ + if (!str) + return -EINVAL; + + if (!strcmp(str, "off")) + srso_cmd = SRSO_CMD_OFF; + else if (!strcmp(str, "microcode")) + srso_cmd = SRSO_CMD_MICROCODE; + else if (!strcmp(str, "safe-ret")) + srso_cmd = SRSO_CMD_SAFE_RET; + else + pr_err("Ignoring unknown SRSO option (%s).", str); + + return 0; +} +early_param("spec_rstack_overflow", srso_parse_cmdline); + +#define SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options." + +static void __init srso_select_mitigation(void) +{ + bool has_microcode; + + if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off()) + return; + + has_microcode = cpu_has_ibpb_brtype_microcode(); + if (!has_microcode) { + pr_warn("IBPB-extending microcode not applied!\n"); + pr_warn(SRSO_NOTICE); + } + + switch (srso_cmd) { + case SRSO_CMD_OFF: + return; + + case SRSO_CMD_MICROCODE: + if (has_microcode) { + srso_mitigation = SRSO_MITIGATION_MICROCODE; + pr_warn(SRSO_NOTICE); + } + break; + + case SRSO_CMD_SAFE_RET: + if (IS_ENABLED(CONFIG_CPU_SRSO)) { + if (boot_cpu_data.x86 == 0x19) + setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); + else + setup_force_cpu_cap(X86_FEATURE_SRSO); + srso_mitigation = SRSO_MITIGATION_SAFE_RET; + } else { + pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); + return; + } + break; + + default: + break; + + } + + pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode")); +} + #undef pr_fmt #define pr_fmt(fmt) fmt @@ -2506,6 +2597,13 @@ static ssize_t gds_show_state(char *buf) return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]); } +static ssize_t srso_show_state(char *buf) +{ + return sysfs_emit(buf, "%s%s\n", + srso_strings[srso_mitigation], + (cpu_has_ibpb_brtype_microcode() ? 
"" : ", no microcode")); +} + static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, char *buf, unsigned int bug) { @@ -2558,6 +2656,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr case X86_BUG_GDS: return gds_show_state(buf); + case X86_BUG_SRSO: + return srso_show_state(buf); + default: break; } @@ -2627,4 +2728,9 @@ ssize_t cpu_show_gds(struct device *dev, struct device_attribute *attr, char *bu { return cpu_show_common(dev, attr, buf, X86_BUG_GDS); } + +ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf) +{ + return cpu_show_common(dev, attr, buf, X86_BUG_SRSO); +} #endif diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 6555c0c83e79..8d6f28b2f530 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1136,6 +1136,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { #define SMT_RSB BIT(4) /* CPU is affected by GDS */ #define GDS BIT(5) +/* CPU is affected by SRSO */ +#define SRSO BIT(6) static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), @@ -1169,8 +1171,9 @@ static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_AMD(0x15, RETBLEED), VULNBL_AMD(0x16, RETBLEED), - VULNBL_AMD(0x17, RETBLEED | SMT_RSB), + VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO), VULNBL_HYGON(0x18, RETBLEED | SMT_RSB), + VULNBL_AMD(0x19, SRSO), {} }; @@ -1301,6 +1304,9 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) boot_cpu_has(X86_FEATURE_AVX)) setup_force_cpu_bug(X86_BUG_GDS); + if (cpu_matches(cpu_vuln_blacklist, SRSO)) + setup_force_cpu_bug(X86_BUG_SRSO); + if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index c1efcd194ad7..50aaf0cd8f46 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -133,7 +133,20 @@ SECTIONS LOCK_TEXT KPROBES_TEXT ALIGN_ENTRY_TEXT_BEGIN +#ifdef CONFIG_CPU_SRSO + *(.text.__x86.rethunk_untrain) +#endif + ENTRY_TEXT + +#ifdef CONFIG_CPU_SRSO + /* + * See the comment above srso_untrain_ret_alias()'s + * definition. + */ + . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); + *(.text.__x86.rethunk_safe) +#endif ALIGN_ENTRY_TEXT_END SOFTIRQENTRY_TEXT STATIC_CALL_TEXT @@ -142,13 +155,15 @@ SECTIONS #ifdef CONFIG_RETPOLINE __indirect_thunk_start = .; - *(.text.__x86.*) + *(.text.__x86.indirect_thunk) + *(.text.__x86.return_thunk) __indirect_thunk_end = .; #endif } :text =0xcccc /* End of text section, which should occupy whole number of pages */ _etext = .; + . = ALIGN(PAGE_SIZE); X86_ALIGN_RODATA_BEGIN @@ -496,6 +511,21 @@ INIT_PER_CPU(irq_stack_backing_store); "fixed_percpu_data is not at start of per-cpu area"); #endif + #ifdef CONFIG_RETHUNK +. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned"); +. = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); +#endif + +#ifdef CONFIG_CPU_SRSO +/* + * GNU ld cannot do XOR so do: (A | B) - (A & B) in order to compute the XOR + * of the two function addresses: + */ +. 
= ASSERT(((srso_untrain_ret_alias | srso_safe_ret_alias) - + (srso_untrain_ret_alias & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), + "SRSO function pair won't alias"); +#endif + #endif /* CONFIG_X86_64 */ #ifdef CONFIG_KEXEC_CORE diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 1221bb099afb..5f7eed97487e 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -9,6 +9,7 @@ #include #include #include +#include .section .text.__x86.indirect_thunk @@ -73,6 +74,45 @@ SYM_CODE_END(__x86_indirect_thunk_array) */ #ifdef CONFIG_RETHUNK +/* + * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at + * special addresses: + * + * - srso_untrain_ret_alias() is 2M aligned + * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14 + * and 20 in its virtual address are set (while those bits in the + * srso_untrain_ret_alias() function are cleared). + * + * This guarantees that those two addresses will alias in the branch + * target buffer of Zen3/4 generations, leading to any potential + * poisoned entries at that BTB slot to get evicted. + * + * As a result, srso_safe_ret_alias() becomes a safe return. + */ +#ifdef CONFIG_CPU_SRSO + .section .text.__x86.rethunk_untrain + +SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) + ASM_NOP2 + lfence + jmp __x86_return_thunk +SYM_FUNC_END(srso_untrain_ret_alias) +__EXPORT_THUNK(srso_untrain_ret_alias) + + .section .text.__x86.rethunk_safe +#endif + +/* Needs a definition for the __x86_return_thunk alternative below. */ +SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) +#ifdef CONFIG_CPU_SRSO + add $8, %_ASM_SP + UNWIND_HINT_FUNC +#endif + ANNOTATE_UNRET_SAFE + ret + int3 +SYM_FUNC_END(srso_safe_ret_alias) + .section .text.__x86.return_thunk /* @@ -85,7 +125,7 @@ SYM_CODE_END(__x86_indirect_thunk_array) * from re-poisioning the BTB prediction. */ .align 64 - .skip 63, 0xcc + .skip 64 - (__ret - zen_untrain_ret), 0xcc SYM_FUNC_START_NOALIGN(zen_untrain_ret); /* @@ -117,10 +157,10 @@ SYM_FUNC_START_NOALIGN(zen_untrain_ret); * evicted, __x86_return_thunk will suffer Straight Line Speculation * which will be contained safely by the INT3. */ -SYM_INNER_LABEL(__x86_return_thunk, SYM_L_GLOBAL) +SYM_INNER_LABEL(__ret, SYM_L_GLOBAL) ret int3 -SYM_CODE_END(__x86_return_thunk) +SYM_CODE_END(__ret) /* * Ensure the TEST decoding / BTB invalidation is complete. @@ -131,11 +171,44 @@ SYM_CODE_END(__x86_return_thunk) * Jump back and execute the RET in the middle of the TEST instruction. * INT3 is for SLS protection. */ - jmp __x86_return_thunk + jmp __ret int3 SYM_FUNC_END(zen_untrain_ret) __EXPORT_THUNK(zen_untrain_ret) +/* + * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret() + * above. On kernel entry, srso_untrain_ret() is executed which is a + * + * movabs $0xccccccc308c48348,%rax + * + * and when the return thunk executes the inner label srso_safe_ret() + * later, it is a stack manipulation and a RET which is mispredicted and + * thus a "safe" one to use. 
+ */ + .align 64 + .skip 64 - (srso_safe_ret - srso_untrain_ret), 0xcc +SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) + .byte 0x48, 0xb8 + +SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) + add $8, %_ASM_SP + ret + int3 + int3 + int3 + lfence + call srso_safe_ret + int3 +SYM_CODE_END(srso_safe_ret) +SYM_FUNC_END(srso_untrain_ret) +__EXPORT_THUNK(srso_untrain_ret) + +SYM_FUNC_START(__x86_return_thunk) + ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \ + "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS + int3 +SYM_CODE_END(__x86_return_thunk) EXPORT_SYMBOL(__x86_return_thunk) #endif /* CONFIG_RETHUNK */ diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c index 2d376438ffad..46430cf2401e 100644 --- a/drivers/base/cpu.c +++ b/drivers/base/cpu.c @@ -583,6 +583,12 @@ ssize_t __weak cpu_show_gds(struct device *dev, return sysfs_emit(buf, "Not affected\n"); } +ssize_t __weak cpu_show_spec_rstack_overflow(struct device *dev, + struct device_attribute *attr, char *buf) +{ + return sysfs_emit(buf, "Not affected\n"); +} + static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL); static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL); static DEVICE_ATTR(spectre_v2, 0444, cpu_show_spectre_v2, NULL); @@ -595,6 +601,7 @@ static DEVICE_ATTR(srbds, 0444, cpu_show_srbds, NULL); static DEVICE_ATTR(mmio_stale_data, 0444, cpu_show_mmio_stale_data, NULL); static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL); static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL); +static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL); static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_meltdown.attr, @@ -609,6 +616,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = { &dev_attr_mmio_stale_data.attr, &dev_attr_retbleed.attr, &dev_attr_gather_data_sampling.attr, + &dev_attr_spec_rstack_overflow.attr, NULL }; diff --git a/include/linux/cpu.h b/include/linux/cpu.h index 94d26089d52a..d4c860de9a6a 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -70,6 +70,8 @@ extern ssize_t cpu_show_mmio_stale_data(struct device *dev, char *buf); extern ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index f62db0e006e9..cf8ea3594125 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -725,5 +725,8 @@ bool arch_is_retpoline(struct symbol *sym) bool arch_is_rethunk(struct symbol *sym) { - return !strcmp(sym->name, "__x86_return_thunk"); + return !strcmp(sym->name, "__x86_return_thunk") || + !strcmp(sym->name, "srso_untrain_ret") || + !strcmp(sym->name, "srso_safe_ret") || + !strcmp(sym->name, "__ret"); } From 4e9115e194a8d21195a5e7600975ed601dd364ff Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Tue, 18 Jul 2023 11:13:40 +0200 Subject: [PATCH 184/513] x86/srso: Add IBPB_BRTYPE support Upstream commit: 79113e4060aba744787a81edb9014f2865193854 Add support for the synthetic CPUID flag which "if this bit is 1, it indicates that MSR 49h (PRED_CMD) bit 0 (IBPB) flushes all branch type predictions from the CPU branch predictor." 
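As a quick way to see this flag from user space, the sketch below (an illustration only, assuming x86-64 with a recent GCC/Clang <cpuid.h>) reads CPUID leaf 0x80000021 EAX, where IBPB_BRTYPE sits at bit 28 per the cpufeatures.h hunk below. Note that on parts where only updated microcode provides the behaviour, the kernel synthesizes the flag instead, so raw CPUID may under-report it:

/* Standalone probe of CPUID 0x80000021 EAX bit 28 (IBPB_BRTYPE); demo only. */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	if (!__get_cpuid_count(0x80000021, 0, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x80000021 not supported");
		return 0;
	}

	printf("IBPB_BRTYPE (EAX bit 28): %s\n", (eax & (1u << 28)) ? "yes" : "no");
	return 0;
}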
This flag is there so that this capability in guests can be detected easily (otherwise one would have to track microcode revisions which is impossible for guests). It is also needed only for Zen3 and -4. The other two (Zen1 and -2) always flush branch type predictions by default. Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 2 ++ arch/x86/kernel/cpu/bugs.c | 12 +++++++++++- 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 737332740dba..11203651cfd0 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -415,6 +415,8 @@ #define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */ #define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */ +#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */ + /* * BUG word(s) */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 758d53cd6bdc..d22f64a26e03 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2356,10 +2356,20 @@ static void __init srso_select_mitigation(void) if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off()) return; - has_microcode = cpu_has_ibpb_brtype_microcode(); + /* + * The first check is for the kernel running as a guest in order + * for guests to verify whether IBPB is a viable mitigation. + */ + has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode(); if (!has_microcode) { pr_warn("IBPB-extending microcode not applied!\n"); pr_warn(SRSO_NOTICE); + } else { + /* + * Enable the synthetic (even if in a real CPUID leaf) + * flag for guests. + */ + setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE); } switch (srso_cmd) { From c24aaa7dde5ff82acb4415c15225e4a42982240d Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Thu, 29 Jun 2023 17:43:40 +0200 Subject: [PATCH 185/513] x86/srso: Add SRSO_NO support Upstream commit: 1b5277c0ea0b247393a9c426769fde18cff5e2f6 Add support for the CPUID flag which denotes that the CPU is not affected by SRSO. 
Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 2 ++ arch/x86/include/asm/msr-index.h | 1 + arch/x86/include/asm/nospec-branch.h | 6 +++--- arch/x86/kernel/cpu/amd.c | 12 ++++++------ arch/x86/kernel/cpu/bugs.c | 24 ++++++++++++++++++++---- arch/x86/kernel/cpu/common.c | 6 ++++-- arch/x86/kvm/cpuid.c | 3 +++ 7 files changed, 39 insertions(+), 15 deletions(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 11203651cfd0..7a6e846d4fe6 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -415,7 +415,9 @@ #define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */ #define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */ +#define X86_FEATURE_SBPB (20*32+27) /* "" Selective Branch Prediction Barrier */ #define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */ +#define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */ /* * BUG word(s) diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h index bad29c2deecf..91d8322af413 100644 --- a/arch/x86/include/asm/msr-index.h +++ b/arch/x86/include/asm/msr-index.h @@ -60,6 +60,7 @@ #define MSR_IA32_PRED_CMD 0x00000049 /* Prediction Command */ #define PRED_CMD_IBPB BIT(0) /* Indirect Branch Prediction Barrier */ +#define PRED_CMD_SBPB BIT(7) /* Selective Branch Prediction Barrier */ #define MSR_PPIN_CTL 0x0000004e #define MSR_PPIN 0x0000004f diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 2dd7ad162e6f..01e90e68f0c7 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -307,11 +307,11 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature) : "memory"); } +extern u64 x86_pred_cmd; + static inline void indirect_branch_prediction_barrier(void) { - u64 val = PRED_CMD_IBPB; - - alternative_msr_write(MSR_IA32_PRED_CMD, val, X86_FEATURE_USE_IBPB); + alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB); } /* The Intel SPEC CTRL MSR base value cache */ diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index d984fb03e287..9b8a7ec39561 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1274,14 +1274,14 @@ bool cpu_has_ibpb_brtype_microcode(void) { u8 fam = boot_cpu_data.x86; - if (fam == 0x17) { - /* Zen1/2 IBPB flushes branch type predictions too. */ + /* Zen1/2 IBPB flushes branch type predictions too. */ + if (fam == 0x17) return boot_cpu_has(X86_FEATURE_AMD_IBPB); - } else if (fam == 0x19) { + /* Poke the MSR bit on Zen3/4 to check its presence. 
*/ + else if (fam == 0x19) + return !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB); + else return false; - } - - return false; } static void zenbleed_check_cpu(void *unused) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index d22f64a26e03..ea7707b43d3f 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -57,6 +57,9 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base); DEFINE_PER_CPU(u64, x86_spec_ctrl_current); EXPORT_SYMBOL_GPL(x86_spec_ctrl_current); +u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB; +EXPORT_SYMBOL_GPL(x86_pred_cmd); + static DEFINE_MUTEX(spec_ctrl_mutex); /* Update SPEC_CTRL MSR and its cached copy unconditionally */ @@ -2354,7 +2357,7 @@ static void __init srso_select_mitigation(void) bool has_microcode; if (!boot_cpu_has_bug(X86_BUG_SRSO) || cpu_mitigations_off()) - return; + goto pred_cmd; /* * The first check is for the kernel running as a guest in order @@ -2367,9 +2370,18 @@ static void __init srso_select_mitigation(void) } else { /* * Enable the synthetic (even if in a real CPUID leaf) - * flag for guests. + * flags for guests. */ setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE); + setup_force_cpu_cap(X86_FEATURE_SBPB); + + /* + * Zen1/2 with SMT off aren't vulnerable after the right + * IBPB microcode has been applied. + */ + if ((boot_cpu_data.x86 < 0x19) && + (cpu_smt_control == CPU_SMT_DISABLED)) + setup_force_cpu_cap(X86_FEATURE_SRSO_NO); } switch (srso_cmd) { @@ -2392,16 +2404,20 @@ static void __init srso_select_mitigation(void) srso_mitigation = SRSO_MITIGATION_SAFE_RET; } else { pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); - return; + goto pred_cmd; } break; default: break; - } pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode")); + +pred_cmd: + if (boot_cpu_has(X86_FEATURE_SRSO_NO) || + srso_cmd == SRSO_CMD_OFF) + x86_pred_cmd = PRED_CMD_SBPB; } #undef pr_fmt diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 8d6f28b2f530..3eefd31af923 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1304,8 +1304,10 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c) boot_cpu_has(X86_FEATURE_AVX)) setup_force_cpu_bug(X86_BUG_GDS); - if (cpu_matches(cpu_vuln_blacklist, SRSO)) - setup_force_cpu_bug(X86_BUG_SRSO); + if (!cpu_has(c, X86_FEATURE_SRSO_NO)) { + if (cpu_matches(cpu_vuln_blacklist, SRSO)) + setup_force_cpu_bug(X86_BUG_SRSO); + } if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN)) return; diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c index 528437e3e2f3..b939b94d931f 100644 --- a/arch/x86/kvm/cpuid.c +++ b/arch/x86/kvm/cpuid.c @@ -544,6 +544,9 @@ void kvm_set_cpu_caps(void) F(PMM) | F(PMM_EN) ); + if (cpu_feature_enabled(X86_FEATURE_SRSO_NO)) + kvm_cpu_cap_set(X86_FEATURE_SRSO_NO); + /* * Hide RDTSCP and RDPID if either feature is reported as supported but * probing MSR_TSC_AUX failed. This is purely a sanity check and From 5398faac76a6188bbacc142984ec143fef7f640c Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Thu, 6 Jul 2023 15:04:35 +0200 Subject: [PATCH 186/513] x86/srso: Add IBPB Upstream commit: 233d6f68b98d480a7c42ebe78c38f79d44741ca9 Add the option to mitigate using IBPB on a kernel entry. Pull in the Retbleed alternative so that the IBPB call from there can be used. Also, if Retbleed mitigation is done using IBPB, the same mitigation can and must be used here. 
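The one-line change to the UNTRAIN_RET guard is what "pulls in" the Retbleed alternative: with CONFIG_CPU_SRSO added to the #if, the untrain/entry-IBPB alternatives stay compiled in even when both Retbleed options are off. A stand-alone mock of that guard (the CONFIG_ macros are mocked here; only the shape of the #if comes from the hunk below):

  #include <stdio.h>

  /* Pretend .config: SRSO mitigation built in, the Retbleed options not. */
  #define CONFIG_CPU_SRSO 1
  /* #define CONFIG_CPU_UNRET_ENTRY 1 */
  /* #define CONFIG_CPU_IBPB_ENTRY  1 */

  int main(void)
  {
  #if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \
          defined(CONFIG_CPU_SRSO)
          puts("UNTRAIN_RET body emitted: the entry-IBPB alternative exists");
  #else
          puts("UNTRAIN_RET body compiled out: no entry-time IBPB hook");
  #endif
          return 0;
  }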
Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/nospec-branch.h | 3 ++- arch/x86/kernel/cpu/bugs.c | 23 +++++++++++++++++++++++ 2 files changed, 25 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 01e90e68f0c7..4a12dfdd317c 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -173,7 +173,8 @@ * where we have a stack but before any RET instruction. */ .macro UNTRAIN_RET -#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) +#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_IBPB_ENTRY) || \ + defined(CONFIG_CPU_SRSO) ANNOTATE_UNRET_END ALTERNATIVE_2 "", \ CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index ea7707b43d3f..fbb96bdfd8ba 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2315,18 +2315,21 @@ enum srso_mitigation { SRSO_MITIGATION_NONE, SRSO_MITIGATION_MICROCODE, SRSO_MITIGATION_SAFE_RET, + SRSO_MITIGATION_IBPB, }; enum srso_mitigation_cmd { SRSO_CMD_OFF, SRSO_CMD_MICROCODE, SRSO_CMD_SAFE_RET, + SRSO_CMD_IBPB, }; static const char * const srso_strings[] = { [SRSO_MITIGATION_NONE] = "Vulnerable", [SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode", [SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET", + [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", }; static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE; @@ -2343,6 +2346,8 @@ static int __init srso_parse_cmdline(char *str) srso_cmd = SRSO_CMD_MICROCODE; else if (!strcmp(str, "safe-ret")) srso_cmd = SRSO_CMD_SAFE_RET; + else if (!strcmp(str, "ibpb")) + srso_cmd = SRSO_CMD_IBPB; else pr_err("Ignoring unknown SRSO option (%s).", str); @@ -2384,6 +2389,14 @@ static void __init srso_select_mitigation(void) setup_force_cpu_cap(X86_FEATURE_SRSO_NO); } + if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { + if (has_microcode) { + pr_err("Retbleed IBPB mitigation enabled, using same for SRSO\n"); + srso_mitigation = SRSO_MITIGATION_IBPB; + goto pred_cmd; + } + } + switch (srso_cmd) { case SRSO_CMD_OFF: return; @@ -2408,6 +2421,16 @@ static void __init srso_select_mitigation(void) } break; + case SRSO_CMD_IBPB: + if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) { + if (has_microcode) { + setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB); + srso_mitigation = SRSO_MITIGATION_IBPB; + } + } else { + pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); + goto pred_cmd; + } default: break; } From 0071b17eb66b12151f83cdbfc9824743004f87eb Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Fri, 7 Jul 2023 13:53:41 +0200 Subject: [PATCH 187/513] x86/srso: Add IBPB on VMEXIT Upstream commit: d893832d0e1ef41c72cdae444268c1d64a2be8ad Add the option to flush IBPB only on VMEXIT in order to protect from malicious guests but one otherwise trusts the software that runs on the hypervisor. 
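Where the barrier actually fires differs between the two IBPB flavours, which is easy to lose in the diff; the stand-alone sketch below summarises the placement. The spec_rstack_overflow= parameter name is taken from the kernel's admin-guide documentation and is not visible in the hunks here.

  #include <stdbool.h>
  #include <stdio.h>

  /* Toy summary of where the barrier fires for each IBPB-based mode. */
  static const char *barrier_site(bool entry_ibpb, bool ibpb_on_vmexit)
  {
          if (entry_ibpb)
                  return "every kernel entry (the VMEXIT path included)";
          if (ibpb_on_vmexit)
                  return "VMEXIT only; svm_vcpu_load() skips its own IBPB";
          return "vmcb switch in svm_vcpu_load() only (previous behaviour)";
  }

  int main(void)
  {
          printf("spec_rstack_overflow=ibpb        -> %s\n",
                 barrier_site(true, false));
          printf("spec_rstack_overflow=ibpb-vmexit -> %s\n",
                 barrier_site(false, true));
          return 0;
  }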
Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/kernel/cpu/bugs.c | 19 +++++++++++++++++++ arch/x86/kvm/svm/svm.c | 4 +++- arch/x86/kvm/svm/vmenter.S | 3 +++ 4 files changed, 26 insertions(+), 1 deletion(-) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 7a6e846d4fe6..608ffc45fc0e 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -308,6 +308,7 @@ #define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */ #define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */ +#define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index fbb96bdfd8ba..3f490db41a39 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2316,6 +2316,7 @@ enum srso_mitigation { SRSO_MITIGATION_MICROCODE, SRSO_MITIGATION_SAFE_RET, SRSO_MITIGATION_IBPB, + SRSO_MITIGATION_IBPB_ON_VMEXIT, }; enum srso_mitigation_cmd { @@ -2323,6 +2324,7 @@ enum srso_mitigation_cmd { SRSO_CMD_MICROCODE, SRSO_CMD_SAFE_RET, SRSO_CMD_IBPB, + SRSO_CMD_IBPB_ON_VMEXIT, }; static const char * const srso_strings[] = { @@ -2330,6 +2332,7 @@ static const char * const srso_strings[] = { [SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode", [SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET", [SRSO_MITIGATION_IBPB] = "Mitigation: IBPB", + [SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only" }; static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE; @@ -2348,6 +2351,8 @@ static int __init srso_parse_cmdline(char *str) srso_cmd = SRSO_CMD_SAFE_RET; else if (!strcmp(str, "ibpb")) srso_cmd = SRSO_CMD_IBPB; + else if (!strcmp(str, "ibpb-vmexit")) + srso_cmd = SRSO_CMD_IBPB_ON_VMEXIT; else pr_err("Ignoring unknown SRSO option (%s).", str); @@ -2431,6 +2436,20 @@ static void __init srso_select_mitigation(void) pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n"); goto pred_cmd; } + break; + + case SRSO_CMD_IBPB_ON_VMEXIT: + if (IS_ENABLED(CONFIG_CPU_SRSO)) { + if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) { + setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT); + srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT; + } + } else { + pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); + goto pred_cmd; + } + break; + default: break; } diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 302a4669c5a1..d63c3843e493 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1489,7 +1489,9 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu) if (sd->current_vmcb != svm->vmcb) { sd->current_vmcb = svm->vmcb; - indirect_branch_prediction_barrier(); + + if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT)) + indirect_branch_prediction_barrier(); } if (kvm_vcpu_apicv_active(vcpu)) avic_vcpu_load(vcpu, cpu); diff --git a/arch/x86/kvm/svm/vmenter.S b/arch/x86/kvm/svm/vmenter.S index 723f8534986c..f96060855522 100644 --- a/arch/x86/kvm/svm/vmenter.S +++ b/arch/x86/kvm/svm/vmenter.S @@ -119,6 +119,9 @@ SYM_FUNC_START(__svm_vcpu_run) */ UNTRAIN_RET + /* SRSO */ + ALTERNATIVE "", "call entry_ibpb", X86_FEATURE_IBPB_ON_VMEXIT + /* * Clear all general purpose registers except RSP and RAX to prevent * speculative use of the guest's 
values, even those that are reloaded From df4c3823cba5c43e2321eface8501bfafe76d0d9 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Fri, 28 Jul 2023 17:28:43 -0500 Subject: [PATCH 188/513] x86/srso: Fix return thunks in generated code Upstream commit: 238ec850b95a02dcdff3edc86781aa913549282f Set X86_FEATURE_RETHUNK when enabling the SRSO mitigation so that generated code (e.g., ftrace, static call, eBPF) generates "jmp __x86_return_thunk" instead of RET. [ bp: Add a comment. ] Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Signed-off-by: Josh Poimboeuf Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/alternative.c | 4 +--- arch/x86/kernel/cpu/bugs.c | 6 ++++++ 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c index 2c7d97c45796..43dd7f281a21 100644 --- a/arch/x86/kernel/alternative.c +++ b/arch/x86/kernel/alternative.c @@ -523,9 +523,7 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes) { int i = 0; - if (cpu_feature_enabled(X86_FEATURE_RETHUNK) || - cpu_feature_enabled(X86_FEATURE_SRSO) || - cpu_feature_enabled(X86_FEATURE_SRSO_ALIAS)) + if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) return -1; bytes[i++] = RET_INSN_OPCODE; diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 3f490db41a39..c5d9a81234d9 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2415,6 +2415,12 @@ static void __init srso_select_mitigation(void) case SRSO_CMD_SAFE_RET: if (IS_ENABLED(CONFIG_CPU_SRSO)) { + /* + * Enable the return thunk for generated code + * like ftrace, static_call, etc. + */ + setup_force_cpu_cap(X86_FEATURE_RETHUNK); + if (boot_cpu_data.x86 == 0x19) setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); else From 153f9a7b02d4f292671e81077e901ef01e123a9f Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Mon, 7 Aug 2023 10:46:04 +0200 Subject: [PATCH 189/513] x86/srso: Tie SBPB bit setting to microcode patch detection commit 5a15d8348881e9371afdf9f5357a135489496955 upstream. The SBPB bit in MSR_IA32_PRED_CMD is supported only after a microcode patch has been applied so set X86_FEATURE_SBPB only then. Otherwise, guests would attempt to set that bit and #GP on the MSR write. While at it, make SMT detection more robust as some guests - depending on how and what CPUID leafs their report - lead to cpu_smt_control getting set to CPU_SMT_NOT_SUPPORTED but SRSO_NO should be set for any guest incarnation where one simply cannot do SMT, for whatever reason. Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Reported-by: Konrad Rzeszutek Wilk Reported-by: Salvatore Bonaccorso Signed-off-by: Borislav Petkov (AMD) Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/amd.c | 19 ++++++++++++------- arch/x86/kernel/cpu/bugs.c | 7 +++---- 2 files changed, 15 insertions(+), 11 deletions(-) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 9b8a7ec39561..1b90eb6ea503 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1272,16 +1272,21 @@ EXPORT_SYMBOL_GPL(amd_get_highest_perf); bool cpu_has_ibpb_brtype_microcode(void) { - u8 fam = boot_cpu_data.x86; - + switch (boot_cpu_data.x86) { /* Zen1/2 IBPB flushes branch type predictions too. */ - if (fam == 0x17) + case 0x17: return boot_cpu_has(X86_FEATURE_AMD_IBPB); - /* Poke the MSR bit on Zen3/4 to check its presence. 
*/ - else if (fam == 0x19) - return !wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB); - else + case 0x19: + /* Poke the MSR bit on Zen3/4 to check its presence. */ + if (!wrmsrl_safe(MSR_IA32_PRED_CMD, PRED_CMD_SBPB)) { + setup_force_cpu_cap(X86_FEATURE_SBPB); + return true; + } else { + return false; + } + default: return false; + } } static void zenbleed_check_cpu(void *unused) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index c5d9a81234d9..73dad1400633 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2383,14 +2383,13 @@ static void __init srso_select_mitigation(void) * flags for guests. */ setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE); - setup_force_cpu_cap(X86_FEATURE_SBPB); /* * Zen1/2 with SMT off aren't vulnerable after the right * IBPB microcode has been applied. */ if ((boot_cpu_data.x86 < 0x19) && - (cpu_smt_control == CPU_SMT_DISABLED)) + (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) setup_force_cpu_cap(X86_FEATURE_SRSO_NO); } @@ -2463,8 +2462,8 @@ static void __init srso_select_mitigation(void) pr_info("%s%s\n", srso_strings[srso_mitigation], (has_microcode ? "" : ", no microcode")); pred_cmd: - if (boot_cpu_has(X86_FEATURE_SRSO_NO) || - srso_cmd == SRSO_CMD_OFF) + if ((boot_cpu_has(X86_FEATURE_SRSO_NO) || srso_cmd == SRSO_CMD_OFF) && + boot_cpu_has(X86_FEATURE_SBPB)) x86_pred_cmd = PRED_CMD_SBPB; } From b14a3924c2675c22e07a5a190223b6b6cdc2867d Mon Sep 17 00:00:00 2001 From: Ross Lagerwall Date: Thu, 3 Aug 2023 08:41:22 +0200 Subject: [PATCH 190/513] xen/netback: Fix buffer overrun triggered by unusual packet commit 534fc31d09b706a16d83533e16b5dc855caf7576 upstream. It is possible that a guest can send a packet that contains a head + 18 slots and yet has a len <= XEN_NETBACK_TX_COPY_LEN. This causes nr_slots to underflow in xenvif_get_requests() which then causes the subsequent loop's termination condition to be wrong, causing a buffer overrun of queue->tx_map_ops. Rework the code to account for the extra frag_overflow slots. This is CVE-2023-34319 / XSA-432. 
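The failure mode is a plain unsigned wrap-around: nr_slots is computed without the overflow frags, more slots are consumed than it accounts for, and the loop bound explodes. The stand-alone illustration below uses simplified numbers, not the driver's exact expressions:

  #include <stdio.h>

  int main(void)
  {
          unsigned int nr_slots = 1;      /* computed without frag_overflow */
          unsigned int consumed = 2;      /* more slots eaten than predicted */

          nr_slots -= consumed;           /* wraps: unsigned has no negatives */
          printf("nr_slots is now %u\n", nr_slots);   /* 4294967295 */

          /*
           * A loop of the form
           *        for (i = 0; i < nr_slots; i++)
           *                gop++;
           * now runs billions of iterations and walks far past the end of
           * queue->tx_map_ops.  Bounding the loop by MAX_SKB_FRAGS and by
           * "nr_slots > 0", as the fix below does, removes both problems.
           */
          return 0;
  }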
Fixes: ad7f402ae4f4 ("xen/netback: Ensure protocol headers don't fall in the non-linear area") Signed-off-by: Ross Lagerwall Reviewed-by: Paul Durrant Reviewed-by: Wei Liu Signed-off-by: Juergen Gross Signed-off-by: Greg Kroah-Hartman --- drivers/net/xen-netback/netback.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index 63118b56c528..5017033c705a 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -396,7 +396,7 @@ static void xenvif_get_requests(struct xenvif_queue *queue, struct gnttab_map_grant_ref *gop = queue->tx_map_ops + *map_ops; struct xen_netif_tx_request *txp = first; - nr_slots = shinfo->nr_frags + 1; + nr_slots = shinfo->nr_frags + frag_overflow + 1; copy_count(skb) = 0; XENVIF_TX_CB(skb)->split_mask = 0; @@ -462,8 +462,8 @@ static void xenvif_get_requests(struct xenvif_queue *queue, } } - for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; - shinfo->nr_frags++, gop++) { + for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS; + shinfo->nr_frags++, gop++, nr_slots--) { index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; xenvif_tx_create_map_op(queue, pending_idx, txp, @@ -476,12 +476,12 @@ static void xenvif_get_requests(struct xenvif_queue *queue, txp++; } - if (frag_overflow) { + if (nr_slots > 0) { shinfo = skb_shinfo(nskb); frags = shinfo->frags; - for (shinfo->nr_frags = 0; shinfo->nr_frags < frag_overflow; + for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; shinfo->nr_frags++, txp++, gop++) { index = pending_index(queue->pending_cons++); pending_idx = queue->pending_ring[index]; @@ -492,6 +492,11 @@ static void xenvif_get_requests(struct xenvif_queue *queue, } skb_shinfo(skb)->frag_list = nskb; + } else if (nskb) { + /* A frag_list skb was allocated but it is no longer needed + * because enough slots were converted to copy ops above. + */ + kfree_skb(nskb); } (*copy_ops) = cop - queue->tx_copy_ops; From 754e0c7c4a308ca844a711b435d7efdf0f84d02e Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 8 Aug 2023 19:20:48 +0200 Subject: [PATCH 191/513] x86: fix backwards merge of GDS/SRSO bit Stable-tree-only change. Due to the way the GDS and SRSO patches flowed into the stable tree, it was a 50% chance that the merge of the which value GDS and SRSO should be. Of course, I lost that bet, and chose the opposite of what Linus chose in commit 64094e7e3118 ("Merge tag 'gds-for-linus-2023-08-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip") Fix this up by switching the values to match what is now in Linus's tree as that is the correct value to mirror. 
Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/common.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 3eefd31af923..54a0b3833ffe 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -1134,10 +1134,10 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = { #define RETBLEED BIT(3) /* CPU is affected by SMT (cross-thread) return predictions */ #define SMT_RSB BIT(4) -/* CPU is affected by GDS */ -#define GDS BIT(5) /* CPU is affected by SRSO */ -#define SRSO BIT(6) +#define SRSO BIT(5) +/* CPU is affected by GDS */ +#define GDS BIT(6) static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = { VULNBL_INTEL_STEPPINGS(IVYBRIDGE, X86_STEPPING_ANY, SRBDS), From c275eaaaa34260e6c907bc5e7ee07c096bc45064 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Tue, 8 Aug 2023 19:58:35 +0200 Subject: [PATCH 192/513] Linux 5.15.125 Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 6fb94face8d7..a90f955e14ab 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 15 -SUBLEVEL = 124 +SUBLEVEL = 125 EXTRAVERSION = NAME = Trick or Treat From c12fa4ac8997d785163c1ae0ed26332cf98d7b85 Mon Sep 17 00:00:00 2001 From: Jens Axboe Date: Tue, 1 Aug 2023 08:39:47 -0600 Subject: [PATCH 193/513] io_uring: gate iowait schedule on having pending requests Commit 7b72d661f1f2f950ab8c12de7e2bc48bdac8ed69 upstream. A previous commit made all cqring waits marked as iowait, as a way to improve performance for short schedules with pending IO. However, for use cases that have a special reaper thread that does nothing but wait on events on the ring, this causes a cosmetic issue where we know have one core marked as being "busy" with 100% iowait. While this isn't a grave issue, it is confusing to users. Rather than always mark us as being in iowait, gate setting of current->in_iowait to 1 by whether or not the waiting task has pending requests. 
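For reference, the reaper pattern described above looks roughly like the liburing sketch below. Nothing in it is part of the patch; it only shows the kind of waiter whose accounting changes, and with no requests in flight the wait no longer counts as iowait:

  /* Build with: cc reaper.c -luring */
  #include <errno.h>
  #include <liburing.h>
  #include <stdio.h>

  int main(void)
  {
          struct io_uring ring;
          struct io_uring_cqe *cqe;
          struct __kernel_timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

          if (io_uring_queue_init(8, &ring, 0) < 0)
                  return 1;

          /*
           * Nothing has been submitted, so nothing is in flight: the kernel
           * now sleeps here without setting current->in_iowait, and the CPU
           * no longer shows up as 100% iowait while this thread idles.
           */
          if (io_uring_wait_cqe_timeout(&ring, &cqe, &ts) == -ETIME)
                  puts("timed out with no completions, as expected");

          io_uring_queue_exit(&ring);
          return 0;
  }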
Cc: stable@vger.kernel.org Link: https://lore.kernel.org/io-uring/CAMEGJJ2RxopfNQ7GNLhr7X9=bHXKo+G5OOe0LUq=+UgLXsv1Xg@mail.gmail.com/ Link: https://bugzilla.kernel.org/show_bug.cgi?id=217699 Link: https://bugzilla.kernel.org/show_bug.cgi?id=217700 Reported-by: Oleksandr Natalenko Reported-by: Phil Elwell Tested-by: Andres Freund Fixes: 8a796565cec3 ("io_uring: Use io_schedule* in cqring wait") Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- io_uring/io_uring.c | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 774b1ae8adf7..6f1d88bfd690 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -7802,12 +7802,21 @@ static int io_run_task_work_sig(void) return -EINTR; } +static bool current_pending_io(void) +{ + struct io_uring_task *tctx = current->io_uring; + + if (!tctx) + return false; + return percpu_counter_read_positive(&tctx->inflight); +} + /* when returns >0, the caller should retry */ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, struct io_wait_queue *iowq, ktime_t *timeout) { - int token, ret; + int io_wait, ret; /* make sure we run task_work before checking for signals */ ret = io_run_task_work_sig(); @@ -7818,15 +7827,17 @@ static inline int io_cqring_wait_schedule(struct io_ring_ctx *ctx, return 1; /* - * Use io_schedule_prepare/finish, so cpufreq can take into account - * that the task is waiting for IO - turns out to be important for low - * QD IO. + * Mark us as being in io_wait if we have pending requests, so cpufreq + * can take into account that the task is waiting for IO - turns out + * to be important for low QD IO. */ - token = io_schedule_prepare(); + io_wait = current->in_iowait; + if (current_pending_io()) + current->in_iowait = 1; ret = 1; if (!schedule_hrtimeout(timeout, HRTIMER_MODE_ABS)) ret = -ETIME; - io_schedule_finish(token); + current->in_iowait = io_wait; return ret; } From 40601542c43cfdf7ea31edd8dfe2174813a159b8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 16 Nov 2022 22:40:17 +0100 Subject: [PATCH 194/513] perf: Fix function pointer case commit 1af6239d1d3e61d33fd2f0ba53d3d1a67cc50574 upstream. With the advent of CFI it is no longer acceptible to cast function pointers. 
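The underlying rule generalises: calling through a pointer of a different function type is undefined behaviour in plain C, and with CFI the type check makes such an indirect call trap at runtime, so the cast has to give way to a thin wrapper with the exact expected prototype. A stand-alone sketch of the before/after shape, with invented names:

  #include <stdio.h>

  typedef int (*remote_fn_t)(void *data);  /* callback type the API expects */

  struct ctx { int value; };

  static int do_restart(struct ctx *c)     /* worker with a typed argument */
  {
          return c->value * 2;
  }

  /*
   * BAD:  run_callback((remote_fn_t)do_restart, &c) - the cast silences the
   *       compiler but the prototypes do not match.
   * GOOD: a trivial wrapper with exactly the expected prototype.
   */
  static int do_restart_ipi(void *arg)
  {
          return do_restart(arg);
  }

  static int run_callback(remote_fn_t fn, void *data)
  {
          return fn(data);                 /* indirect, type-checked call */
  }

  int main(void)
  {
          struct ctx c = { .value = 21 };

          printf("%d\n", run_callback(do_restart_ipi, &c));
          return 0;
  }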
The robot complains thusly: kernel-events-core.c:warning:cast-from-int-(-)(struct-perf_cpu_pmu_context-)-to-remote_function_f-(aka-int-(-)(void-)-)-converts-to-incompatible-function-type Reported-by: kernel test robot Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Cixi Geng Signed-off-by: Greg Kroah-Hartman --- kernel/events/core.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/kernel/events/core.c b/kernel/events/core.c index 97052b2dff7e..c7f13da672c9 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1224,6 +1224,11 @@ static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) return 0; } +static int perf_mux_hrtimer_restart_ipi(void *arg) +{ + return perf_mux_hrtimer_restart(arg); +} + void perf_pmu_disable(struct pmu *pmu) { int *count = this_cpu_ptr(pmu->pmu_disable_count); @@ -11137,8 +11142,7 @@ perf_event_mux_interval_ms_store(struct device *dev, cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer); - cpu_function_call(cpu, - (remote_function_f)perf_mux_hrtimer_restart, cpuctx); + cpu_function_call(cpu, perf_mux_hrtimer_restart_ipi, cpuctx); } cpus_read_unlock(); mutex_unlock(&mux_interval_mutex); From 44b45e8161a5c3d19e291d2381a68b4d06b89ff7 Mon Sep 17 00:00:00 2001 From: Shay Drory Date: Thu, 13 Apr 2023 22:15:31 +0300 Subject: [PATCH 195/513] net/mlx5: Free irqs only on shutdown callback commit 9c2d08010963a61a171e8cb2852d3ce015b60cb4 upstream. Whenever a shutdown is invoked, free irqs only and keep mlx5_irq synthetic wrapper intact in order to avoid use-after-free on system shutdown. for example: ================================================================== BUG: KASAN: use-after-free in _find_first_bit+0x66/0x80 Read of size 8 at addr ffff88823fc0d318 by task kworker/u192:0/13608 CPU: 25 PID: 13608 Comm: kworker/u192:0 Tainted: G B W O 6.1.21-cloudflare-kasan-2023.3.21 #1 Hardware name: GIGABYTE R162-R2-GEN0/MZ12-HD2-CD, BIOS R14 05/03/2021 Workqueue: mlx5e mlx5e_tx_timeout_work [mlx5_core] Call Trace: dump_stack_lvl+0x34/0x48 print_report+0x170/0x473 ? _find_first_bit+0x66/0x80 kasan_report+0xad/0x130 ? _find_first_bit+0x66/0x80 _find_first_bit+0x66/0x80 mlx5e_open_channels+0x3c5/0x3a10 [mlx5_core] ? console_unlock+0x2fa/0x430 ? _raw_spin_lock_irqsave+0x8d/0xf0 ? _raw_spin_unlock_irqrestore+0x42/0x80 ? preempt_count_add+0x7d/0x150 ? __wake_up_klogd.part.0+0x7d/0xc0 ? vprintk_emit+0xfe/0x2c0 ? mlx5e_trigger_napi_sched+0x40/0x40 [mlx5_core] ? dev_attr_show.cold+0x35/0x35 ? devlink_health_do_dump.part.0+0x174/0x340 ? devlink_health_report+0x504/0x810 ? mlx5e_reporter_tx_timeout+0x29d/0x3a0 [mlx5_core] ? mlx5e_tx_timeout_work+0x17c/0x230 [mlx5_core] ? process_one_work+0x680/0x1050 mlx5e_safe_switch_params+0x156/0x220 [mlx5_core] ? mlx5e_switch_priv_channels+0x310/0x310 [mlx5_core] ? mlx5_eq_poll_irq_disabled+0xb6/0x100 [mlx5_core] mlx5e_tx_reporter_timeout_recover+0x123/0x240 [mlx5_core] ? __mutex_unlock_slowpath.constprop.0+0x2b0/0x2b0 devlink_health_reporter_recover+0xa6/0x1f0 devlink_health_report+0x2f7/0x810 ? vsnprintf+0x854/0x15e0 mlx5e_reporter_tx_timeout+0x29d/0x3a0 [mlx5_core] ? mlx5e_reporter_tx_err_cqe+0x1a0/0x1a0 [mlx5_core] ? mlx5e_tx_reporter_timeout_dump+0x50/0x50 [mlx5_core] ? mlx5e_tx_reporter_dump_sq+0x260/0x260 [mlx5_core] ? newidle_balance+0x9b7/0xe30 ? psi_group_change+0x6a7/0xb80 ? mutex_lock+0x96/0xf0 ? __mutex_lock_slowpath+0x10/0x10 mlx5e_tx_timeout_work+0x17c/0x230 [mlx5_core] process_one_work+0x680/0x1050 worker_thread+0x5a0/0xeb0 ? 
process_one_work+0x1050/0x1050 kthread+0x2a2/0x340 ? kthread_complete_and_exit+0x20/0x20 ret_from_fork+0x22/0x30 Freed by task 1: kasan_save_stack+0x23/0x50 kasan_set_track+0x21/0x30 kasan_save_free_info+0x2a/0x40 ____kasan_slab_free+0x169/0x1d0 slab_free_freelist_hook+0xd2/0x190 __kmem_cache_free+0x1a1/0x2f0 irq_pool_free+0x138/0x200 [mlx5_core] mlx5_irq_table_destroy+0xf6/0x170 [mlx5_core] mlx5_core_eq_free_irqs+0x74/0xf0 [mlx5_core] shutdown+0x194/0x1aa [mlx5_core] pci_device_shutdown+0x75/0x120 device_shutdown+0x35c/0x620 kernel_restart+0x60/0xa0 __do_sys_reboot+0x1cb/0x2c0 do_syscall_64+0x3b/0x90 entry_SYSCALL_64_after_hwframe+0x4b/0xb5 The buggy address belongs to the object at ffff88823fc0d300 which belongs to the cache kmalloc-192 of size 192 The buggy address is located 24 bytes inside of 192-byte region [ffff88823fc0d300, ffff88823fc0d3c0) The buggy address belongs to the physical page: page:0000000010139587 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x23fc0c head:0000000010139587 order:1 compound_mapcount:0 compound_pincount:0 flags: 0x2ffff800010200(slab|head|node=0|zone=2|lastcpupid=0x1ffff) raw: 002ffff800010200 0000000000000000 dead000000000122 ffff88810004ca00 raw: 0000000000000000 0000000000200020 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff88823fc0d200: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff88823fc0d280: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc >ffff88823fc0d300: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff88823fc0d380: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc ffff88823fc0d400: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ================================================================== general protection fault, probably for non-canonical address 0xdffffc005c40d7ac: 0000 [#1] PREEMPT SMP KASAN NOPTI KASAN: probably user-memory-access in range [0x00000002e206bd60-0x00000002e206bd67] CPU: 25 PID: 13608 Comm: kworker/u192:0 Tainted: G B W O 6.1.21-cloudflare-kasan-2023.3.21 #1 Hardware name: GIGABYTE R162-R2-GEN0/MZ12-HD2-CD, BIOS R14 05/03/2021 Workqueue: mlx5e mlx5e_tx_timeout_work [mlx5_core] RIP: 0010:__alloc_pages+0x141/0x5c0 Call Trace: ? sysvec_apic_timer_interrupt+0xa0/0xc0 ? asm_sysvec_apic_timer_interrupt+0x16/0x20 ? __alloc_pages_slowpath.constprop.0+0x1ec0/0x1ec0 ? _raw_spin_unlock_irqrestore+0x3d/0x80 __kmalloc_large_node+0x80/0x120 ? kvmalloc_node+0x4e/0x170 __kmalloc_node+0xd4/0x150 kvmalloc_node+0x4e/0x170 mlx5e_open_channels+0x631/0x3a10 [mlx5_core] ? console_unlock+0x2fa/0x430 ? _raw_spin_lock_irqsave+0x8d/0xf0 ? _raw_spin_unlock_irqrestore+0x42/0x80 ? preempt_count_add+0x7d/0x150 ? __wake_up_klogd.part.0+0x7d/0xc0 ? vprintk_emit+0xfe/0x2c0 ? mlx5e_trigger_napi_sched+0x40/0x40 [mlx5_core] ? dev_attr_show.cold+0x35/0x35 ? devlink_health_do_dump.part.0+0x174/0x340 ? devlink_health_report+0x504/0x810 ? mlx5e_reporter_tx_timeout+0x29d/0x3a0 [mlx5_core] ? mlx5e_tx_timeout_work+0x17c/0x230 [mlx5_core] ? process_one_work+0x680/0x1050 mlx5e_safe_switch_params+0x156/0x220 [mlx5_core] ? mlx5e_switch_priv_channels+0x310/0x310 [mlx5_core] ? mlx5_eq_poll_irq_disabled+0xb6/0x100 [mlx5_core] mlx5e_tx_reporter_timeout_recover+0x123/0x240 [mlx5_core] ? __mutex_unlock_slowpath.constprop.0+0x2b0/0x2b0 devlink_health_reporter_recover+0xa6/0x1f0 devlink_health_report+0x2f7/0x810 ? vsnprintf+0x854/0x15e0 mlx5e_reporter_tx_timeout+0x29d/0x3a0 [mlx5_core] ? mlx5e_reporter_tx_err_cqe+0x1a0/0x1a0 [mlx5_core] ? 
mlx5e_tx_reporter_timeout_dump+0x50/0x50 [mlx5_core] ? mlx5e_tx_reporter_dump_sq+0x260/0x260 [mlx5_core] ? newidle_balance+0x9b7/0xe30 ? psi_group_change+0x6a7/0xb80 ? mutex_lock+0x96/0xf0 ? __mutex_lock_slowpath+0x10/0x10 mlx5e_tx_timeout_work+0x17c/0x230 [mlx5_core] process_one_work+0x680/0x1050 worker_thread+0x5a0/0xeb0 ? process_one_work+0x1050/0x1050 kthread+0x2a2/0x340 ? kthread_complete_and_exit+0x20/0x20 ret_from_fork+0x22/0x30 ---[ end trace 0000000000000000 ]--- RIP: 0010:__alloc_pages+0x141/0x5c0 Code: e0 39 a3 96 89 e9 b8 22 01 32 01 83 e1 0f 48 89 fa 01 c9 48 c1 ea 03 d3 f8 83 e0 03 89 44 24 6c 48 b8 00 00 00 00 00 fc ff df <80> 3c 02 00 0f 85 fc 03 00 00 89 e8 4a 8b 14 f5 e0 39 a3 96 4c 89 RSP: 0018:ffff888251f0f438 EFLAGS: 00010202 RAX: dffffc0000000000 RBX: 1ffff1104a3e1e8b RCX: 0000000000000000 RDX: 000000005c40d7ac RSI: 0000000000000003 RDI: 00000002e206bd60 RBP: 0000000000052dc0 R08: ffff8882b0044218 R09: ffff8882b0045e8a R10: fffffbfff300fefc R11: ffff888167af4000 R12: 0000000000000003 R13: 0000000000000000 R14: 00000000696c7070 R15: ffff8882373f4380 FS: 0000000000000000(0000) GS:ffff88bf2be80000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00005641d031eee8 CR3: 0000002e7ca14000 CR4: 0000000000350ee0 Kernel panic - not syncing: Fatal exception Kernel Offset: 0x11000000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff) ---[ end Kernel panic - not syncing: Fatal exception ]---] Reported-by: Frederick Lawler Link: https://lore.kernel.org/netdev/be5b9271-7507-19c5-ded1-fa78f1980e69@cloudflare.com Signed-off-by: Shay Drory Signed-off-by: Saeed Mahameed [hardik: Refer to the irqn member of the mlx5_irq struct, instead of the msi_map, since we don't have upstream v6.4 commit 235a25fe28de ("net/mlx5: Modify struct mlx5_irq to use struct msi_map")]. [hardik: Refer to the pf_pool member of the mlx5_irq_table struct, instead of pcif_pool, since we don't have upstream v6.4 commit 8bebfd767909 ("net/mlx5: Improve naming of pci function vectors")]. 
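The shape of the fix is a general use-after-free avoidance pattern: on shutdown, release the hardware interrupt but keep the software wrapper object alive, so late work (such as the TX-timeout recovery in the splat above) dereferences a quiesced object rather than freed memory. A stand-alone toy of that split, with invented names:

  #include <stdbool.h>
  #include <stdio.h>
  #include <stdlib.h>

  struct irq_wrapper {
          int irqn;
          bool armed;
  };

  /* Shutdown path: give back the IRQ, keep the wrapper (free_irq() analogue). */
  static void wrapper_free_irq(struct irq_wrapper *w)
  {
          w->armed = false;
  }

  /* Full teardown: only here is the wrapper memory itself released. */
  static void wrapper_destroy(struct irq_wrapper *w)
  {
          free(w);
  }

  int main(void)
  {
          struct irq_wrapper *w = calloc(1, sizeof(*w));

          if (!w)
                  return 1;
          w->irqn = 42;
          w->armed = true;

          wrapper_free_irq(w);
          /* A late reference still sees valid, quiesced state, not a UAF. */
          printf("irq %d armed=%d\n", w->irqn, w->armed);

          wrapper_destroy(w);
          return 0;
  }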
Signed-off-by: Hardik Garg Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 2 +- .../ethernet/mellanox/mlx5/core/mlx5_irq.h | 1 + .../net/ethernet/mellanox/mlx5/core/pci_irq.c | 29 +++++++++++++++++++ 3 files changed, 31 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c index 605c8ecc3610..ccccbac04428 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c @@ -981,7 +981,7 @@ void mlx5_core_eq_free_irqs(struct mlx5_core_dev *dev) mutex_lock(&table->lock); /* sync with create/destroy_async_eq */ if (!mlx5_core_is_sf(dev)) clear_rmap(dev); - mlx5_irq_table_destroy(dev); + mlx5_irq_table_free_irqs(dev); mutex_unlock(&table->lock); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h index abd024173c42..8cf40a3658d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_irq.h @@ -16,6 +16,7 @@ int mlx5_irq_table_init(struct mlx5_core_dev *dev); void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev); int mlx5_irq_table_create(struct mlx5_core_dev *dev); void mlx5_irq_table_destroy(struct mlx5_core_dev *dev); +void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev); int mlx5_irq_table_get_num_comp(struct mlx5_irq_table *table); int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table); struct mlx5_irq_table *mlx5_irq_table_get(struct mlx5_core_dev *dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c index 11f3649fdaab..df16dc35bb04 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pci_irq.c @@ -550,6 +550,24 @@ static void irq_pools_destroy(struct mlx5_irq_table *table) irq_pool_free(table->pf_pool); } +static void mlx5_irq_pool_free_irqs(struct mlx5_irq_pool *pool) +{ + struct mlx5_irq *irq; + unsigned long index; + + xa_for_each(&pool->irqs, index, irq) + free_irq(irq->irqn, &irq->nh); +} + +static void mlx5_irq_pools_free_irqs(struct mlx5_irq_table *table) +{ + if (table->sf_ctrl_pool) { + mlx5_irq_pool_free_irqs(table->sf_comp_pool); + mlx5_irq_pool_free_irqs(table->sf_ctrl_pool); + } + mlx5_irq_pool_free_irqs(table->pf_pool); +} + /* irq_table API */ int mlx5_irq_table_init(struct mlx5_core_dev *dev) @@ -630,6 +648,17 @@ void mlx5_irq_table_destroy(struct mlx5_core_dev *dev) pci_free_irq_vectors(dev->pdev); } +void mlx5_irq_table_free_irqs(struct mlx5_core_dev *dev) +{ + struct mlx5_irq_table *table = dev->priv.irq_table; + + if (mlx5_core_is_sf(dev)) + return; + + mlx5_irq_pools_free_irqs(table); + pci_free_irq_vectors(dev->pdev); +} + int mlx5_irq_table_get_sfs_vec(struct mlx5_irq_table *table) { if (table->sf_comp_pool) From 073699df4a09033d58ca20e63479c4a14891bc7e Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 2 Aug 2023 17:02:22 +0000 Subject: [PATCH 196/513] arm64: errata: Add workaround for TSB flush failures commit fa82d0b4b833790ac4572377fb777dcea24a9d69 upstream Arm Neoverse-N2 (#2067961) and Cortex-A710 (#2054223) suffers from errata, where a TSB (trace synchronization barrier) fails to flush the trace data completely, when executed from a trace prohibited region. In Linux we always execute it after we have moved the PE to trace prohibited region. So, we can apply the workaround every time a TSB is executed. 
The work around is to issue two TSB consecutively. NOTE: This errata is defined as LOCAL_CPU_ERRATUM, implying that a late CPU could be blocked from booting if it is the first CPU that requires the workaround. This is because we do not allow setting a cpu_hwcaps after the SMP boot. The other alternative is to use "this_cpu_has_cap()" instead of the faster system wide check, which may be a bit of an overhead, given we may have to do this in nvhe KVM host before a guest entry. Cc: Will Deacon Cc: Catalin Marinas Cc: Mathieu Poirier Cc: Mike Leach Cc: Mark Rutland Cc: Anshuman Khandual Cc: Marc Zyngier Acked-by: Catalin Marinas Reviewed-by: Mathieu Poirier Reviewed-by: Anshuman Khandual Signed-off-by: Suzuki K Poulose Link: https://lore.kernel.org/r/20211019163153.3692640-4-suzuki.poulose@arm.com Signed-off-by: Will Deacon Signed-off-by: Easwar Hariharan Signed-off-by: Greg Kroah-Hartman --- Documentation/arm64/silicon-errata.rst | 4 ++++ arch/arm64/Kconfig | 33 ++++++++++++++++++++++++++ arch/arm64/include/asm/barrier.h | 16 ++++++++++++- arch/arm64/kernel/cpu_errata.c | 19 +++++++++++++++ arch/arm64/tools/cpucaps | 1 + 5 files changed, 72 insertions(+), 1 deletion(-) diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index 076861b0f5ac..1de575fc135b 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -104,6 +104,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A710 | #2119858 | ARM64_ERRATUM_2119858 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N1 | #1349291 | N/A | @@ -112,6 +114,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N2 | #2139208 | ARM64_ERRATUM_2139208 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Neoverse-N2 | #2067961 | ARM64_ERRATUM_2067961 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | MMU-500 | #841119,826419 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index e5e35470647b..6dce6e56ee53 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -774,6 +774,39 @@ config ARM64_ERRATUM_2139208 If unsure, say Y. +config ARM64_WORKAROUND_TSB_FLUSH_FAILURE + bool + +config ARM64_ERRATUM_2054223 + bool "Cortex-A710: 2054223: workaround TSB instruction failing to flush trace" + default y + select ARM64_WORKAROUND_TSB_FLUSH_FAILURE + help + Enable workaround for ARM Cortex-A710 erratum 2054223 + + Affected cores may fail to flush the trace data on a TSB instruction, when + the PE is in trace prohibited state. This will cause losing a few bytes + of the trace cached. + + Workaround is to issue two TSB consecutively on affected cores. + + If unsure, say Y. 
+ +config ARM64_ERRATUM_2067961 + bool "Neoverse-N2: 2067961: workaround TSB instruction failing to flush trace" + default y + select ARM64_WORKAROUND_TSB_FLUSH_FAILURE + help + Enable workaround for ARM Neoverse-N2 erratum 2067961 + + Affected cores may fail to flush the trace data on a TSB instruction, when + the PE is in trace prohibited state. This will cause losing a few bytes + of the trace cached. + + Workaround is to issue two TSB consecutively on affected cores. + + If unsure, say Y. + config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h index 451e11e5fd23..1c5a00598458 100644 --- a/arch/arm64/include/asm/barrier.h +++ b/arch/arm64/include/asm/barrier.h @@ -23,7 +23,7 @@ #define dsb(opt) asm volatile("dsb " #opt : : : "memory") #define psb_csync() asm volatile("hint #17" : : : "memory") -#define tsb_csync() asm volatile("hint #18" : : : "memory") +#define __tsb_csync() asm volatile("hint #18" : : : "memory") #define csdb() asm volatile("hint #20" : : : "memory") #ifdef CONFIG_ARM64_PSEUDO_NMI @@ -46,6 +46,20 @@ #define dma_rmb() dmb(oshld) #define dma_wmb() dmb(oshst) + +#define tsb_csync() \ + do { \ + /* \ + * CPUs affected by Arm Erratum 2054223 or 2067961 needs \ + * another TSB to ensure the trace is flushed. The barriers \ + * don't have to be strictly back to back, as long as the \ + * CPU is in trace prohibited state. \ + */ \ + if (cpus_have_final_cap(ARM64_WORKAROUND_TSB_FLUSH_FAILURE)) \ + __tsb_csync(); \ + __tsb_csync(); \ + } while (0) + /* * Generate a mask for array_index__nospec() that is ~0UL when 0 <= idx < sz * and 0 otherwise. diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index d810d4b7b438..ab412b45732f 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -375,6 +375,18 @@ static const struct midr_range trbe_overwrite_fill_mode_cpus[] = { }; #endif /* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */ +#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE +static const struct midr_range tsb_flush_fail_cpus[] = { +#ifdef CONFIG_ARM64_ERRATUM_2067961 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), +#endif +#ifdef CONFIG_ARM64_ERRATUM_2054223 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), +#endif + {}, +}; +#endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */ + const struct arm64_cpu_capabilities arm64_errata[] = { #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE { @@ -606,6 +618,13 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus), }, +#endif +#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE + { + .desc = "ARM erratum 2067961 or 2054223", + .capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE, + ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus), + }, #endif { } diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 32fe50a3a26c..36ab307c69d4 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -57,6 +57,7 @@ WORKAROUND_1542419 WORKAROUND_1742098 WORKAROUND_2457168 WORKAROUND_TRBE_OVERWRITE_FILL_MODE +WORKAROUND_TSB_FLUSH_FAILURE WORKAROUND_CAVIUM_23154 WORKAROUND_CAVIUM_27456 WORKAROUND_CAVIUM_30115 From a850fa85d47790d132c0a6af3e6b041600dd4b69 Mon Sep 17 00:00:00 2001 From: Suzuki K Poulose Date: Wed, 2 Aug 2023 17:02:23 +0000 Subject: [PATCH 197/513] arm64: errata: Add detection for TRBE write to out-of-range commit 8d81b2a38ddfc4b03662d2359765648c8b4cc73c upstream Arm Neoverse-N2 and Cortex-A710 
cores are affected by an erratum where the trbe, under some circumstances, might write upto 64bytes to an address after the Limit as programmed by the TRBLIMITR_EL1.LIMIT. This might - - Corrupt a page in the ring buffer, which may corrupt trace from a previous session, consumed by userspace. - Hit the guard page at the end of the vmalloc area and raise a fault. To keep the handling simpler, we always leave the last page from the range, which TRBE is allowed to write. This can be achieved by ensuring that we always have more than a PAGE worth space in the range, while calculating the LIMIT for TRBE. And then the LIMIT pointer can be adjusted to leave the PAGE (TRBLIMITR.LIMIT -= PAGE_SIZE), out of the TRBE range while enabling it. This makes sure that the TRBE will only write to an area within its allowed limit (i.e, [head-head+size]) and we do not have to handle address faults within the driver. Cc: Anshuman Khandual Cc: Mathieu Poirier Cc: Mike Leach Cc: Leo Yan Cc: Will Deacon Cc: Mark Rutland Reviewed-by: Anshuman Khandual Reviewed-by: Mathieu Poirier Acked-by: Catalin Marinas Signed-off-by: Suzuki K Poulose Link: https://lore.kernel.org/r/20211019163153.3692640-5-suzuki.poulose@arm.com Signed-off-by: Will Deacon Signed-off-by: Easwar Hariharan Signed-off-by: Greg Kroah-Hartman --- Documentation/arm64/silicon-errata.rst | 4 +++ arch/arm64/Kconfig | 41 ++++++++++++++++++++++++++ arch/arm64/kernel/cpu_errata.c | 20 +++++++++++++ arch/arm64/tools/cpucaps | 1 + 4 files changed, 66 insertions(+) diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index 1de575fc135b..f64354f8a79f 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -106,6 +106,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Cortex-A710 | #2054223 | ARM64_ERRATUM_2054223 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Cortex-A710 | #2224489 | ARM64_ERRATUM_2224489 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N1 | #1188873,1418040| ARM64_ERRATUM_1418040 | +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N1 | #1349291 | N/A | @@ -116,6 +118,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | Neoverse-N2 | #2067961 | ARM64_ERRATUM_2067961 | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | Neoverse-N2 | #2253138 | ARM64_ERRATUM_2253138 | ++----------------+-----------------+-----------------+-----------------------------+ | ARM | MMU-500 | #841119,826419 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index 6dce6e56ee53..5ab4b0520eab 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -807,6 +807,47 @@ config ARM64_ERRATUM_2067961 If unsure, say Y. 
+config ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE + bool + +config ARM64_ERRATUM_2253138 + bool "Neoverse-N2: 2253138: workaround TRBE writing to address out-of-range" + depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in + depends on CORESIGHT_TRBE + default y + select ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE + help + This option adds the workaround for ARM Neoverse-N2 erratum 2253138. + + Affected Neoverse-N2 cores might write to an out-of-range address, not reserved + for TRBE. Under some conditions, the TRBE might generate a write to the next + virtually addressed page following the last page of the TRBE address space + (i.e., the TRBLIMITR_EL1.LIMIT), instead of wrapping around to the base. + + Work around this in the driver by always making sure that there is a + page beyond the TRBLIMITR_EL1.LIMIT, within the space allowed for the TRBE. + + If unsure, say Y. + +config ARM64_ERRATUM_2224489 + bool "Cortex-A710: 2224489: workaround TRBE writing to address out-of-range" + depends on COMPILE_TEST # Until the CoreSight TRBE driver changes are in + depends on CORESIGHT_TRBE + default y + select ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE + help + This option adds the workaround for ARM Cortex-A710 erratum 2224489. + + Affected Cortex-A710 cores might write to an out-of-range address, not reserved + for TRBE. Under some conditions, the TRBE might generate a write to the next + virtually addressed page following the last page of the TRBE address space + (i.e., the TRBLIMITR_EL1.LIMIT), instead of wrapping around to the base. + + Work around this in the driver by always making sure that there is a + page beyond the TRBLIMITR_EL1.LIMIT, within the space allowed for the TRBE. + + If unsure, say Y. + config CAVIUM_ERRATUM_22375 bool "Cavium erratum 22375, 24313" default y diff --git a/arch/arm64/kernel/cpu_errata.c b/arch/arm64/kernel/cpu_errata.c index ab412b45732f..bf69a20bc27f 100644 --- a/arch/arm64/kernel/cpu_errata.c +++ b/arch/arm64/kernel/cpu_errata.c @@ -387,6 +387,18 @@ static const struct midr_range tsb_flush_fail_cpus[] = { }; #endif /* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */ +#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE +static struct midr_range trbe_write_out_of_range_cpus[] = { +#ifdef CONFIG_ARM64_ERRATUM_2253138 + MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2), +#endif +#ifdef CONFIG_ARM64_ERRATUM_2224489 + MIDR_ALL_VERSIONS(MIDR_CORTEX_A710), +#endif + {}, +}; +#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */ + const struct arm64_cpu_capabilities arm64_errata[] = { #ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE { @@ -625,6 +637,14 @@ const struct arm64_cpu_capabilities arm64_errata[] = { .capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE, ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus), }, +#endif +#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE + { + .desc = "ARM erratum 2253138 or 2224489", + .capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE, + .type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE, + CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus), + }, #endif { } diff --git a/arch/arm64/tools/cpucaps b/arch/arm64/tools/cpucaps index 36ab307c69d4..fcaeec5a5125 100644 --- a/arch/arm64/tools/cpucaps +++ b/arch/arm64/tools/cpucaps @@ -58,6 +58,7 @@ WORKAROUND_1742098 WORKAROUND_2457168 WORKAROUND_TRBE_OVERWRITE_FILL_MODE WORKAROUND_TSB_FLUSH_FAILURE +WORKAROUND_TRBE_WRITE_OUT_OF_RANGE WORKAROUND_CAVIUM_23154 WORKAROUND_CAVIUM_27456 WORKAROUND_CAVIUM_30115 From 2de9f3dcfe63ce61372f0b626548bc6e7c0a27eb Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: 
Wed, 2 Aug 2023 17:02:24 +0000 Subject: [PATCH 198/513] iommu/arm-smmu-v3: Work around MMU-600 erratum 1076982 commit f322e8af35c7f23a8c08b595c38d6c855b2d836f upstream MMU-600 versions prior to r1p0 fail to correctly generate a WFE wakeup event when the command queue transitions fom full to non-full. We can easily work around this by simply hiding the SEV capability such that we fall back to polling for space in the queue - since MMU-600 implements MSIs we wouldn't expect to need SEV for sync completion either, so this should have little to no impact. Signed-off-by: Robin Murphy Reviewed-by: Nicolin Chen Tested-by: Nicolin Chen Link: https://lore.kernel.org/r/08adbe3d01024d8382a478325f73b56851f76e49.1683731256.git.robin.murphy@arm.com Signed-off-by: Will Deacon Signed-off-by: Easwar Hariharan Signed-off-by: Greg Kroah-Hartman --- Documentation/arm64/silicon-errata.rst | 2 ++ drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 29 +++++++++++++++++++++ drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 6 +++++ 3 files changed, 37 insertions(+) diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index f64354f8a79f..55e1e074dec1 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -122,6 +122,8 @@ stable kernels. +----------------+-----------------+-----------------+-----------------------------+ | ARM | MMU-500 | #841119,826419 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | MMU-600 | #1076982 | N/A | ++----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ | Broadcom | Brahma-B53 | N/A | ARM64_ERRATUM_845719 | +----------------+-----------------+-----------------+-----------------------------+ diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index bcdb2cbdda97..782d040a829c 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -3459,6 +3459,33 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) return 0; } +#define IIDR_IMPLEMENTER_ARM 0x43b +#define IIDR_PRODUCTID_ARM_MMU_600 0x483 + +static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu) +{ + u32 reg; + unsigned int implementer, productid, variant, revision; + + reg = readl_relaxed(smmu->base + ARM_SMMU_IIDR); + implementer = FIELD_GET(IIDR_IMPLEMENTER, reg); + productid = FIELD_GET(IIDR_PRODUCTID, reg); + variant = FIELD_GET(IIDR_VARIANT, reg); + revision = FIELD_GET(IIDR_REVISION, reg); + + switch (implementer) { + case IIDR_IMPLEMENTER_ARM: + switch (productid) { + case IIDR_PRODUCTID_ARM_MMU_600: + /* Arm erratum 1076982 */ + if (variant == 0 && revision <= 2) + smmu->features &= ~ARM_SMMU_FEAT_SEV; + break; + } + break; + } +} + static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) { u32 reg; @@ -3664,6 +3691,8 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) smmu->ias = max(smmu->ias, smmu->oas); + arm_smmu_device_iidr_probe(smmu); + if (arm_smmu_sva_supported(smmu)) smmu->features |= ARM_SMMU_FEAT_SVA; diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index 4cb136f07914..5964e02c4e57 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -69,6 +69,12 @@ #define IDR5_VAX GENMASK(11, 10) #define 
IDR5_VAX_52_BIT 1 +#define ARM_SMMU_IIDR 0x18 +#define IIDR_PRODUCTID GENMASK(31, 20) +#define IIDR_VARIANT GENMASK(19, 16) +#define IIDR_REVISION GENMASK(15, 12) +#define IIDR_IMPLEMENTER GENMASK(11, 0) + #define ARM_SMMU_CR0 0x20 #define CR0_ATSCHK (1 << 4) #define CR0_CMDQEN (1 << 3) From fd86b59442155b82b86d4997e463a0cd5d7ba7c9 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 2 Aug 2023 17:02:25 +0000 Subject: [PATCH 199/513] iommu/arm-smmu-v3: Document MMU-700 erratum 2812531 commit 309a15cb16bb075da1c99d46fb457db6a1a2669e upstream To work around MMU-700 erratum 2812531 we need to ensure that certain sequences of commands cannot be issued without an intervening sync. In practice this falls out of our current command-batching machinery anyway - each batch only contains a single type of invalidation command, and ends with a sync. The only exception is when a batch is sufficiently large to need issuing across multiple command queue slots, wherein the earlier slots will not contain a sync and thus may in theory interleave with another batch being issued in parallel to create an affected sequence across the slot boundary. Since MMU-700 supports range invalidate commands and thus we will prefer to use them (which also happens to avoid conditions for other errata), I'm not entirely sure it's even possible for a single high-level invalidate call to generate a batch of more than 63 commands, but for the sake of robustness and documentation, wire up an option to enforce that a sync is always inserted for every slot issued. The other aspect is that the relative order of DVM commands cannot be controlled, so DVM cannot be used. Again that is already the status quo, but since we have at least defined ARM_SMMU_FEAT_BTM, we can explicitly disable it for documentation purposes even if it's not wired up anywhere yet. Signed-off-by: Robin Murphy Reviewed-by: Nicolin Chen Link: https://lore.kernel.org/r/330221cdfd0003cd51b6c04e7ff3566741ad8374.1683731256.git.robin.murphy@arm.com Signed-off-by: Will Deacon Signed-off-by: Easwar Hariharan Signed-off-by: Greg Kroah-Hartman --- Documentation/arm64/silicon-errata.rst | 2 ++ drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 12 ++++++++++++ drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 1 + 3 files changed, 15 insertions(+) diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index 55e1e074dec1..322df8abbc0e 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -124,6 +124,8 @@ stable kernels. 
+----------------+-----------------+-----------------+-----------------------------+ | ARM | MMU-600 | #1076982 | N/A | +----------------+-----------------+-----------------+-----------------------------+ +| ARM | MMU-700 | #2812531 | N/A | ++----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ | Broadcom | Brahma-B53 | N/A | ARM64_ERRATUM_845719 | +----------------+-----------------+-----------------+-----------------------------+ diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 782d040a829c..6a551a48d271 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -897,6 +897,12 @@ static void arm_smmu_cmdq_batch_add(struct arm_smmu_device *smmu, struct arm_smmu_cmdq_batch *cmds, struct arm_smmu_cmdq_ent *cmd) { + if (cmds->num == CMDQ_BATCH_ENTRIES - 1 && + (smmu->options & ARM_SMMU_OPT_CMDQ_FORCE_SYNC)) { + arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, true); + cmds->num = 0; + } + if (cmds->num == CMDQ_BATCH_ENTRIES) { arm_smmu_cmdq_issue_cmdlist(smmu, cmds->cmds, cmds->num, false); cmds->num = 0; @@ -3461,6 +3467,7 @@ static int arm_smmu_device_reset(struct arm_smmu_device *smmu, bool bypass) #define IIDR_IMPLEMENTER_ARM 0x43b #define IIDR_PRODUCTID_ARM_MMU_600 0x483 +#define IIDR_PRODUCTID_ARM_MMU_700 0x487 static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu) { @@ -3481,6 +3488,11 @@ static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu) if (variant == 0 && revision <= 2) smmu->features &= ~ARM_SMMU_FEAT_SEV; break; + case IIDR_PRODUCTID_ARM_MMU_700: + /* Arm erratum 2812531 */ + smmu->features &= ~ARM_SMMU_FEAT_BTM; + smmu->options |= ARM_SMMU_OPT_CMDQ_FORCE_SYNC; + break; } break; } diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index 5964e02c4e57..abaecdf8d5d2 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -651,6 +651,7 @@ struct arm_smmu_device { #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) #define ARM_SMMU_OPT_PAGE0_REGS_ONLY (1 << 1) #define ARM_SMMU_OPT_MSIPOLL (1 << 2) +#define ARM_SMMU_OPT_CMDQ_FORCE_SYNC (1 << 3) u32 options; struct arm_smmu_cmdq cmdq; From 744e6b80b83020d5e7de84bd8b6ab4d8ec28025d Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 2 Aug 2023 17:02:26 +0000 Subject: [PATCH 200/513] iommu/arm-smmu-v3: Add explicit feature for nesting commit 1d9777b9f3d55b4b6faf186ba4f1d6fb560c0523 upstream In certain cases we may want to refuse to allow nested translation even when both stages are implemented, so let's add an explicit feature for nesting support which we can control in its own right. For now this merely serves as documentation, but it means a nice convenient check will be ready and waiting for the future nesting code. 
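As an illustration only (this series adds no consumer of the flag), a future
nested-domain setup path would be expected to gate on it along these lines;
the surrounding function is assumed here and is not part of the patch:

	/* hypothetical check in a future nesting path */
	if (!(smmu->features & ARM_SMMU_FEAT_NESTING))
		return -EOPNOTSUPP;
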
Signed-off-by: Robin Murphy Reviewed-by: Nicolin Chen Link: https://lore.kernel.org/r/136c3f4a3a84cc14a5a1978ace57dfd3ed67b688.1683731256.git.robin.murphy@arm.com Signed-off-by: Will Deacon Signed-off-by: Easwar Hariharan Signed-off-by: Greg Kroah-Hartman --- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 4 ++++ drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h | 1 + 2 files changed, 5 insertions(+) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 6a551a48d271..7cec4a457d91 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -3703,6 +3703,10 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) smmu->ias = max(smmu->ias, smmu->oas); + if ((smmu->features & ARM_SMMU_FEAT_TRANS_S1) && + (smmu->features & ARM_SMMU_FEAT_TRANS_S2)) + smmu->features |= ARM_SMMU_FEAT_NESTING; + arm_smmu_device_iidr_probe(smmu); if (arm_smmu_sva_supported(smmu)) diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h index abaecdf8d5d2..c594a9b46999 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h @@ -646,6 +646,7 @@ struct arm_smmu_device { #define ARM_SMMU_FEAT_BTM (1 << 16) #define ARM_SMMU_FEAT_SVA (1 << 17) #define ARM_SMMU_FEAT_E2H (1 << 18) +#define ARM_SMMU_FEAT_NESTING (1 << 19) u32 features; #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) From 804e72062be4e3e39f823be3f4d5a2850a8c2959 Mon Sep 17 00:00:00 2001 From: Robin Murphy Date: Wed, 2 Aug 2023 17:02:27 +0000 Subject: [PATCH 201/513] iommu/arm-smmu-v3: Document nesting-related errata commit 0bfbfc526c70606bf0fad302e4821087cbecfaf4 upstream Both MMU-600 and MMU-700 have similar errata around TLB invalidation while both stages of translation are active, which will need some consideration once nesting support is implemented. For now, though, it's very easy to make our implicit lack of nesting support explicit for those cases, so they're less likely to be missed in future. Signed-off-by: Robin Murphy Reviewed-by: Nicolin Chen Link: https://lore.kernel.org/r/696da78d32bb4491f898f11b0bb4d850a8aa7c6a.1683731256.git.robin.murphy@arm.com Signed-off-by: Will Deacon Signed-off-by: Easwar Hariharan Signed-off-by: Greg Kroah-Hartman --- Documentation/arm64/silicon-errata.rst | 4 ++-- drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c | 5 +++++ 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/Documentation/arm64/silicon-errata.rst b/Documentation/arm64/silicon-errata.rst index 322df8abbc0e..83a75e16e54d 100644 --- a/Documentation/arm64/silicon-errata.rst +++ b/Documentation/arm64/silicon-errata.rst @@ -122,9 +122,9 @@ stable kernels. 
+----------------+-----------------+-----------------+-----------------------------+ | ARM | MMU-500 | #841119,826419 | N/A | +----------------+-----------------+-----------------+-----------------------------+ -| ARM | MMU-600 | #1076982 | N/A | +| ARM | MMU-600 | #1076982,1209401| N/A | +----------------+-----------------+-----------------+-----------------------------+ -| ARM | MMU-700 | #2812531 | N/A | +| ARM | MMU-700 | #2268618,2812531| N/A | +----------------+-----------------+-----------------+-----------------------------+ +----------------+-----------------+-----------------+-----------------------------+ | Broadcom | Brahma-B53 | N/A | ARM64_ERRATUM_845719 | diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c index 7cec4a457d91..340ef116d574 100644 --- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c +++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c @@ -3487,11 +3487,16 @@ static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu) /* Arm erratum 1076982 */ if (variant == 0 && revision <= 2) smmu->features &= ~ARM_SMMU_FEAT_SEV; + /* Arm erratum 1209401 */ + if (variant < 2) + smmu->features &= ~ARM_SMMU_FEAT_NESTING; break; case IIDR_PRODUCTID_ARM_MMU_700: /* Arm erratum 2812531 */ smmu->features &= ~ARM_SMMU_FEAT_BTM; smmu->options |= ARM_SMMU_OPT_CMDQ_FORCE_SYNC; + /* Arm errata 2268618, 2812531 */ + smmu->features &= ~ARM_SMMU_FEAT_NESTING; break; } break; From b7880809d75daf419e9faadc42525b85e2ed01cf Mon Sep 17 00:00:00 2001 From: Hugo Villeneuve Date: Tue, 4 Jul 2023 09:48:00 -0400 Subject: [PATCH 202/513] arm64: dts: imx8mn-var-som: add missing pull-up for onboard PHY reset pinmux [ Upstream commit 253be5b53c2792fb4384f8005b05421e6f040ee3 ] For SOMs with an onboard PHY, the RESET_N pull-up resistor is currently deactivated in the pinmux configuration. When the pinmux code selects the GPIO function for this pin, with a default direction of input, this prevents the RESET_N pin from being taken to the proper 3.3V level (deasserted), and this results in the PHY being not detected since it is held in reset. Taken from RESET_N pin description in ADIN13000 datasheet: This pin requires a 1K pull-up resistor to AVDD_3P3. Activate the pull-up resistor to fix the issue. 
Fixes: ade0176dd8a0 ("arm64: dts: imx8mn-var-som: Add Variscite VAR-SOM-MX8MN System on Module") Signed-off-by: Hugo Villeneuve Reviewed-by: Fabio Estevam Signed-off-by: Shawn Guo Signed-off-by: Sasha Levin --- arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi index d053ef302fb8..faafefe562e4 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mn-var-som.dtsi @@ -351,7 +351,7 @@ MX8MN_IOMUXC_ENET_RXC_ENET1_RGMII_RXC 0x91 MX8MN_IOMUXC_ENET_RX_CTL_ENET1_RGMII_RX_CTL 0x91 MX8MN_IOMUXC_ENET_TX_CTL_ENET1_RGMII_TX_CTL 0x1f - MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x19 + MX8MN_IOMUXC_GPIO1_IO09_GPIO1_IO9 0x159 >; }; From 809edb4262f035fd5c695ba34c89236245b53fca Mon Sep 17 00:00:00 2001 From: "ndesaulniers@google.com" Date: Tue, 1 Aug 2023 15:22:17 -0700 Subject: [PATCH 203/513] word-at-a-time: use the same return type for has_zero regardless of endianness [ Upstream commit 79e8328e5acbe691bbde029a52c89d70dcbc22f3 ] Compiling big-endian targets with Clang produces the diagnostic: fs/namei.c:2173:13: warning: use of bitwise '|' with boolean operands [-Wbitwise-instead-of-logical] } while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants))); ~^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ || fs/namei.c:2173:13: note: cast one or both operands to int to silence this warning It appears that when has_zero was introduced, two definitions were produced with different signatures (in particular different return types). Looking at the usage in hash_name() in fs/namei.c, I suspect that has_zero() is meant to be invoked twice per while loop iteration; using logical-or would not update `bdata` when `a` did not have zeros. So I think it's preferred to always return an unsigned long rather than a bool than update the while loop in hash_name() to use a logical-or rather than bitwise-or. [ Also changed powerpc version to do the same - Linus ] Link: https://github.com/ClangBuiltLinux/linux/issues/1832 Link: https://lore.kernel.org/lkml/20230801-bitwise-v1-1-799bec468dc4@google.com/ Fixes: 36126f8f2ed8 ("word-at-a-time: make the interfaces truly generic") Debugged-by: Nathan Chancellor Signed-off-by: Nick Desaulniers Acked-by: Heiko Carstens Cc: Arnd Bergmann Signed-off-by: Linus Torvalds Signed-off-by: Sasha Levin --- arch/powerpc/include/asm/word-at-a-time.h | 2 +- include/asm-generic/word-at-a-time.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index f3f4710d4ff5..99129b0cd8b8 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h @@ -34,7 +34,7 @@ static inline long find_zero(unsigned long mask) return leading_zero_bits >> 3; } -static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) +static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) { unsigned long rhs = val | c->low_bits; *data = rhs; diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h index 20c93f08c993..95a1d214108a 100644 --- a/include/asm-generic/word-at-a-time.h +++ b/include/asm-generic/word-at-a-time.h @@ -38,7 +38,7 @@ static inline long find_zero(unsigned long mask) return (mask >> 8) ? 
byte : byte + 1; } -static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) +static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c) { unsigned long rhs = val | c->low_bits; *data = rhs; From 8e72db3ffa5d5afeb0077277052f529ebcdfa470 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Thu, 27 Jul 2023 20:29:39 +0200 Subject: [PATCH 204/513] KVM: s390: fix sthyi error handling [ Upstream commit 0c02cc576eac161601927b41634f80bfd55bfa9e ] Commit 9fb6c9b3fea1 ("s390/sthyi: add cache to store hypervisor info") added cache handling for store hypervisor info. This also changed the possible return code for sthyi_fill(). Instead of only returning a condition code like the sthyi instruction would do, it can now also return a negative error value (-ENOMEM). handle_styhi() was not changed accordingly. In case of an error, the negative error value would incorrectly injected into the guest PSW. Add proper error handling to prevent this, and update the comment which describes the possible return values of sthyi_fill(). Fixes: 9fb6c9b3fea1 ("s390/sthyi: add cache to store hypervisor info") Reviewed-by: Christian Borntraeger Link: https://lore.kernel.org/r/20230727182939.2050744-1-hca@linux.ibm.com Signed-off-by: Heiko Carstens Signed-off-by: Sasha Levin --- arch/s390/kernel/sthyi.c | 6 +++--- arch/s390/kvm/intercept.c | 9 ++++++--- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/arch/s390/kernel/sthyi.c b/arch/s390/kernel/sthyi.c index 4d141e2c132e..2ea7f208f0e7 100644 --- a/arch/s390/kernel/sthyi.c +++ b/arch/s390/kernel/sthyi.c @@ -459,9 +459,9 @@ static int sthyi_update_cache(u64 *rc) * * Fills the destination with system information returned by the STHYI * instruction. The data is generated by emulation or execution of STHYI, - * if available. The return value is the condition code that would be - * returned, the rc parameter is the return code which is passed in - * register R2 + 1. + * if available. The return value is either a negative error value or + * the condition code that would be returned, the rc parameter is the + * return code which is passed in register R2 + 1. */ int sthyi_fill(void *dst, u64 *rc) { diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c index aeb0e0865e89..458b42b50b8c 100644 --- a/arch/s390/kvm/intercept.c +++ b/arch/s390/kvm/intercept.c @@ -389,8 +389,8 @@ static int handle_partial_execution(struct kvm_vcpu *vcpu) */ int handle_sthyi(struct kvm_vcpu *vcpu) { - int reg1, reg2, r = 0; - u64 code, addr, cc = 0, rc = 0; + int reg1, reg2, cc = 0, r = 0; + u64 code, addr, rc = 0; struct sthyi_sctns *sctns = NULL; if (!test_kvm_facility(vcpu->kvm, 74)) @@ -421,7 +421,10 @@ int handle_sthyi(struct kvm_vcpu *vcpu) return -ENOMEM; cc = sthyi_fill(sctns, &rc); - + if (cc < 0) { + free_page((unsigned long)sctns); + return cc; + } out: if (!cc) { if (kvm_s390_pv_cpu_is_protected(vcpu)) { From 4c224ea31bedb379485ddbd7e8062384f9a02262 Mon Sep 17 00:00:00 2001 From: Ilan Peer Date: Sun, 23 Jul 2023 23:10:43 +0300 Subject: [PATCH 205/513] wifi: cfg80211: Fix return value in scan logic [ Upstream commit fd7f08d92fcd7cc3eca0dd6c853f722a4c6176df ] The reporter noticed a warning when running iwlwifi: WARNING: CPU: 8 PID: 659 at mm/page_alloc.c:4453 __alloc_pages+0x329/0x340 As cfg80211_parse_colocated_ap() is not expected to return a negative value return 0 and not a negative value if cfg80211_calc_short_ssid() fails. 
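In other words, the absence of a usable short SSID is treated as "nothing to
parse" rather than as a fatal error. A minimal sketch of the resulting flow,
simplified from the one-line hunk below:

	ret = cfg80211_calc_short_ssid(ies, &ssid_elem, &s_ssid_tmp);
	if (ret)
		return 0;	/* skip colocated-AP parsing, do not fail the scan */
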
Fixes: c8cb5b854b40f ("nl80211/cfg80211: support 6 GHz scanning") Closes: https://bugzilla.kernel.org/show_bug.cgi?id=217675 Signed-off-by: Ilan Peer Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20230723201043.3007430-1-ilan.peer@intel.com Signed-off-by: Sasha Levin --- net/wireless/scan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/wireless/scan.c b/net/wireless/scan.c index a565476809f0..c7192d7bcbd7 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -641,7 +641,7 @@ static int cfg80211_parse_colocated_ap(const struct cfg80211_bss_ies *ies, ret = cfg80211_calc_short_ssid(ies, &ssid_elem, &s_ssid_tmp); if (ret) - return ret; + return 0; /* RNR IE may contain more than one NEIGHBOR_AP_INFO */ while (pos + sizeof(*ap_info) <= end) { From 00cecb0a8f9e7a21754d5ad85813ab6b47b3308f Mon Sep 17 00:00:00 2001 From: Zhengchao Shao Date: Wed, 5 Jul 2023 20:15:27 +0800 Subject: [PATCH 206/513] net/mlx5: DR, fix memory leak in mlx5dr_cmd_create_reformat_ctx [ Upstream commit 5dd77585dd9d0e03dd1bceb95f0269a7eaf6b936 ] when mlx5_cmd_exec failed in mlx5dr_cmd_create_reformat_ctx, the memory pointed by 'in' is not released, which will cause memory leak. Move memory release after mlx5_cmd_exec. Fixes: 1d9186476e12 ("net/mlx5: DR, Add direct rule command utilities") Signed-off-by: Zhengchao Shao Reviewed-by: Leon Romanovsky Signed-off-by: Saeed Mahameed Signed-off-by: Sasha Levin --- drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c index fcf705ce421f..aa003a75946b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_cmd.c @@ -528,11 +528,12 @@ int mlx5dr_cmd_create_reformat_ctx(struct mlx5_core_dev *mdev, err = mlx5_cmd_exec(mdev, in, inlen, out, sizeof(out)); if (err) - return err; + goto err_free_in; *reformat_id = MLX5_GET(alloc_packet_reformat_context_out, out, packet_reformat_id); - kvfree(in); +err_free_in: + kvfree(in); return err; } From 8f9a04c742e1f091a9fb34c1f5fcef4bb0464fbb Mon Sep 17 00:00:00 2001 From: Yuanjun Gong Date: Tue, 25 Jul 2023 14:56:55 +0800 Subject: [PATCH 207/513] net/mlx5e: fix return value check in mlx5e_ipsec_remove_trailer() [ Upstream commit e5bcb7564d3bd0c88613c76963c5349be9c511c5 ] mlx5e_ipsec_remove_trailer() should return an error code if function pskb_trim() returns an unexpected value. 
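A minimal sketch of the intended shape of the fix, simplified from the hunk
below:

	ret = pskb_trim(skb, skb->len - trailer_len);
	if (unlikely(ret))
		return ret;	/* propagate the trim failure instead of ignoring it */
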
Fixes: 2ac9cfe78223 ("net/mlx5e: IPSec, Add Innova IPSec offload TX data path") Signed-off-by: Yuanjun Gong Reviewed-by: Leon Romanovsky Signed-off-by: Saeed Mahameed Signed-off-by: Sasha Levin --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index b56fea142c24..4590d19c25cf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -121,7 +121,9 @@ static int mlx5e_ipsec_remove_trailer(struct sk_buff *skb, struct xfrm_state *x) trailer_len = alen + plen + 2; - pskb_trim(skb, skb->len - trailer_len); + ret = pskb_trim(skb, skb->len - trailer_len); + if (unlikely(ret)) + return ret; if (skb->protocol == htons(ETH_P_IP)) { ipv4hdr->tot_len = htons(ntohs(ipv4hdr->tot_len) - trailer_len); ip_send_check(ipv4hdr); From cc9ebceaa6d0824ac53adb2c569f84b55987f709 Mon Sep 17 00:00:00 2001 From: Lin Ma Date: Tue, 25 Jul 2023 10:33:30 +0800 Subject: [PATCH 208/513] bpf: Add length check for SK_DIAG_BPF_STORAGE_REQ_MAP_FD parsing [ Upstream commit bcc29b7f5af6797702c2306a7aacb831fc5ce9cb ] The nla_for_each_nested parsing in function bpf_sk_storage_diag_alloc does not check the length of the nested attribute. This can lead to an out-of-attribute read and allow a malformed nlattr (e.g., length 0) to be viewed as a 4 byte integer. This patch adds an additional check when the nlattr is getting counted. This makes sure the latter nla_get_u32 can access the attributes with the correct length. Fixes: 1ed4d92458a9 ("bpf: INET_DIAG support in bpf_sk_storage") Suggested-by: Jakub Kicinski Signed-off-by: Lin Ma Reviewed-by: Jakub Kicinski Link: https://lore.kernel.org/r/20230725023330.422856-1-linma@zju.edu.cn Signed-off-by: Martin KaFai Lau Signed-off-by: Sasha Levin --- net/core/bpf_sk_storage.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/core/bpf_sk_storage.c b/net/core/bpf_sk_storage.c index 910ca41cb9e6..4953abee79fe 100644 --- a/net/core/bpf_sk_storage.c +++ b/net/core/bpf_sk_storage.c @@ -521,8 +521,11 @@ bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs) return ERR_PTR(-EPERM); nla_for_each_nested(nla, nla_stgs, rem) { - if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD) + if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD) { + if (nla_len(nla) != sizeof(u32)) + return ERR_PTR(-EINVAL); nr_maps++; + } } diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL); From 047508edd602921ee8bb0f2aa2100aa2e9bedc75 Mon Sep 17 00:00:00 2001 From: Lin Ma Date: Wed, 26 Jul 2023 15:53:14 +0800 Subject: [PATCH 209/513] rtnetlink: let rtnl_bridge_setlink checks IFLA_BRIDGE_MODE length [ Upstream commit d73ef2d69c0dba5f5a1cb9600045c873bab1fb7f ] There are totally 9 ndo_bridge_setlink handlers in the current kernel, which are 1) bnxt_bridge_setlink, 2) be_ndo_bridge_setlink 3) i40e_ndo_bridge_setlink 4) ice_bridge_setlink 5) ixgbe_ndo_bridge_setlink 6) mlx5e_bridge_setlink 7) nfp_net_bridge_setlink 8) qeth_l2_bridge_setlink 9) br_setlink. By investigating the code, we find that 1-7 parse and use nlattr IFLA_BRIDGE_MODE but 3 and 4 forget to do the nla_len check. This can lead to an out-of-attribute read and allow a malformed nlattr (e.g., length 0) to be viewed as a 2 byte integer. To avoid such issues, also for other ndo_bridge_setlink handlers in the future. 
This patch adds the nla_len check in rtnl_bridge_setlink and does an early error return if length mismatches. To make it works, the break is removed from the parsing for IFLA_BRIDGE_FLAGS to make sure this nla_for_each_nested iterates every attribute. Fixes: b1edc14a3fbf ("ice: Implement ice_bridge_getlink and ice_bridge_setlink") Fixes: 51616018dd1b ("i40e: Add support for getlink, setlink ndo ops") Suggested-by: Jakub Kicinski Signed-off-by: Lin Ma Acked-by: Nikolay Aleksandrov Reviewed-by: Hangbin Liu Link: https://lore.kernel.org/r/20230726075314.1059224-1-linma@zju.edu.cn Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/core/rtnetlink.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 49766446797c..b055e196f530 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -4919,13 +4919,17 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh, br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); if (br_spec) { nla_for_each_nested(attr, br_spec, rem) { - if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { + if (nla_type(attr) == IFLA_BRIDGE_FLAGS && !have_flags) { if (nla_len(attr) < sizeof(flags)) return -EINVAL; have_flags = true; flags = nla_get_u16(attr); - break; + } + + if (nla_type(attr) == IFLA_BRIDGE_MODE) { + if (nla_len(attr) < sizeof(u16)) + return -EINVAL; } } } From 0f6e3d8d7f9107989076b6b12a1bd378c16a4ae6 Mon Sep 17 00:00:00 2001 From: Yuanjun Gong Date: Thu, 27 Jul 2023 01:05:06 +0800 Subject: [PATCH 210/513] net: dsa: fix value check in bcm_sf2_sw_probe() [ Upstream commit dadc5b86cc9459581f37fe755b431adc399ea393 ] in bcm_sf2_sw_probe(), check the return value of clk_prepare_enable() and return the error code if clk_prepare_enable() returns an unexpected value. Fixes: e9ec5c3bd238 ("net: dsa: bcm_sf2: request and handle clocks") Signed-off-by: Yuanjun Gong Reviewed-by: Florian Fainelli Link: https://lore.kernel.org/r/20230726170506.16547-1-ruc_gongyuanjun@163.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/dsa/bcm_sf2.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c index d76b2377d66e..773d751ef169 100644 --- a/drivers/net/dsa/bcm_sf2.c +++ b/drivers/net/dsa/bcm_sf2.c @@ -1422,7 +1422,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) if (IS_ERR(priv->clk)) return PTR_ERR(priv->clk); - clk_prepare_enable(priv->clk); + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; priv->clk_mdiv = devm_clk_get_optional(&pdev->dev, "sw_switch_mdiv"); if (IS_ERR(priv->clk_mdiv)) { @@ -1430,7 +1432,9 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev) goto out_clk; } - clk_prepare_enable(priv->clk_mdiv); + ret = clk_prepare_enable(priv->clk_mdiv); + if (ret) + goto out_clk; ret = bcm_sf2_sw_rst(priv); if (ret) { From 675d29de69c7a17807e504f84e5e19031a678d82 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Georg=20M=C3=BCller?= Date: Fri, 28 Jul 2023 17:18:12 +0200 Subject: [PATCH 211/513] perf test uprobe_from_different_cu: Skip if there is no gcc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 98ce8e4a9dcfb448b30a2d7a16190f4a00382377 ] Without gcc, the test will fail. On cleanup, ignore probe removal errors. Otherwise, in case of an error adding the probe, the temporary directory is not removed. 
Fixes: 56cbeacf14353057 ("perf probe: Add test for regression introduced by switch to die_get_decl_file()") Signed-off-by: Georg Müller Acked-by: Ian Rogers Cc: Adrian Hunter Cc: Alexander Shishkin Cc: Georg Müller Cc: Ingo Molnar Cc: Jiri Olsa Cc: Mark Rutland Cc: Masami Hiramatsu Cc: Namhyung Kim Cc: Peter Zijlstra Link: https://lore.kernel.org/r/20230728151812.454806-2-georgmueller@gmx.net Link: https://lore.kernel.org/r/CAP-5=fUP6UuLgRty3t2=fQsQi3k4hDMz415vWdp1x88QMvZ8ug@mail.gmail.com/ Signed-off-by: Arnaldo Carvalho de Melo Signed-off-by: Sasha Levin --- tools/perf/tests/shell/test_uprobe_from_different_cu.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh index 00d2e0e2e0c2..319f36ebb9a4 100644 --- a/tools/perf/tests/shell/test_uprobe_from_different_cu.sh +++ b/tools/perf/tests/shell/test_uprobe_from_different_cu.sh @@ -4,6 +4,12 @@ set -e +# skip if there's no gcc +if ! [ -x "$(command -v gcc)" ]; then + echo "failed: no gcc compiler" + exit 2 +fi + temp_dir=$(mktemp -d /tmp/perf-uprobe-different-cu-sh.XXXXXXXXXX) cleanup() @@ -11,7 +17,7 @@ cleanup() trap - EXIT TERM INT if [[ "${temp_dir}" =~ ^/tmp/perf-uprobe-different-cu-sh.*$ ]]; then echo "--- Cleaning up ---" - perf probe -x ${temp_dir}/testfile -d foo + perf probe -x ${temp_dir}/testfile -d foo || true rm -f "${temp_dir}/"* rmdir "${temp_dir}" fi From 8dedcc6af341bfa4697bbed4bfaf2e9f4d737c66 Mon Sep 17 00:00:00 2001 From: Jamal Hadi Salim Date: Wed, 26 Jul 2023 09:51:51 -0400 Subject: [PATCH 212/513] net: sched: cls_u32: Fix match key mis-addressing [ Upstream commit e68409db995380d1badacba41ff24996bd396171 ] A match entry is uniquely identified with an "address" or "path" in the form of: hashtable ID(12b):bucketid(8b):nodeid(12b). When creating table match entries all of hash table id, bucket id and node (match entry id) are needed to be either specified by the user or reasonable in-kernel defaults are used. The in-kernel default for a table id is 0x800(omnipresent root table); for bucketid it is 0x0. Prior to this fix there was none for a nodeid i.e. the code assumed that the user passed the correct nodeid and if the user passes a nodeid of 0 (as Mingi Cho did) then that is what was used. But nodeid of 0 is reserved for identifying the table. This is not a problem until we dump. The dump code notices that the nodeid is zero and assumes it is referencing a table and therefore references table struct tc_u_hnode instead of what was created i.e match entry struct tc_u_knode. Ming does an equivalent of: tc filter add dev dummy0 parent 10: prio 1 handle 0x1000 \ protocol ip u32 match ip src 10.0.0.1/32 classid 10:1 action ok Essentially specifying a table id 0, bucketid 1 and nodeid of zero Tableid 0 is remapped to the default of 0x800. Bucketid 1 is ignored and defaults to 0x00. Nodeid was assumed to be what Ming passed - 0x000 dumping before fix shows: ~$ tc filter ls dev dummy0 parent 10: filter protocol ip pref 1 u32 chain 0 filter protocol ip pref 1 u32 chain 0 fh 800: ht divisor 1 filter protocol ip pref 1 u32 chain 0 fh 800: ht divisor -30591 Note that the last line reports a table instead of a match entry (you can tell this because it says "ht divisor..."). As a result of reporting the wrong data type (misinterpretting of struct tc_u_knode as being struct tc_u_hnode) the divisor is reported with value of -30591. 
Ming identified this as part of the heap address (physmap_base is 0xffff8880 (-30591 - 1)). The fix is to ensure that when table entry matches are added and no nodeid is specified (i.e nodeid == 0) then we get the next available nodeid from the table's pool. After the fix, this is what the dump shows: $ tc filter ls dev dummy0 parent 10: filter protocol ip pref 1 u32 chain 0 filter protocol ip pref 1 u32 chain 0 fh 800: ht divisor 1 filter protocol ip pref 1 u32 chain 0 fh 800::800 order 2048 key ht 800 bkt 0 flowid 10:1 not_in_hw match 0a000001/ffffffff at 12 action order 1: gact action pass random type none pass val 0 index 1 ref 1 bind 1 Reported-by: Mingi Cho Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Jamal Hadi Salim Link: https://lore.kernel.org/r/20230726135151.416917-1-jhs@mojatatu.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/sched/cls_u32.c | 56 ++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 50 insertions(+), 6 deletions(-) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index 0025fa837e85..e4e38aa75111 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -999,18 +999,62 @@ static int u32_change(struct net *net, struct sk_buff *in_skb, return -EINVAL; } + /* At this point, we need to derive the new handle that will be used to + * uniquely map the identity of this table match entry. The + * identity of the entry that we need to construct is 32 bits made of: + * htid(12b):bucketid(8b):node/entryid(12b) + * + * At this point _we have the table(ht)_ in which we will insert this + * entry. We carry the table's id in variable "htid". + * Note that earlier code picked the ht selection either by a) the user + * providing the htid specified via TCA_U32_HASH attribute or b) when + * no such attribute is passed then the root ht, is default to at ID + * 0x[800][00][000]. Rule: the root table has a single bucket with ID 0. + * If OTOH the user passed us the htid, they may also pass a bucketid of + * choice. 0 is fine. For example a user htid is 0x[600][01][000] it is + * indicating hash bucketid of 1. Rule: the entry/node ID _cannot_ be + * passed via the htid, so even if it was non-zero it will be ignored. + * + * We may also have a handle, if the user passed one. The handle also + * carries the same addressing of htid(12b):bucketid(8b):node/entryid(12b). + * Rule: the bucketid on the handle is ignored even if one was passed; + * rather the value on "htid" is always assumed to be the bucketid. + */ if (handle) { + /* Rule: The htid from handle and tableid from htid must match */ if (TC_U32_HTID(handle) && TC_U32_HTID(handle ^ htid)) { NL_SET_ERR_MSG_MOD(extack, "Handle specified hash table address mismatch"); return -EINVAL; } - handle = htid | TC_U32_NODE(handle); - err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, handle, - GFP_KERNEL); - if (err) - return err; - } else + /* Ok, so far we have a valid htid(12b):bucketid(8b) but we + * need to finalize the table entry identification with the last + * part - the node/entryid(12b)). Rule: Nodeid _cannot be 0_ for + * entries. Rule: nodeid of 0 is reserved only for tables(see + * earlier code which processes TC_U32_DIVISOR attribute). + * Rule: The nodeid can only be derived from the handle (and not + * htid). + * Rule: if the handle specified zero for the node id example + * 0x60000000, then pick a new nodeid from the pool of IDs + * this hash table has been allocating from. 
+ * If OTOH it is specified (i.e for example the user passed a + * handle such as 0x60000123), then we use it generate our final + * handle which is used to uniquely identify the match entry. + */ + if (!TC_U32_NODE(handle)) { + handle = gen_new_kid(ht, htid); + } else { + handle = htid | TC_U32_NODE(handle); + err = idr_alloc_u32(&ht->handle_idr, NULL, &handle, + handle, GFP_KERNEL); + if (err) + return err; + } + } else { + /* The user did not give us a handle; lets just generate one + * from the table's pool of nodeids. + */ handle = gen_new_kid(ht, htid); + } if (tb[TCA_U32_SEL] == NULL) { NL_SET_ERR_MSG_MOD(extack, "Selector not specified"); From 6d8c259f482776eb64315acafcce12d59583cd1c Mon Sep 17 00:00:00 2001 From: Chengfeng Ye Date: Thu, 27 Jul 2023 08:56:19 +0000 Subject: [PATCH 213/513] mISDN: hfcpci: Fix potential deadlock on &hc->lock [ Upstream commit 56c6be35fcbed54279df0a2c9e60480a61841d6f ] As &hc->lock is acquired by both timer _hfcpci_softirq() and hardirq hfcpci_int(), the timer should disable irq before lock acquisition otherwise deadlock could happen if the timmer is preemtped by the hadr irq. Possible deadlock scenario: hfcpci_softirq() (timer) -> _hfcpci_softirq() -> spin_lock(&hc->lock); -> hfcpci_int() -> spin_lock(&hc->lock); (deadlock here) This flaw was found by an experimental static analysis tool I am developing for irq-related deadlock. The tentative patch fixes the potential deadlock by spin_lock_irq() in timer. Fixes: b36b654a7e82 ("mISDN: Create /sys/class/mISDN") Signed-off-by: Chengfeng Ye Link: https://lore.kernel.org/r/20230727085619.7419-1-dg573847474@gmail.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/isdn/hardware/mISDN/hfcpci.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/isdn/hardware/mISDN/hfcpci.c b/drivers/isdn/hardware/mISDN/hfcpci.c index eba58b99cd29..d6cf01c32a33 100644 --- a/drivers/isdn/hardware/mISDN/hfcpci.c +++ b/drivers/isdn/hardware/mISDN/hfcpci.c @@ -839,7 +839,7 @@ hfcpci_fill_fifo(struct bchannel *bch) *z1t = cpu_to_le16(new_z1); /* now send data */ if (bch->tx_idx < bch->tx_skb->len) return; - dev_kfree_skb(bch->tx_skb); + dev_kfree_skb_any(bch->tx_skb); if (get_next_bframe(bch)) goto next_t_frame; return; @@ -895,7 +895,7 @@ hfcpci_fill_fifo(struct bchannel *bch) } bz->za[new_f1].z1 = cpu_to_le16(new_z1); /* for next buffer */ bz->f1 = new_f1; /* next frame */ - dev_kfree_skb(bch->tx_skb); + dev_kfree_skb_any(bch->tx_skb); get_next_bframe(bch); } @@ -1119,7 +1119,7 @@ tx_birq(struct bchannel *bch) if (bch->tx_skb && bch->tx_idx < bch->tx_skb->len) hfcpci_fill_fifo(bch); else { - dev_kfree_skb(bch->tx_skb); + dev_kfree_skb_any(bch->tx_skb); if (get_next_bframe(bch)) hfcpci_fill_fifo(bch); } @@ -2277,7 +2277,7 @@ _hfcpci_softirq(struct device *dev, void *unused) return 0; if (hc->hw.int_m2 & HFCPCI_IRQ_ENABLE) { - spin_lock(&hc->lock); + spin_lock_irq(&hc->lock); bch = Sel_BCS(hc, hc->hw.bswapped ? 
2 : 1); if (bch && bch->state == ISDN_P_B_RAW) { /* B1 rx&tx */ main_rec_hfcpci(bch); @@ -2288,7 +2288,7 @@ _hfcpci_softirq(struct device *dev, void *unused) main_rec_hfcpci(bch); tx_birq(bch); } - spin_unlock(&hc->lock); + spin_unlock_irq(&hc->lock); } return 0; } From a19952dbb5b667ae3ada56e113c3a4b4cbe119cf Mon Sep 17 00:00:00 2001 From: Prabhakar Kushwaha Date: Mon, 4 Oct 2021 09:58:39 +0300 Subject: [PATCH 214/513] qed: Fix kernel-doc warnings [ Upstream commit 19198e4ec97dc9d173b458a75ace3c3ca55c2d85 ] This patch fixes all the qed and qede kernel-doc warnings according to the guidelines that are described in Documentation/doc-guide/kernel-doc.rst. Signed-off-by: Ariel Elior Signed-off-by: Omkar Kulkarni Signed-off-by: Shai Malin Signed-off-by: Prabhakar Kushwaha Signed-off-by: David S. Miller Stable-dep-of: e346e231b42b ("qed: Fix scheduling in a tasklet while getting stats") Signed-off-by: Sasha Levin --- drivers/net/ethernet/qlogic/qed/qed.h | 9 +- drivers/net/ethernet/qlogic/qed/qed_cxt.h | 138 +-- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 343 ++++--- drivers/net/ethernet/qlogic/qed/qed_hsi.h | 956 +++++++++--------- drivers/net/ethernet/qlogic/qed/qed_hw.h | 222 ++-- .../net/ethernet/qlogic/qed/qed_init_ops.h | 58 +- drivers/net/ethernet/qlogic/qed/qed_int.h | 284 +++--- drivers/net/ethernet/qlogic/qed/qed_iscsi.h | 9 +- drivers/net/ethernet/qlogic/qed/qed_l2.h | 134 +-- drivers/net/ethernet/qlogic/qed/qed_ll2.h | 130 +-- drivers/net/ethernet/qlogic/qed/qed_mcp.h | 757 +++++++------- .../net/ethernet/qlogic/qed/qed_selftest.h | 30 +- drivers/net/ethernet/qlogic/qed/qed_sp.h | 215 ++-- drivers/net/ethernet/qlogic/qed/qed_sriov.h | 99 +- drivers/net/ethernet/qlogic/qed/qed_vf.h | 301 +++--- drivers/net/ethernet/qlogic/qede/qede_main.c | 5 +- include/linux/qed/qed_chain.h | 97 +- include/linux/qed/qed_if.h | 255 ++--- include/linux/qed/qed_iscsi_if.h | 2 +- include/linux/qed/qed_ll2_if.h | 42 +- include/linux/qed/qed_nvmetcp_if.h | 17 + 21 files changed, 2186 insertions(+), 1917 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h index d58e021614cd..b656408b9d70 100644 --- a/drivers/net/ethernet/qlogic/qed/qed.h +++ b/drivers/net/ethernet/qlogic/qed/qed.h @@ -877,12 +877,13 @@ u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type); /** - * @brief qed_concrete_to_sw_fid - get the sw function id from - * the concrete value. + * qed_concrete_to_sw_fid(): Get the sw function id from + * the concrete value. * - * @param concrete_fid + * @cdev: Qed dev pointer. + * @concrete_fid: Concrete fid. * - * @return inline u8 + * Return: inline u8. */ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev, u32 concrete_fid) diff --git a/drivers/net/ethernet/qlogic/qed/qed_cxt.h b/drivers/net/ethernet/qlogic/qed/qed_cxt.h index 8adb7ed0c12d..d31196db7bdd 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_cxt.h +++ b/drivers/net/ethernet/qlogic/qed/qed_cxt.h @@ -28,24 +28,23 @@ struct qed_tid_mem { }; /** - * @brief qedo_cid_get_cxt_info - Returns the context info for a specific cid + * qed_cxt_get_cid_info(): Returns the context info for a specific cidi. * + * @p_hwfn: HW device data. + * @p_info: In/out. * - * @param p_hwfn - * @param p_info in/out - * - * @return int + * Return: Int. */ int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn, struct qed_cxt_info *p_info); /** - * @brief qed_cxt_get_tid_mem_info + * qed_cxt_get_tid_mem_info(): Returns the tid mem info. 
* - * @param p_hwfn - * @param p_info + * @p_hwfn: HW device data. + * @p_info: in/out. * - * @return int + * Return: int. */ int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn, struct qed_tid_mem *p_info); @@ -64,142 +63,155 @@ u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *vf_cid); /** - * @brief qed_cxt_set_pf_params - Set the PF params for cxt init + * qed_cxt_set_pf_params(): Set the PF params for cxt init. + * + * @p_hwfn: HW device data. + * @rdma_tasks: Requested maximum. * - * @param p_hwfn - * @param rdma_tasks - requested maximum - * @return int + * Return: int. */ int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks); /** - * @brief qed_cxt_cfg_ilt_compute - compute ILT init parameters + * qed_cxt_cfg_ilt_compute(): Compute ILT init parameters. * - * @param p_hwfn - * @param last_line + * @p_hwfn: HW device data. + * @last_line: Last_line. * - * @return int + * Return: Int */ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line); /** - * @brief qed_cxt_cfg_ilt_compute_excess - how many lines can be decreased + * qed_cxt_cfg_ilt_compute_excess(): How many lines can be decreased. + * + * @p_hwfn: HW device data. + * @used_lines: Used lines. * - * @param p_hwfn - * @param used_lines + * Return: Int. */ u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines); /** - * @brief qed_cxt_mngr_alloc - Allocate and init the context manager struct + * qed_cxt_mngr_alloc(): Allocate and init the context manager struct. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_mngr_free + * qed_cxt_mngr_free() - Context manager free. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired map + * qed_cxt_tables_alloc(): Allocate ILT shadow, Searcher T2, acquired map. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_mngr_setup - Reset the acquired CIDs + * qed_cxt_mngr_setup(): Reset the acquired CIDs. * - * @param p_hwfn + * @p_hwfn: HW device data. */ void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_hw_init_common - Initailze ILT and DQ, common phase, per path. - * + * qed_cxt_hw_init_common(): Initailze ILT and DQ, common phase, per path. * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn); /** - * @brief qed_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path. + * qed_cxt_hw_init_pf(): Initailze ILT and DQ, PF phase, per path. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_qm_init_pf - Initailze the QM PF phase, per path + * qed_qm_init_pf(): Initailze the QM PF phase, per path. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @is_pf_loading: Is pf pending. * - * @param p_hwfn - * @param p_ptt - * @param is_pf_loading + * Return: Void. */ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool is_pf_loading); /** - * @brief Reconfigures QM pf on the fly + * qed_qm_reconf(): Reconfigures QM pf on the fly. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. 
+ * @p_ptt: P_ptt. * - * @return int + * Return: Int. */ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); #define QED_CXT_PF_CID (0xff) /** - * @brief qed_cxt_release - Release a cid + * qed_cxt_release_cid(): Release a cid. * - * @param p_hwfn - * @param cid + * @p_hwfn: HW device data. + * @cid: Cid. + * + * Return: Void. */ void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid); /** - * @brief qed_cxt_release - Release a cid belonging to a vf-queue + * _qed_cxt_release_cid(): Release a cid belonging to a vf-queue. + * + * @p_hwfn: HW device data. + * @cid: Cid. + * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF. * - * @param p_hwfn - * @param cid - * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF + * Return: Void. */ void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid); /** - * @brief qed_cxt_acquire - Acquire a new cid of a specific protocol type + * qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type. * - * @param p_hwfn - * @param type - * @param p_cid + * @p_hwfn: HW device data. + * @type: Type. + * @p_cid: Pointer cid. * - * @return int + * Return: Int. */ int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *p_cid); /** - * @brief _qed_cxt_acquire - Acquire a new cid of a specific protocol type - * for a vf-queue + * _qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type + * for a vf-queue. * - * @param p_hwfn - * @param type - * @param p_cid - * @param vfid - engine relative index. QED_CXT_PF_CID if belongs to PF + * @p_hwfn: HW device data. + * @type: Type. + * @p_cid: Pointer cid. + * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF. * - * @return int + * Return: Int. */ int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn, enum protocol_type type, u32 *p_cid, u8 vfid); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index d3c1f3879be8..f0a825b985a4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -15,44 +15,52 @@ #include "qed_int.h" /** - * @brief qed_init_dp - initialize the debug level + * qed_init_dp(): Initialize the debug level. * - * @param cdev - * @param dp_module - * @param dp_level + * @cdev: Qed dev pointer. + * @dp_module: Module debug parameter. + * @dp_level: Module debug level. + * + * Return: Void. */ void qed_init_dp(struct qed_dev *cdev, u32 dp_module, u8 dp_level); /** - * @brief qed_init_struct - initialize the device structure to - * its defaults + * qed_init_struct(): Initialize the device structure to + * its defaults. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_init_struct(struct qed_dev *cdev); /** - * @brief qed_resc_free - + * qed_resc_free: Free device resources. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_resc_free(struct qed_dev *cdev); /** - * @brief qed_resc_alloc - + * qed_resc_alloc(): Alloc device resources. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_resc_alloc(struct qed_dev *cdev); /** - * @brief qed_resc_setup - + * qed_resc_setup(): Setup device resources. * - * @param cdev + * @cdev: Qed dev pointer. + * + * Return: Void. */ void qed_resc_setup(struct qed_dev *cdev); @@ -105,94 +113,97 @@ struct qed_hw_init_params { }; /** - * @brief qed_hw_init - + * qed_hw_init(): Init Qed hardware. * - * @param cdev - * @param p_params + * @cdev: Qed dev pointer. 
+ * @p_params: Pointers to params. * - * @return int + * Return: Int. */ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params); /** - * @brief qed_hw_timers_stop_all - stop the timers HW block + * qed_hw_timers_stop_all(): Stop the timers HW block. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return void + * Return: void. */ void qed_hw_timers_stop_all(struct qed_dev *cdev); /** - * @brief qed_hw_stop - + * qed_hw_stop(): Stop Qed hardware. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: int. */ int qed_hw_stop(struct qed_dev *cdev); /** - * @brief qed_hw_stop_fastpath -should be called incase - * slowpath is still required for the device, - * but fastpath is not. + * qed_hw_stop_fastpath(): Should be called incase + * slowpath is still required for the device, + * but fastpath is not. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_hw_stop_fastpath(struct qed_dev *cdev); /** - * @brief qed_hw_start_fastpath -restart fastpath traffic, - * only if hw_stop_fastpath was called + * qed_hw_start_fastpath(): Restart fastpath traffic, + * only if hw_stop_fastpath was called. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_hw_start_fastpath(struct qed_hwfn *p_hwfn); /** - * @brief qed_hw_prepare - + * qed_hw_prepare(): Prepare Qed hardware. * - * @param cdev - * @param personality - personality to initialize + * @cdev: Qed dev pointer. + * @personality: Personality to initialize. * - * @return int + * Return: Int. */ int qed_hw_prepare(struct qed_dev *cdev, int personality); /** - * @brief qed_hw_remove - + * qed_hw_remove(): Remove Qed hardware. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_hw_remove(struct qed_dev *cdev); /** - * @brief qed_ptt_acquire - Allocate a PTT window + * qed_ptt_acquire(): Allocate a PTT window. * - * Should be called at the entry point to the driver (at the beginning of an - * exported function) + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: struct qed_ptt. * - * @return struct qed_ptt + * Should be called at the entry point to the driver (at the beginning of an + * exported function). */ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_release - Release PTT Window + * qed_ptt_release(): Release PTT Window. * - * Should be called at the end of a flow - at the end of the function that - * acquired the PTT. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * + * Return: Void. * - * @param p_hwfn - * @param p_ptt + * Should be called at the end of a flow - at the end of the function that + * acquired the PTT. */ void qed_ptt_release(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -205,15 +216,17 @@ enum qed_dmae_address_type_t { }; /** - * @brief qed_dmae_host2grc - copy data from source addr to - * dmae registers using the given ptt + * qed_dmae_host2grc(): Copy data from source addr to + * dmae registers using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @source_addr: Source address. + * @grc_addr: GRC address (dmae_data_offset). + * @size_in_dwords: Size. + * @p_params: (default parameters will be used in case of NULL). * - * @param p_hwfn - * @param p_ptt - * @param source_addr - * @param grc_addr (dmae_data_offset) - * @param size_in_dwords - * @param p_params (default parameters will be used in case of NULL) + * Return: Int. 
*/ int qed_dmae_host2grc(struct qed_hwfn *p_hwfn, @@ -224,29 +237,34 @@ qed_dmae_host2grc(struct qed_hwfn *p_hwfn, struct qed_dmae_params *p_params); /** - * @brief qed_dmae_grc2host - Read data from dmae data offset - * to source address using the given ptt + * qed_dmae_grc2host(): Read data from dmae data offset + * to source address using the given ptt. + * + * @p_ptt: P_ptt. + * @grc_addr: GRC address (dmae_data_offset). + * @dest_addr: Destination Address. + * @size_in_dwords: Size. + * @p_params: (default parameters will be used in case of NULL). * - * @param p_ptt - * @param grc_addr (dmae_data_offset) - * @param dest_addr - * @param size_in_dwords - * @param p_params (default parameters will be used in case of NULL) + * Return: Int. */ int qed_dmae_grc2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 grc_addr, dma_addr_t dest_addr, u32 size_in_dwords, struct qed_dmae_params *p_params); /** - * @brief qed_dmae_host2host - copy data from to source address - * to a destination adress (for SRIOV) using the given ptt + * qed_dmae_host2host(): Copy data from to source address + * to a destination adrress (for SRIOV) using the given + * ptt. * - * @param p_hwfn - * @param p_ptt - * @param source_addr - * @param dest_addr - * @param size_in_dwords - * @param p_params (default parameters will be used in case of NULL) + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @source_addr: Source address. + * @dest_addr: Destination address. + * @size_in_dwords: size. + * @p_params: (default parameters will be used in case of NULL). + * + * Return: Int. */ int qed_dmae_host2host(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -259,51 +277,51 @@ int qed_chain_alloc(struct qed_dev *cdev, struct qed_chain *chain, void qed_chain_free(struct qed_dev *cdev, struct qed_chain *chain); /** - * @@brief qed_fw_l2_queue - Get absolute L2 queue ID + * qed_fw_l2_queue(): Get absolute L2 queue ID. * - * @param p_hwfn - * @param src_id - relative to p_hwfn - * @param dst_id - absolute per engine + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. * - * @return int + * Return: Int. */ int qed_fw_l2_queue(struct qed_hwfn *p_hwfn, u16 src_id, u16 *dst_id); /** - * @@brief qed_fw_vport - Get absolute vport ID + * qed_fw_vport(): Get absolute vport ID. * - * @param p_hwfn - * @param src_id - relative to p_hwfn - * @param dst_id - absolute per engine + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. * - * @return int + * Return: Int. */ int qed_fw_vport(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id); /** - * @@brief qed_fw_rss_eng - Get absolute RSS engine ID + * qed_fw_rss_eng(): Get absolute RSS engine ID. * - * @param p_hwfn - * @param src_id - relative to p_hwfn - * @param dst_id - absolute per engine + * @p_hwfn: HW device data. + * @src_id: Relative to p_hwfn. + * @dst_id: Absolute per engine. * - * @return int + * Return: Int. */ int qed_fw_rss_eng(struct qed_hwfn *p_hwfn, u8 src_id, u8 *dst_id); /** - * @brief qed_llh_get_num_ppfid - Return the allocated number of LLH filter - * banks that are allocated to the PF. + * qed_llh_get_num_ppfid(): Return the allocated number of LLH filter + * banks that are allocated to the PF. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return u8 - Number of LLH filter banks + * Return: u8 Number of LLH filter banks. 
*/ u8 qed_llh_get_num_ppfid(struct qed_dev *cdev); @@ -314,45 +332,50 @@ enum qed_eng { }; /** - * @brief qed_llh_set_ppfid_affinity - Set the engine affinity for the given - * LLH filter bank. + * qed_llh_set_ppfid_affinity(): Set the engine affinity for the given + * LLH filter bank. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param eng + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @eng: Engine. * - * @return int + * Return: Int. */ int qed_llh_set_ppfid_affinity(struct qed_dev *cdev, u8 ppfid, enum qed_eng eng); /** - * @brief qed_llh_set_roce_affinity - Set the RoCE engine affinity + * qed_llh_set_roce_affinity(): Set the RoCE engine affinity. * - * @param cdev - * @param eng + * @cdev: Qed dev pointer. + * @eng: Engine. * - * @return int + * Return: Int. */ int qed_llh_set_roce_affinity(struct qed_dev *cdev, enum qed_eng eng); /** - * @brief qed_llh_add_mac_filter - Add a LLH MAC filter into the given filter - * bank. + * qed_llh_add_mac_filter(): Add a LLH MAC filter into the given filter + * bank. + * + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @mac_addr: MAC to add. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param mac_addr - MAC to add + * Return: Int. */ int qed_llh_add_mac_filter(struct qed_dev *cdev, u8 ppfid, u8 mac_addr[ETH_ALEN]); /** - * @brief qed_llh_remove_mac_filter - Remove a LLH MAC filter from the given - * filter bank. + * qed_llh_remove_mac_filter(): Remove a LLH MAC filter from the given + * filter bank. * - * @param p_ptt - * @param p_filter - MAC to remove + * @cdev: Qed dev pointer. + * @ppfid: Ppfid. + * @mac_addr: MAC to remove + * + * Return: Void. */ void qed_llh_remove_mac_filter(struct qed_dev *cdev, u8 ppfid, u8 mac_addr[ETH_ALEN]); @@ -368,15 +391,16 @@ enum qed_llh_prot_filter_type_t { }; /** - * @brief qed_llh_add_protocol_filter - Add a LLH protocol filter into the - * given filter bank. + * qed_llh_add_protocol_filter(): Add a LLH protocol filter into the + * given filter bank. + * + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). + * @type: Type of filters and comparing. + * @source_port_or_eth_type: Source port or ethertype to add. + * @dest_port: Destination port to add. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param type - type of filters and comparing - * @param source_port_or_eth_type - source port or ethertype to add - * @param dest_port - destination port to add - * @param type - type of filters and comparing + * Return: Int. */ int qed_llh_add_protocol_filter(struct qed_dev *cdev, @@ -385,14 +409,14 @@ qed_llh_add_protocol_filter(struct qed_dev *cdev, u16 source_port_or_eth_type, u16 dest_port); /** - * @brief qed_llh_remove_protocol_filter - Remove a LLH protocol filter from - * the given filter bank. + * qed_llh_remove_protocol_filter(): Remove a LLH protocol filter from + * the given filter bank. * - * @param cdev - * @param ppfid - relative within the allocated ppfids ('0' is the default one). - * @param type - type of filters and comparing - * @param source_port_or_eth_type - source port or ethertype to add - * @param dest_port - destination port to add + * @cdev: Qed dev pointer. + * @ppfid: Relative within the allocated ppfids ('0' is the default one). 
+ * @type: Type of filters and comparing. + * @source_port_or_eth_type: Source port or ethertype to add. + * @dest_port: Destination port to add. */ void qed_llh_remove_protocol_filter(struct qed_dev *cdev, @@ -401,31 +425,31 @@ qed_llh_remove_protocol_filter(struct qed_dev *cdev, u16 source_port_or_eth_type, u16 dest_port); /** - * *@brief Cleanup of previous driver remains prior to load + * qed_final_cleanup(): Cleanup of previous driver remains prior to load. * - * @param p_hwfn - * @param p_ptt - * @param id - For PF, engine-relative. For VF, PF-relative. - * @param is_vf - true iff cleanup is made for a VF. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @id: For PF, engine-relative. For VF, PF-relative. + * @is_vf: True iff cleanup is made for a VF. * - * @return int + * Return: Int. */ int qed_final_cleanup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 id, bool is_vf); /** - * @brief qed_get_queue_coalesce - Retrieve coalesce value for a given queue. + * qed_get_queue_coalesce(): Retrieve coalesce value for a given queue. * - * @param p_hwfn - * @param p_coal - store coalesce value read from the hardware. - * @param p_handle + * @p_hwfn: HW device data. + * @coal: Store coalesce value read from the hardware. + * @handle: P_handle. * - * @return int + * Return: Int. **/ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); /** - * @brief qed_set_queue_coalesce - Configure coalesce parameters for Rx and + * qed_set_queue_coalesce(): Configure coalesce parameters for Rx and * Tx queue. The fact that we can configure coalescing to up to 511, but on * varying accuracy [the bigger the value the less accurate] up to a mistake * of 3usec for the highest values. @@ -433,37 +457,38 @@ int qed_get_queue_coalesce(struct qed_hwfn *p_hwfn, u16 *coal, void *handle); * should be in same range [i.e., either 0-0x7f, 0x80-0xff or 0x100-0x1ff] * otherwise configuration would break. * + * @rx_coal: Rx Coalesce value in micro seconds. + * @tx_coal: TX Coalesce value in micro seconds. + * @p_handle: P_handle. * - * @param rx_coal - Rx Coalesce value in micro seconds. - * @param tx_coal - TX Coalesce value in micro seconds. - * @param p_handle - * - * @return int + * Return: Int. **/ int qed_set_queue_coalesce(u16 rx_coal, u16 tx_coal, void *p_handle); /** - * @brief qed_pglueb_set_pfid_enable - Enable or disable PCI BUS MASTER + * qed_pglueb_set_pfid_enable(): Enable or disable PCI BUS MASTER. * - * @param p_hwfn - * @param p_ptt - * @param b_enable - true/false + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @b_enable: True/False. * - * @return int + * Return: Int. */ int qed_pglueb_set_pfid_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_enable); /** - * @brief db_recovery_add - add doorbell information to the doorbell - * recovery mechanism. + * qed_db_recovery_add(): add doorbell information to the doorbell + * recovery mechanism. * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address of where db_data is stored - * @param db_width - doorbell is 32b pr 64b - * @param db_space - doorbell recovery addresses are user or kernel space + * @cdev: Qed dev pointer. + * @db_addr: Doorbell address. + * @db_data: Address of where db_data is stored. + * @db_width: Doorbell is 32b pr 64b. + * @db_space: Doorbell recovery addresses are user or kernel space. + * + * Return: Int. 
*/ int qed_db_recovery_add(struct qed_dev *cdev, void __iomem *db_addr, @@ -472,13 +497,15 @@ int qed_db_recovery_add(struct qed_dev *cdev, enum qed_db_rec_space db_space); /** - * @brief db_recovery_del - remove doorbell information from the doorbell + * qed_db_recovery_del() - remove doorbell information from the doorbell * recovery mechanism. db_data serves as key (db_addr is not unique). * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address where db_data is stored. Serves as key for the + * @cdev: Qed dev pointer. + * @db_addr: doorbell address. + * @db_data: address where db_data is stored. Serves as key for the * entry to delete. + * + * Return: Int. */ int qed_db_recovery_del(struct qed_dev *cdev, void __iomem *db_addr, void *db_data); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h index fb1baa2da2d0..744c82a10875 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h @@ -3012,96 +3012,102 @@ struct iro { /***************************** Public Functions *******************************/ /** - * @brief qed_dbg_set_bin_ptr - Sets a pointer to the binary data with debug - * arrays. + * qed_dbg_set_bin_ptr(): Sets a pointer to the binary data with debug + * arrays. * - * @param p_hwfn - HW device data - * @param bin_ptr - a pointer to the binary data with debug arrays. + * @p_hwfn: HW device data. + * @bin_ptr: A pointer to the binary data with debug arrays. + * + * Return: enum dbg status. */ enum dbg_status qed_dbg_set_bin_ptr(struct qed_hwfn *p_hwfn, const u8 * const bin_ptr); /** - * @brief qed_read_regs - Reads registers into a buffer (using GRC). + * qed_read_regs(): Reads registers into a buffer (using GRC). + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf: Destination buffer. + * @addr: Source GRC address in dwords. + * @len: Number of registers to read. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf - Destination buffer. - * @param addr - Source GRC address in dwords. - * @param len - Number of registers to read. + * Return: Void. */ void qed_read_regs(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf, u32 addr, u32 len); /** - * @brief qed_read_fw_info - Reads FW info from the chip. + * qed_read_fw_info(): Reads FW info from the chip. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @fw_info: (Out) a pointer to write the FW info into. + * + * Return: True if the FW info was read successfully from one of the Storms, + * or false if all Storms are in reset. * * The FW info contains FW-related information, such as the FW version, * FW image (main/L2B/kuku), FW timestamp, etc. * The FW info is read from the internal RAM of the first Storm that is not in * reset. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param fw_info - Out: a pointer to write the FW info into. - * - * @return true if the FW info was read successfully from one of the Storms, - * or false if all Storms are in reset. */ bool qed_read_fw_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct fw_info *fw_info); /** - * @brief qed_dbg_grc_config - Sets the value of a GRC parameter. + * qed_dbg_grc_config(): Sets the value of a GRC parameter. * - * @param p_hwfn - HW device data - * @param grc_param - GRC parameter - * @param val - Value to set. 
+ * @p_hwfn: HW device data. + * @grc_param: GRC parameter. + * @val: Value to set. * - * @return error if one of the following holds: - * - the version wasn't set - * - grc_param is invalid - * - val is outside the allowed boundaries + * Return: Error if one of the following holds: + * - The version wasn't set. + * - Grc_param is invalid. + * - Val is outside the allowed boundaries. */ enum dbg_status qed_dbg_grc_config(struct qed_hwfn *p_hwfn, enum dbg_grc_params grc_param, u32 val); /** - * @brief qed_dbg_grc_set_params_default - Reverts all GRC parameters to their - * default value. + * qed_dbg_grc_set_params_default(): Reverts all GRC parameters to their + * default value. + * + * @p_hwfn: HW device data. * - * @param p_hwfn - HW device data + * Return: Void. */ void qed_dbg_grc_set_params_default(struct qed_hwfn *p_hwfn); /** - * @brief qed_dbg_grc_get_dump_buf_size - Returns the required buffer size for - * GRC Dump. + * qed_dbg_grc_get_dump_buf_size(): Returns the required buffer size for + * GRC Dump. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for the GRC Dump - * data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) required buffer size (in dwords) for the GRC Dump + * data. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. */ enum dbg_status qed_dbg_grc_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); /** - * @brief qed_dbg_grc_dump - Dumps GRC data into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the collected GRC data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified dump buffer is too small - * Otherwise, returns ok. + * qed_dbg_grc_dump(): Dumps GRC data into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the collected GRC data into. + * @buf_size_in_dwords:Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified dump buffer is too small. + * Otherwise, returns ok. */ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3110,36 +3116,36 @@ enum dbg_status qed_dbg_grc_dump(struct qed_hwfn *p_hwfn, u32 *num_dumped_dwords); /** - * @brief qed_dbg_idle_chk_get_dump_buf_size - Returns the required buffer size - * for idle check results. + * qed_dbg_idle_chk_get_dump_buf_size(): Returns the required buffer size + * for idle check results. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for the idle check - * data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) required buffer size (in dwords) for the idle check + * data. 
* - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. */ enum dbg_status qed_dbg_idle_chk_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); /** - * @brief qed_dbg_idle_chk_dump - Performs idle check and writes the results - * into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the idle check data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * Otherwise, returns ok. + * qed_dbg_idle_chk_dump: Performs idle check and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the idle check data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * Otherwise, returns ok. */ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3148,42 +3154,42 @@ enum dbg_status qed_dbg_idle_chk_dump(struct qed_hwfn *p_hwfn, u32 *num_dumped_dwords); /** - * @brief qed_dbg_mcp_trace_get_dump_buf_size - Returns the required buffer size - * for mcp trace results. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for mcp trace data. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the trace data in MCP scratchpad contain an invalid signature - * - the bundle ID in NVRAM is invalid - * - the trace meta data cannot be found (in NVRAM or image file) - * Otherwise, returns ok. + * qed_dbg_mcp_trace_get_dump_buf_size(): Returns the required buffer size + * for mcp trace results. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for mcp trace data. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The trace data in MCP scratchpad contain an invalid signature. + * - The bundle ID in NVRAM is invalid. + * - The trace meta data cannot be found (in NVRAM or image file). + * Otherwise, returns ok. */ enum dbg_status qed_dbg_mcp_trace_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); /** - * @brief qed_dbg_mcp_trace_dump - Performs mcp trace and writes the results - * into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the mcp trace data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. 
- * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - the trace data in MCP scratchpad contain an invalid signature - * - the bundle ID in NVRAM is invalid - * - the trace meta data cannot be found (in NVRAM or image file) - * - the trace meta data cannot be read (from NVRAM or image file) - * Otherwise, returns ok. + * qed_dbg_mcp_trace_dump(): Performs mcp trace and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the mcp trace data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - The trace data in MCP scratchpad contain an invalid signature. + * - The bundle ID in NVRAM is invalid. + * - The trace meta data cannot be found (in NVRAM or image file). + * - The trace meta data cannot be read (from NVRAM or image file). + * Otherwise, returns ok. */ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3192,36 +3198,36 @@ enum dbg_status qed_dbg_mcp_trace_dump(struct qed_hwfn *p_hwfn, u32 *num_dumped_dwords); /** - * @brief qed_dbg_reg_fifo_get_dump_buf_size - Returns the required buffer size - * for grc trace fifo results. + * qed_dbg_reg_fifo_get_dump_buf_size(): Returns the required buffer size + * for grc trace fifo results. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for reg fifo data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for reg fifo data. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. */ enum dbg_status qed_dbg_reg_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); /** - * @brief qed_dbg_reg_fifo_dump - Reads the reg fifo and writes the results into - * the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the reg fifo data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - DMAE transaction failed - * Otherwise, returns ok. + * qed_dbg_reg_fifo_dump(): Reads the reg fifo and writes the results into + * the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the reg fifo data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - DMAE transaction failed. + * Otherwise, returns ok. 
*/ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3230,37 +3236,37 @@ enum dbg_status qed_dbg_reg_fifo_dump(struct qed_hwfn *p_hwfn, u32 *num_dumped_dwords); /** - * @brief qed_dbg_igu_fifo_get_dump_buf_size - Returns the required buffer size - * for the IGU fifo results. + * qed_dbg_igu_fifo_get_dump_buf_size(): Returns the required buffer size + * for the IGU fifo results. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for the IGU fifo - * data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for the IGU fifo + * data. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. */ enum dbg_status qed_dbg_igu_fifo_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); /** - * @brief qed_dbg_igu_fifo_dump - Reads the IGU fifo and writes the results into - * the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the IGU fifo data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - DMAE transaction failed - * Otherwise, returns ok. + * qed_dbg_igu_fifo_dump(): Reads the IGU fifo and writes the results into + * the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the IGU fifo data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set + * - The specified buffer is too small + * - DMAE transaction failed + * Otherwise, returns ok. */ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3269,37 +3275,37 @@ enum dbg_status qed_dbg_igu_fifo_dump(struct qed_hwfn *p_hwfn, u32 *num_dumped_dwords); /** - * @brief qed_dbg_protection_override_get_dump_buf_size - Returns the required - * buffer size for protection override window results. + * qed_dbg_protection_override_get_dump_buf_size(): Returns the required + * buffer size for protection override window results. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for protection - * override data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for protection + * override data. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. 
*/ enum dbg_status qed_dbg_protection_override_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); /** - * @brief qed_dbg_protection_override_dump - Reads protection override window - * entries and writes the results into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the protection override data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * - DMAE transaction failed - * Otherwise, returns ok. + * qed_dbg_protection_override_dump(): Reads protection override window + * entries and writes the results into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the protection override data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * @return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. + * - DMAE transaction failed. + * Otherwise, returns ok. */ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3307,34 +3313,34 @@ enum dbg_status qed_dbg_protection_override_dump(struct qed_hwfn *p_hwfn, u32 buf_size_in_dwords, u32 *num_dumped_dwords); /** - * @brief qed_dbg_fw_asserts_get_dump_buf_size - Returns the required buffer - * size for FW Asserts results. + * qed_dbg_fw_asserts_get_dump_buf_size(): Returns the required buffer + * size for FW Asserts results. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param buf_size - OUT: required buffer size (in dwords) for FW Asserts data. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @buf_size: (OUT) Required buffer size (in dwords) for FW Asserts data. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. */ enum dbg_status qed_dbg_fw_asserts_get_dump_buf_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *buf_size); /** - * @brief qed_dbg_fw_asserts_dump - Reads the FW Asserts and writes the results - * into the specified buffer. - * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param dump_buf - Pointer to write the FW Asserts data into. - * @param buf_size_in_dwords - Size of the specified buffer in dwords. - * @param num_dumped_dwords - OUT: number of dumped dwords. - * - * @return error if one of the following holds: - * - the version wasn't set - * - the specified buffer is too small - * Otherwise, returns ok. + * qed_dbg_fw_asserts_dump(): Reads the FW Asserts and writes the results + * into the specified buffer. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dump_buf: Pointer to write the FW Asserts data into. + * @buf_size_in_dwords: Size of the specified buffer in dwords. + * @num_dumped_dwords: (OUT) number of dumped dwords. + * + * Return: Error if one of the following holds: + * - The version wasn't set. + * - The specified buffer is too small. 
+ * Otherwise, returns ok. */ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3343,19 +3349,19 @@ enum dbg_status qed_dbg_fw_asserts_dump(struct qed_hwfn *p_hwfn, u32 *num_dumped_dwords); /** - * @brief qed_dbg_read_attn - Reads the attention registers of the specified + * qed_dbg_read_attn(): Reads the attention registers of the specified * block and type, and writes the results into the specified buffer. * - * @param p_hwfn - HW device data - * @param p_ptt - Ptt window used for writing the registers. - * @param block - Block ID. - * @param attn_type - Attention type. - * @param clear_status - Indicates if the attention status should be cleared. - * @param results - OUT: Pointer to write the read results into + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @block: Block ID. + * @attn_type: Attention type. + * @clear_status: Indicates if the attention status should be cleared. + * @results: (OUT) Pointer to write the read results into. * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. */ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3365,15 +3371,15 @@ enum dbg_status qed_dbg_read_attn(struct qed_hwfn *p_hwfn, struct dbg_attn_block_result *results); /** - * @brief qed_dbg_print_attn - Prints attention registers values in the - * specified results struct. + * qed_dbg_print_attn(): Prints attention registers values in the + * specified results struct. * - * @param p_hwfn - * @param results - Pointer to the attention read results + * @p_hwfn: HW device data. + * @results: Pointer to the attention read results * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set + * Otherwise, returns ok. */ enum dbg_status qed_dbg_print_attn(struct qed_hwfn *p_hwfn, struct dbg_attn_block_result *results); @@ -3420,60 +3426,64 @@ struct dbg_tools_user_data { /***************************** Public Functions *******************************/ /** - * @brief qed_dbg_user_set_bin_ptr - Sets a pointer to the binary data with - * debug arrays. + * qed_dbg_user_set_bin_ptr(): Sets a pointer to the binary data with + * debug arrays. * - * @param p_hwfn - HW device data - * @param bin_ptr - a pointer to the binary data with debug arrays. + * @p_hwfn: HW device data. + * @bin_ptr: a pointer to the binary data with debug arrays. + * + * Return: dbg_status. */ enum dbg_status qed_dbg_user_set_bin_ptr(struct qed_hwfn *p_hwfn, const u8 * const bin_ptr); /** - * @brief qed_dbg_alloc_user_data - Allocates user debug data. + * qed_dbg_alloc_user_data(): Allocates user debug data. + * + * @p_hwfn: HW device data. + * @user_data_ptr: (OUT) a pointer to the allocated memory. * - * @param p_hwfn - HW device data - * @param user_data_ptr - OUT: a pointer to the allocated memory. + * Return: dbg_status. */ enum dbg_status qed_dbg_alloc_user_data(struct qed_hwfn *p_hwfn, void **user_data_ptr); /** - * @brief qed_dbg_get_status_str - Returns a string for the specified status. + * qed_dbg_get_status_str(): Returns a string for the specified status. * - * @param status - a debug status code. + * @status: A debug status code. * - * @return a string for the specified status + * Return: A string for the specified status. 
*/ const char *qed_dbg_get_status_str(enum dbg_status status); /** - * @brief qed_get_idle_chk_results_buf_size - Returns the required buffer size - * for idle check results (in bytes). + * qed_get_idle_chk_results_buf_size(): Returns the required buffer size + * for idle check results (in bytes). * - * @param p_hwfn - HW device data - * @param dump_buf - idle check dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. + * @p_hwfn: HW device data. + * @dump_buf: idle check dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_get_idle_chk_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, u32 num_dumped_dwords, u32 *results_buf_size); /** - * @brief qed_print_idle_chk_results - Prints idle check results + * qed_print_idle_chk_results(): Prints idle check results * - * @param p_hwfn - HW device data - * @param dump_buf - idle check dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the idle check results. - * @param num_errors - OUT: number of errors found in idle check. - * @param num_warnings - OUT: number of warnings found in idle check. + * @p_hwfn: HW device data. + * @dump_buf: idle check dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf: buffer for printing the idle check results. + * @num_errors: (OUT) number of errors found in idle check. + * @num_warnings: (OUT) number of warnings found in idle check. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3483,28 +3493,30 @@ enum dbg_status qed_print_idle_chk_results(struct qed_hwfn *p_hwfn, u32 *num_warnings); /** - * @brief qed_dbg_mcp_trace_set_meta_data - Sets the MCP Trace meta data. + * qed_dbg_mcp_trace_set_meta_data(): Sets the MCP Trace meta data. + * + * @p_hwfn: HW device data. + * @meta_buf: Meta buffer. + * + * Return: Void. * * Needed in case the MCP Trace dump doesn't contain the meta data (e.g. due to * no NVRAM access). - * - * @param data - pointer to MCP Trace meta data - * @param size - size of MCP Trace meta data in dwords */ void qed_dbg_mcp_trace_set_meta_data(struct qed_hwfn *p_hwfn, const u32 *meta_buf); /** - * @brief qed_get_mcp_trace_results_buf_size - Returns the required buffer size - * for MCP Trace results (in bytes). + * qed_get_mcp_trace_results_buf_size(): Returns the required buffer size + * for MCP Trace results (in bytes). * - * @param p_hwfn - HW device data - * @param dump_buf - MCP Trace dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. + * @p_hwfn: HW device data. + * @dump_buf: MCP Trace dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise.
*/ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3512,14 +3524,14 @@ enum dbg_status qed_get_mcp_trace_results_buf_size(struct qed_hwfn *p_hwfn, u32 *results_buf_size); /** - * @brief qed_print_mcp_trace_results - Prints MCP Trace results + * qed_print_mcp_trace_results(): Prints MCP Trace results * - * @param p_hwfn - HW device data - * @param dump_buf - mcp trace dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the mcp trace results. + * @p_hwfn: HW device data. + * @dump_buf: MCP trace dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the mcp trace results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3527,30 +3539,30 @@ enum dbg_status qed_print_mcp_trace_results(struct qed_hwfn *p_hwfn, char *results_buf); /** - * @brief qed_print_mcp_trace_results_cont - Prints MCP Trace results, and + * qed_print_mcp_trace_results_cont(): Prints MCP Trace results, and * keeps the MCP trace meta data allocated, to support continuous MCP Trace * parsing. After the continuous parsing ends, mcp_trace_free_meta_data should * be called to free the meta data. * - * @param p_hwfn - HW device data - * @param dump_buf - mcp trace dump buffer, starting from the header. - * @param results_buf - buffer for printing the mcp trace results. + * @p_hwfn: HW device data. + * @dump_buf: MCP trace dump buffer, starting from the header. + * @results_buf: Buffer for printing the mcp trace results. + * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_print_mcp_trace_results_cont(struct qed_hwfn *p_hwfn, u32 *dump_buf, char *results_buf); /** - * @brief print_mcp_trace_line - Prints MCP Trace results for a single line + * qed_print_mcp_trace_line(): Prints MCP Trace results for a single line * - * @param p_hwfn - HW device data - * @param dump_buf - mcp trace dump buffer, starting from the header. - * @param num_dumped_bytes - number of bytes that were dumped. - * @param results_buf - buffer for printing the mcp trace results. + * @p_hwfn: HW device data. + * @dump_buf: MCP trace dump buffer, starting from the header. + * @num_dumped_bytes: Number of bytes that were dumped. + * @results_buf: Buffer for printing the mcp trace results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, u8 *dump_buf, @@ -3558,24 +3570,26 @@ enum dbg_status qed_print_mcp_trace_line(struct qed_hwfn *p_hwfn, char *results_buf); /** - * @brief mcp_trace_free_meta_data - Frees the MCP Trace meta data. + * qed_mcp_trace_free_meta_data(): Frees the MCP Trace meta data. * Should be called after continuous MCP Trace parsing. * - * @param p_hwfn - HW device data + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_mcp_trace_free_meta_data(struct qed_hwfn *p_hwfn); /** - * @brief qed_get_reg_fifo_results_buf_size - Returns the required buffer size - * for reg_fifo results (in bytes). + * qed_get_reg_fifo_results_buf_size(): Returns the required buffer size + * for reg_fifo results (in bytes).
* - * @param p_hwfn - HW device data - * @param dump_buf - reg fifo dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. + * @p_hwfn: HW device data. + * @dump_buf: Reg fifo dump buffer. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3583,14 +3597,14 @@ enum dbg_status qed_get_reg_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 *results_buf_size); /** - * @brief qed_print_reg_fifo_results - Prints reg fifo results + * qed_print_reg_fifo_results(): Prints reg fifo results. * - * @param p_hwfn - HW device data - * @param dump_buf - reg fifo dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the reg fifo results. + * @p_hwfn: HW device data. + * @dump_buf: Reg fifo dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the reg fifo results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3598,16 +3612,16 @@ enum dbg_status qed_print_reg_fifo_results(struct qed_hwfn *p_hwfn, char *results_buf); /** - * @brief qed_get_igu_fifo_results_buf_size - Returns the required buffer size - * for igu_fifo results (in bytes). + * qed_get_igu_fifo_results_buf_size(): Returns the required buffer size + * for igu_fifo results (in bytes). * - * @param p_hwfn - HW device data - * @param dump_buf - IGU fifo dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. + * @p_hwfn: HW device data. + * @dump_buf: IGU fifo dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3615,14 +3629,14 @@ enum dbg_status qed_get_igu_fifo_results_buf_size(struct qed_hwfn *p_hwfn, u32 *results_buf_size); /** - * @brief qed_print_igu_fifo_results - Prints IGU fifo results + * qed_print_igu_fifo_results(): Prints IGU fifo results * - * @param p_hwfn - HW device data - * @param dump_buf - IGU fifo dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the IGU fifo results. + * @p_hwfn: HW device data. + * @dump_buf: IGU fifo dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the IGU fifo results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. 
*/ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3630,16 +3644,16 @@ enum dbg_status qed_print_igu_fifo_results(struct qed_hwfn *p_hwfn, char *results_buf); /** - * @brief qed_get_protection_override_results_buf_size - Returns the required - * buffer size for protection override results (in bytes). + * qed_get_protection_override_results_buf_size(): Returns the required + * buffer size for protection override results (in bytes). * - * @param p_hwfn - HW device data - * @param dump_buf - protection override dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. + * @p_hwfn: HW device data. + * @dump_buf: Protection override dump buffer. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, @@ -3648,15 +3662,15 @@ qed_get_protection_override_results_buf_size(struct qed_hwfn *p_hwfn, u32 *results_buf_size); /** - * @brief qed_print_protection_override_results - Prints protection override - * results. + * qed_print_protection_override_results(): Prints protection override + * results. * - * @param p_hwfn - HW device data - * @param dump_buf - protection override dump buffer, starting from the header. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the reg fifo results. + * @p_hwfn: HW device data. + * @dump_buf: Protection override dump buffer, starting from the header. + * @num_dumped_dwords: Number of dwords that were dumped. + * @results_buf: Buffer for printing the reg fifo results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3664,16 +3678,16 @@ enum dbg_status qed_print_protection_override_results(struct qed_hwfn *p_hwfn, char *results_buf); /** - * @brief qed_get_fw_asserts_results_buf_size - Returns the required buffer size - * for FW Asserts results (in bytes). + * qed_get_fw_asserts_results_buf_size(): Returns the required buffer size + * for FW Asserts results (in bytes). * - * @param p_hwfn - HW device data - * @param dump_buf - FW Asserts dump buffer. - * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf_size - OUT: required buffer size (in bytes) for the parsed - * results. + * @p_hwfn: HW device data. + * @dump_buf: FW Asserts dump buffer. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf_size: (OUT) required buffer size (in bytes) for the parsed + * results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3681,14 +3695,14 @@ enum dbg_status qed_get_fw_asserts_results_buf_size(struct qed_hwfn *p_hwfn, u32 *results_buf_size); /** - * @brief qed_print_fw_asserts_results - Prints FW Asserts results + * qed_print_fw_asserts_results(): Prints FW Asserts results. * - * @param p_hwfn - HW device data - * @param dump_buf - FW Asserts dump buffer, starting from the header. 
- * @param num_dumped_dwords - number of dwords that were dumped. - * @param results_buf - buffer for printing the FW Asserts results. + * @p_hwfn: HW device data. + * @dump_buf: FW Asserts dump buffer, starting from the header. + * @num_dumped_dwords: number of dwords that were dumped. + * @results_buf: buffer for printing the FW Asserts results. * - * @return error if the parsing fails, ok otherwise. + * Return: Error if the parsing fails, ok otherwise. */ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, u32 *dump_buf, @@ -3696,15 +3710,15 @@ enum dbg_status qed_print_fw_asserts_results(struct qed_hwfn *p_hwfn, char *results_buf); /** - * @brief qed_dbg_parse_attn - Parses and prints attention registers values in - * the specified results struct. + * qed_dbg_parse_attn(): Parses and prints attention registers values in + * the specified results struct. * - * @param p_hwfn - HW device data - * @param results - Pointer to the attention read results + * @p_hwfn: HW device data. + * @results: Pointer to the attention read results * - * @return error if one of the following holds: - * - the version wasn't set - * Otherwise, returns ok. + * Return: Error if one of the following holds: + * - The version wasn't set. + * Otherwise, returns ok. */ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, struct dbg_attn_block_result *results); @@ -3746,18 +3760,18 @@ enum dbg_status qed_dbg_parse_attn(struct qed_hwfn *p_hwfn, #define GTT_BAR0_MAP_REG_PSDM_RAM 0x01a000UL /** - * @brief qed_qm_pf_mem_size - prepare QM ILT sizes + * qed_qm_pf_mem_size(): Prepare QM ILT sizes. * - * Returns the required host memory size in 4KB units. - * Must be called before all QM init HSI functions. + * @num_pf_cids: Number of connections used by this PF. + * @num_vf_cids: Number of connections used by VFs of this PF. + * @num_tids: Number of tasks used by this PF. + * @num_pf_pqs: Number of PQs used by this PF. + * @num_vf_pqs: Number of PQs used by VFs of this PF. * - * @param num_pf_cids - number of connections used by this PF - * @param num_vf_cids - number of connections used by VFs of this PF - * @param num_tids - number of tasks used by this PF - * @param num_pf_pqs - number of PQs used by this PF - * @param num_vf_pqs - number of PQs used by VFs of this PF + * Return: The required host memory size in 4KB units. * - * @return The required host memory size in 4KB units. + * Returns the required host memory size in 4KB units. + * Must be called before all QM init HSI functions. */ u32 qed_qm_pf_mem_size(u32 num_pf_cids, u32 num_vf_cids, @@ -3800,74 +3814,74 @@ int qed_qm_pf_rt_init(struct qed_hwfn *p_hwfn, struct qed_qm_pf_rt_init_params *p_params); /** - * @brief qed_init_pf_wfq - Initializes the WFQ weight of the specified PF + * qed_init_pf_wfq(): Initializes the WFQ weight of the specified PF. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param pf_id - PF ID - * @param pf_wfq - WFQ weight. Must be non-zero. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @pf_id: PF ID + * @pf_wfq: WFQ weight. Must be non-zero. * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error. 
*/ int qed_init_pf_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 pf_id, u16 pf_wfq); /** - * @brief qed_init_pf_rl - Initializes the rate limit of the specified PF + * qed_init_pf_rl(): Initializes the rate limit of the specified PF * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param pf_id - PF ID - * @param pf_rl - rate limit in Mb/sec units + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF ID. + * @pf_rl: rate limit in Mb/sec units * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error. */ int qed_init_pf_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 pf_id, u32 pf_rl); /** - * @brief qed_init_vport_wfq Initializes the WFQ weight of the specified VPORT + * qed_init_vport_wfq(): Initializes the WFQ weight of the specified VPORT * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param first_tx_pq_id- An array containing the first Tx PQ ID associated - * with the VPORT for each TC. This array is filled by - * qed_qm_pf_rt_init - * @param vport_wfq - WFQ weight. Must be non-zero. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers + * @first_tx_pq_id: An array containing the first Tx PQ ID associated + * with the VPORT for each TC. This array is filled by + * qed_qm_pf_rt_init + * @wfq: WFQ weight. Must be non-zero. * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error. */ int qed_init_vport_wfq(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 first_tx_pq_id[NUM_OF_TCS], u16 wfq); /** - * @brief qed_init_global_rl - Initializes the rate limit of the specified - * rate limiter + * qed_init_global_rl(): Initializes the rate limit of the specified + * rate limiter. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers - * @param rl_id - RL ID - * @param rate_limit - rate limit in Mb/sec units + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @rl_id: RL ID. + * @rate_limit: Rate limit in Mb/sec units * - * @return 0 on success, -1 on error. + * Return: 0 on success, -1 on error. */ int qed_init_global_rl(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 rl_id, u32 rate_limit); /** - * @brief qed_send_qm_stop_cmd Sends a stop command to the QM + * qed_send_qm_stop_cmd(): Sends a stop command to the QM. * - * @param p_hwfn - * @param p_ptt - * @param is_release_cmd - true for release, false for stop. - * @param is_tx_pq - true for Tx PQs, false for Other PQs. - * @param start_pq - first PQ ID to stop - * @param num_pqs - Number of PQs to stop, starting from start_pq. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @is_release_cmd: true for release, false for stop. + * @is_tx_pq: true for Tx PQs, false for Other PQs. + * @start_pq: first PQ ID to stop + * @num_pqs: Number of PQs to stop, starting from start_pq. * - * @return bool, true if successful, false if timeout occurred while waiting for - * QM command done. + * Return: Bool, true if successful, false if timeout occurred while waiting + * for QM command done. */ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3875,53 +3889,64 @@ bool qed_send_qm_stop_cmd(struct qed_hwfn *p_hwfn, bool is_tx_pq, u16 start_pq, u16 num_pqs); /** - * @brief qed_set_vxlan_dest_port - initializes vxlan tunnel destination udp port + * qed_set_vxlan_dest_port(): Initializes vxlan tunnel destination udp port. 
* - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param dest_port - vxlan destination udp port. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dest_port: vxlan destination udp port. + * + * Return: Void. */ void qed_set_vxlan_dest_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 dest_port); /** - * @brief qed_set_vxlan_enable - enable or disable VXLAN tunnel in HW + * qed_set_vxlan_enable(): Enable or disable VXLAN tunnel in HW. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @vxlan_enable: vxlan enable flag. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param vxlan_enable - vxlan enable flag. + * Return: Void. */ void qed_set_vxlan_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool vxlan_enable); /** - * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW + * qed_set_gre_enable(): Enable or disable GRE tunnel in HW. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @eth_gre_enable: Eth GRE enable flag. + * @ip_gre_enable: IP GRE enable flag. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param eth_gre_enable - eth GRE enable enable flag. - * @param ip_gre_enable - IP GRE enable enable flag. + * Return: Void. */ void qed_set_gre_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool eth_gre_enable, bool ip_gre_enable); /** - * @brief qed_set_geneve_dest_port - initializes geneve tunnel destination udp port + * qed_set_geneve_dest_port(): Initializes geneve tunnel destination udp port * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param dest_port - geneve destination udp port. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @dest_port: Geneve destination udp port. + * + * Return: Void. */ void qed_set_geneve_dest_port(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 dest_port); /** - * @brief qed_set_gre_enable - enable or disable GRE tunnel in HW + * qed_set_geneve_enable(): Enable or disable GENEVE tunnel in HW. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @eth_geneve_enable: Eth GENEVE enable flag. + * @ip_geneve_enable: IP GENEVE enable flag. * - * @param p_ptt - ptt window used for writing the registers. - * @param eth_geneve_enable - eth GENEVE enable enable flag. - * @param ip_geneve_enable - IP GENEVE enable enable flag. + * Return: Void. */ void qed_set_geneve_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3931,25 +3956,29 @@ void qed_set_vxlan_no_l2_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool enable); /** - * @brief qed_gft_disable - Disable GFT + * qed_gft_disable(): Disable GFT. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF on which to disable GFT. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. - * @param pf_id - pf on which to disable GFT. + * Return: Void. */ void qed_gft_disable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 pf_id); /** - * @brief qed_gft_config - Enable and configure HW for GFT - * - * @param p_hwfn - HW device data - * @param p_ptt - ptt window used for writing the registers. - * @param pf_id - pf on which to enable GFT. - * @param tcp - set profile tcp packets. - * @param udp - set profile udp packet.
- * @param ipv4 - set profile ipv4 packet. - * @param ipv6 - set profile ipv6 packet. - * @param profile_type - define packet same fields. Use enum gft_profile_type. + * qed_gft_config(): Enable and configure HW for GFT. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @pf_id: PF on which to enable GFT. + * @tcp: Set profile tcp packets. + * @udp: Set profile udp packet. + * @ipv4: Set profile ipv4 packet. + * @ipv6: Set profile ipv6 packet. + * @profile_type: Define packet same fields. Use enum gft_profile_type. + * + * Return: Void. */ void qed_gft_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -3959,107 +3988,120 @@ void qed_gft_config(struct qed_hwfn *p_hwfn, bool ipv4, bool ipv6, enum gft_profile_type profile_type); /** - * @brief qed_enable_context_validation - Enable and configure context - * validation. + * qed_enable_context_validation(): Enable and configure context + * validation. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. * - * @param p_hwfn - * @param p_ptt - ptt window used for writing the registers. + * Return: Void. */ void qed_enable_context_validation(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_calc_session_ctx_validation - Calcualte validation byte for - * session context. + * qed_calc_session_ctx_validation(): Calculate validation byte for + * session context. * - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - context size. - * @param ctx_type - context type. - * @param cid - context cid. + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Context size. + * @ctx_type: Context type. + * @cid: Context cid. + * + * Return: Void. */ void qed_calc_session_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 cid); /** - * @brief qed_calc_task_ctx_validation - Calcualte validation byte for task - * context. + * qed_calc_task_ctx_validation(): Calculate validation byte for task + * context. + * + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Context size. + * @ctx_type: Context type. + * @tid: Context tid. * - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - context size. - * @param ctx_type - context type. - * @param tid - context tid. + * Return: Void. */ void qed_calc_task_ctx_validation(void *p_ctx_mem, u16 ctx_size, u8 ctx_type, u32 tid); /** - * @brief qed_memset_session_ctx - Memset session context to 0 while - * preserving validation bytes. + * qed_memset_session_ctx(): Memset session context to 0 while + * preserving validation bytes. + * + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Size to initialize. + * @ctx_type: Context type. * - * @param p_hwfn - - * @param p_ctx_mem - pointer to context memory. - * @param ctx_size - size to initialzie. - * @param ctx_type - context type. + * Return: Void. */ void qed_memset_session_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); /** - * @brief qed_memset_task_ctx - Memset task context to 0 while preserving - * validation bytes. + * qed_memset_task_ctx(): Memset task context to 0 while preserving + * validation bytes. + * + * @p_ctx_mem: Pointer to context memory. + * @ctx_size: Size to initialize. + * @ctx_type: Context type. + * + * Return: Void.
*/ void qed_memset_task_ctx(void *p_ctx_mem, u32 ctx_size, u8 ctx_type); #define NUM_STORMS 6 /** - * @brief qed_set_rdma_error_level - Sets the RDMA assert level. - * If the severity of the error will be - * above the level, the FW will assert. - * @param p_hwfn - HW device data - * @param p_ptt - ptt window used for writing the registers - * @param assert_level - An array of assert levels for each storm. + * qed_set_rdma_error_level(): Sets the RDMA assert level. + * If the severity of the error will be + * above the level, the FW will assert. + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @assert_level: An array of assert levels for each storm. * + * Return: Void. */ void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 assert_level[NUM_STORMS]); /** - * @brief qed_fw_overlay_mem_alloc - Allocates and fills the FW overlay memory. + * qed_fw_overlay_mem_alloc(): Allocates and fills the FW overlay memory. * - * @param p_hwfn - HW device data - * @param fw_overlay_in_buf - the input FW overlay buffer. - * @param buf_size - the size of the input FW overlay buffer in bytes. - * must be aligned to dwords. - * @param fw_overlay_out_mem - OUT: a pointer to the allocated overlays memory. + * @p_hwfn: HW device data. + * @fw_overlay_in_buf: The input FW overlay buffer. + * @buf_size_in_bytes: The size of the input FW overlay buffer in bytes. + * must be aligned to dwords. * - * @return a pointer to the allocated overlays memory, + * Return: A pointer to the allocated overlays memory, * or NULL in case of failures. */ struct phys_mem_desc * qed_fw_overlay_mem_alloc(struct qed_hwfn *p_hwfn, - const u32 * const fw_overlay_in_buf, + const u32 *const fw_overlay_in_buf, u32 buf_size_in_bytes); /** - * @brief qed_fw_overlay_init_ram - Initializes the FW overlay RAM. + * qed_fw_overlay_init_ram(): Initializes the FW overlay RAM. + * + * @p_hwfn: HW device data. + * @p_ptt: Ptt window used for writing the registers. + * @fw_overlay_mem: the allocated FW overlay memory. * - * @param p_hwfn - HW device data. - * @param p_ptt - ptt window used for writing the registers. - * @param fw_overlay_mem - the allocated FW overlay memory. + * Return: Void. */ void qed_fw_overlay_init_ram(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct phys_mem_desc *fw_overlay_mem); /** - * @brief qed_fw_overlay_mem_free - Frees the FW overlay memory. + * qed_fw_overlay_mem_free(): Frees the FW overlay memory. + * + * @p_hwfn: HW device data. + * @fw_overlay_mem: The allocated FW overlay memory to free. * - * @param p_hwfn - HW device data. - * @param fw_overlay_mem - the allocated FW overlay memory to free. + * Return: Void. */ void qed_fw_overlay_mem_free(struct qed_hwfn *p_hwfn, struct phys_mem_desc *fw_overlay_mem); diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.h b/drivers/net/ethernet/qlogic/qed/qed_hw.h index 2734f49956f7..e535983ce21b 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.h +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.h @@ -53,85 +53,94 @@ enum _dmae_cmd_crc_mask { #define DMAE_MAX_CLIENTS 32 /** - * @brief qed_gtt_init - Initialize GTT windows + * qed_gtt_init(): Initialize GTT windows. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_gtt_init(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_invalidate - Forces all ptt entries to be re-configured + * qed_ptt_invalidate(): Forces all ptt entries to be re-configured + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. 
*/ void qed_ptt_invalidate(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_pool_alloc - Allocate and initialize PTT pool + * qed_ptt_pool_alloc(): Allocate and initialize PTT pool. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return struct _qed_status - success (0), negative - error. + * Return: struct _qed_status - success (0), negative - error. */ int qed_ptt_pool_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_pool_free - + * qed_ptt_pool_free(): Free PTT pool. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_ptt_pool_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_ptt_get_hw_addr - Get PTT's GRC/HW address + * qed_ptt_get_hw_addr(): Get PTT's GRC/HW address. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt * - * @return u32 + * Return: u32. */ u32 qed_ptt_get_hw_addr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_ptt_get_bar_addr - Get PPT's external BAR address + * qed_ptt_get_bar_addr(): Get PPT's external BAR address. * - * @param p_hwfn - * @param p_ptt + * @p_ptt: P_ptt * - * @return u32 + * Return: u32. */ u32 qed_ptt_get_bar_addr(struct qed_ptt *p_ptt); /** - * @brief qed_ptt_set_win - Set PTT Window's GRC BAR address + * qed_ptt_set_win(): Set PTT Window's GRC BAR address * - * @param p_hwfn - * @param new_hw_addr - * @param p_ptt + * @p_hwfn: HW device data. + * @new_hw_addr: New HW address. + * @p_ptt: P_Ptt + * + * Return: Void. */ void qed_ptt_set_win(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 new_hw_addr); /** - * @brief qed_get_reserved_ptt - Get a specific reserved PTT + * qed_get_reserved_ptt(): Get a specific reserved PTT. * - * @param p_hwfn - * @param ptt_idx + * @p_hwfn: HW device data. + * @ptt_idx: Ptt Index. * - * @return struct qed_ptt * + * Return: struct qed_ptt *. */ struct qed_ptt *qed_get_reserved_ptt(struct qed_hwfn *p_hwfn, enum reserved_ptts ptt_idx); /** - * @brief qed_wr - Write value to BAR using the given ptt + * qed_wr(): Write value to BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @val: Val. + * @hw_addr: HW address * - * @param p_hwfn - * @param p_ptt - * @param val - * @param hw_addr + * Return: Void. */ void qed_wr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -139,26 +148,28 @@ void qed_wr(struct qed_hwfn *p_hwfn, u32 val); /** - * @brief qed_rd - Read value from BAR using the given ptt + * qed_rd(): Read value from BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @hw_addr: HW address * - * @param p_hwfn - * @param p_ptt - * @param val - * @param hw_addr + * Return: Void. */ u32 qed_rd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 hw_addr); /** - * @brief qed_memcpy_from - copy n bytes from BAR using the given - * ptt - * - * @param p_hwfn - * @param p_ptt - * @param dest - * @param hw_addr - * @param n + * qed_memcpy_from(): Copy n bytes from BAR using the given ptt. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @dest: Destination. + * @hw_addr: HW address. + * @n: N + * + * Return: Void. */ void qed_memcpy_from(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -167,14 +178,15 @@ void qed_memcpy_from(struct qed_hwfn *p_hwfn, size_t n); /** - * @brief qed_memcpy_to - copy n bytes to BAR using the given - * ptt - * - * @param p_hwfn - * @param p_ptt - * @param hw_addr - * @param src - * @param n + * qed_memcpy_to(): Copy n bytes to BAR using the given ptt + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @hw_addr: HW address. 
+ * @src: Source. + * @n: N + * + * Return: Void. */ void qed_memcpy_to(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -182,83 +194,97 @@ void qed_memcpy_to(struct qed_hwfn *p_hwfn, void *src, size_t n); /** - * @brief qed_fid_pretend - pretend to another function when - * accessing the ptt window. There is no way to unpretend - * a function. The only way to cancel a pretend is to - * pretend back to the original function. - * - * @param p_hwfn - * @param p_ptt - * @param fid - fid field of pxp_pretend structure. Can contain - * either pf / vf, port/path fields are don't care. + * qed_fid_pretend(): pretend to another function when + * accessing the ptt window. There is no way to unpretend + * a function. The only way to cancel a pretend is to + * pretend back to the original function. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @fid: fid field of pxp_pretend structure. Can contain + * either pf / vf, port/path fields are don't care. + * + * Return: Void. */ void qed_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid); /** - * @brief qed_port_pretend - pretend to another port when - * accessing the ptt window + * qed_port_pretend(): Pretend to another port when accessing the ptt window * - * @param p_hwfn - * @param p_ptt - * @param port_id - the port to pretend to + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @port_id: The port to pretend to + * + * Return: Void. */ void qed_port_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 port_id); /** - * @brief qed_port_unpretend - cancel any previously set port - * pretend + * qed_port_unpretend(): Cancel any previously set port pretend + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. */ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_port_fid_pretend - pretend to another port and another function - * when accessing the ptt window + * qed_port_fid_pretend(): Pretend to another port and another function + * when accessing the ptt window + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @port_id: The port to pretend to + * @fid: fid field of pxp_pretend structure. Can contain either pf / vf. * - * @param p_hwfn - * @param p_ptt - * @param port_id - the port to pretend to - * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf. + * Return: Void. */ void qed_port_fid_pretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 port_id, u16 fid); /** - * @brief qed_vfid_to_concrete - build a concrete FID for a - * given VF ID + * qed_vfid_to_concrete(): Build a concrete FID for a given VF ID * - * @param p_hwfn - * @param p_ptt - * @param vfid + * @p_hwfn: HW device data. + * @vfid: VFID. + * + * Return: Void. */ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid); /** - * @brief qed_dmae_idx_to_go_cmd - map the idx to dmae cmd - * this is declared here since other files will require it. - * @param idx + * qed_dmae_idx_to_go_cmd(): Map the idx to dmae cmd + * this is declared here since other files will require it. + * + * @idx: Index + * + * Return: Void. */ u32 qed_dmae_idx_to_go_cmd(u8 idx); /** - * @brief qed_dmae_info_alloc - Init the dmae_info structure - * which is part of p_hwfn. - * @param p_hwfn + * qed_dmae_info_alloc(): Init the dmae_info structure + * which is part of p_hwfn. + * + * @p_hwfn: HW device data. + * + * Return: Int. 
*/ int qed_dmae_info_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_dmae_info_free - Free the dmae_info structure - * which is part of p_hwfn + * qed_dmae_info_free(): Free the dmae_info structure + * which is part of p_hwfn. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_dmae_info_free(struct qed_hwfn *p_hwfn); @@ -292,14 +318,16 @@ int qed_dmae_sanity(struct qed_hwfn *p_hwfn, #define QED_HW_ERR_MAX_STR_SIZE 256 /** - * @brief qed_hw_err_notify - Notify upper layer driver and management FW - * about a HW error. - * - * @param p_hwfn - * @param p_ptt - * @param err_type - * @param fmt - debug data buffer to send to the MFW - * @param ... - buffer format args + * qed_hw_err_notify(): Notify upper layer driver and management FW + * about a HW error. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @err_type: Err Type. + * @fmt: Debug data buffer to send to the MFW + * @...: buffer format args + * + * Return void. */ void __printf(4, 5) __cold qed_hw_err_notify(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, diff --git a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h index a573c8921982..1dbc460c9eec 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_init_ops.h +++ b/drivers/net/ethernet/qlogic/qed/qed_init_ops.h @@ -12,23 +12,24 @@ #include "qed.h" /** - * @brief qed_init_iro_array - init iro_arr. + * qed_init_iro_array(): init iro_arr. * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_init_iro_array(struct qed_dev *cdev); /** - * @brief qed_init_run - Run the init-sequence. + * qed_init_run(): Run the init-sequence. * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @phase: Phase. + * @phase_id: Phase ID. + * @modes: Mode. * - * @param p_hwfn - * @param p_ptt - * @param phase - * @param phase_id - * @param modes - * @return _qed_status_t + * Return: _qed_status_t */ int qed_init_run(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -37,30 +38,31 @@ int qed_init_run(struct qed_hwfn *p_hwfn, int modes); /** - * @brief qed_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs. + * qed_init_alloc(): Allocate RT array, Store 'values' ptrs. * + * @p_hwfn: HW device data. * - * @param p_hwfn - * - * @return _qed_status_t + * Return: _qed_status_t. */ int qed_init_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_init_hwfn_deallocate + * qed_init_free(): Init HW function deallocate. * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_init_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_init_store_rt_reg - Store a configuration value in the RT array. + * qed_init_store_rt_reg(): Store a configuration value in the RT array. * + * @p_hwfn: HW device data. + * @rt_offset: RT offset. + * @val: Val. * - * @param p_hwfn - * @param rt_offset - * @param val + * Return: Void. */ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, @@ -72,15 +74,6 @@ void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, #define OVERWRITE_RT_REG(hwfn, offset, val) \ qed_init_store_rt_reg(hwfn, offset, val) -/** - * @brief - * - * - * @param p_hwfn - * @param rt_offset - * @param val - * @param size - */ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 *val, @@ -90,11 +83,12 @@ void qed_init_store_rt_agg(struct qed_hwfn *p_hwfn, qed_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val)) /** - * @brief - * Initialize GTT global windows and set admin window - * related params of GTT/PTT to default values. 
+ * qed_gtt_init(): Initialize GTT global windows and set admin window + * related params of GTT/PTT to default values. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return Void. */ void qed_gtt_init(struct qed_hwfn *p_hwfn); #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_int.h b/drivers/net/ethernet/qlogic/qed/qed_int.h index c5550e96bbe1..eb8e0f4242d7 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_int.h +++ b/drivers/net/ethernet/qlogic/qed/qed_int.h @@ -53,51 +53,54 @@ enum qed_coalescing_fsm { }; /** - * @brief qed_int_igu_enable_int - enable device interrupts + * qed_int_igu_enable_int(): Enable device interrupts. * - * @param p_hwfn - * @param p_ptt - * @param int_mode - interrupt mode to use + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @int_mode: Interrupt mode to use. + * + * Return: Void. */ void qed_int_igu_enable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_int_mode int_mode); /** - * @brief qed_int_igu_disable_int - disable device interrupts + * qed_int_igu_disable_int(): Disable device interrupts. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. */ void qed_int_igu_disable_int(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_int_igu_read_sisr_reg - Reads the single isr multiple dpc - * register from igu. + * qed_int_igu_read_sisr_reg(): Reads the single isr multiple dpc + * register from igu. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return u64 + * Return: u64. */ u64 qed_int_igu_read_sisr_reg(struct qed_hwfn *p_hwfn); #define QED_SP_SB_ID 0xffff /** - * @brief qed_int_sb_init - Initializes the sb_info structure. + * qed_int_sb_init(): Initializes the sb_info structure. * - * once the structure is initialized it can be passed to sb related functions. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_info: points to an uninitialized (but allocated) sb_info structure + * @sb_virt_addr: SB Virtual address. + * @sb_phy_addr: SB Physial address. + * @sb_id: the sb_id to be used (zero based in driver) + * should use QED_SP_SB_ID for SP Status block * - * @param p_hwfn - * @param p_ptt - * @param sb_info points to an uninitialized (but - * allocated) sb_info structure - * @param sb_virt_addr - * @param sb_phy_addr - * @param sb_id the sb_id to be used (zero based in driver) - * should use QED_SP_SB_ID for SP Status block + * Return: int. * - * @return int + * Once the structure is initialized it can be passed to sb related functions. */ int qed_int_sb_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -106,82 +109,91 @@ int qed_int_sb_init(struct qed_hwfn *p_hwfn, dma_addr_t sb_phy_addr, u16 sb_id); /** - * @brief qed_int_sb_setup - Setup the sb. + * qed_int_sb_setup(): Setup the sb. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_info: Initialized sb_info structure. * - * @param p_hwfn - * @param p_ptt - * @param sb_info initialized sb_info structure + * Return: Void. */ void qed_int_sb_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_sb_info *sb_info); /** - * @brief qed_int_sb_release - releases the sb_info structure. + * qed_int_sb_release(): Releases the sb_info structure. * - * once the structure is released, it's memory can be freed + * @p_hwfn: HW device data. + * @sb_info: Points to an allocated sb_info structure. + * @sb_id: The sb_id to be used (zero based in driver) + * should never be equal to QED_SP_SB_ID + * (SP Status block). 
* - * @param p_hwfn - * @param sb_info points to an allocated sb_info structure - * @param sb_id the sb_id to be used (zero based in driver) - * should never be equal to QED_SP_SB_ID - * (SP Status block) + * Return: int. * - * @return int + * Once the structure is released, it's memory can be freed. */ int qed_int_sb_release(struct qed_hwfn *p_hwfn, struct qed_sb_info *sb_info, u16 sb_id); /** - * @brief qed_int_sp_dpc - To be called when an interrupt is received on the - * default status block. + * qed_int_sp_dpc(): To be called when an interrupt is received on the + * default status block. * - * @param p_hwfn - pointer to hwfn + * @t: Tasklet. + * + * Return: Void. * */ void qed_int_sp_dpc(struct tasklet_struct *t); /** - * @brief qed_int_get_num_sbs - get the number of status - * blocks configured for this funciton in the igu. + * qed_int_get_num_sbs(): Get the number of status blocks configured + * for this funciton in the igu. * - * @param p_hwfn - * @param p_sb_cnt_info + * @p_hwfn: HW device data. + * @p_sb_cnt_info: Pointer to SB count info. * - * @return int - number of status blocks configured + * Return: Void. */ void qed_int_get_num_sbs(struct qed_hwfn *p_hwfn, struct qed_sb_cnt_info *p_sb_cnt_info); /** - * @brief qed_int_disable_post_isr_release - performs the cleanup post ISR + * qed_int_disable_post_isr_release(): Performs the cleanup post ISR * release. The API need to be called after releasing all slowpath IRQs * of the device. * - * @param cdev + * @cdev: Qed dev pointer. * + * Return: Void. */ void qed_int_disable_post_isr_release(struct qed_dev *cdev); /** - * @brief qed_int_attn_clr_enable - sets whether the general behavior is + * qed_int_attn_clr_enable: Sets whether the general behavior is * preventing attentions from being reasserted, or following the * attributes of the specific attention. * - * @param cdev - * @param clr_enable + * @cdev: Qed dev pointer. + * @clr_enable: Clear enable + * + * Return: Void. * */ void qed_int_attn_clr_enable(struct qed_dev *cdev, bool clr_enable); /** - * @brief - Doorbell Recovery handler. + * qed_db_rec_handler(): Doorbell Recovery handler. * Run doorbell recovery in case of PF overflow (and flush DORQ if * needed). * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. */ int qed_db_rec_handler(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -223,30 +235,34 @@ struct qed_igu_info { }; /** - * @brief - Make sure the IGU CAM reflects the resources provided by MFW + * qed_int_igu_reset_cam(): Make sure the IGU CAM reflects the resources + * provided by MFW. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. */ int qed_int_igu_reset_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Translate the weakly-defined client sb-id into an IGU sb-id + * qed_get_igu_sb_id(): Translate the weakly-defined client sb-id into + * an IGU sb-id * - * @param p_hwfn - * @param sb_id - user provided sb_id + * @p_hwfn: HW device data. + * @sb_id: user provided sb_id. * - * @return an index inside IGU CAM where the SB resides + * Return: An index inside IGU CAM where the SB resides. */ u16 qed_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); /** - * @brief return a pointer to an unused valid SB + * qed_get_igu_free_sb(): Return a pointer to an unused valid SB * - * @param p_hwfn - * @param b_is_pf - true iff we want a SB belonging to a PF + * @p_hwfn: HW device data. + * @b_is_pf: True iff we want a SB belonging to a PF. 
* - * @return point to an igu_block, NULL if none is available + * Return: Point to an igu_block, NULL if none is available. */ struct qed_igu_block *qed_get_igu_free_sb(struct qed_hwfn *p_hwfn, bool b_is_pf); @@ -259,15 +275,15 @@ void qed_int_igu_init_pure_rt(struct qed_hwfn *p_hwfn, void qed_int_igu_init_rt(struct qed_hwfn *p_hwfn); /** - * @brief qed_int_igu_read_cam - Reads the IGU CAM. + * qed_int_igu_read_cam(): Reads the IGU CAM. * This function needs to be called during hardware * prepare. It reads the info from igu cam to know which * status block is the default / base status block etc. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int + * Return: Int. */ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -275,24 +291,22 @@ int qed_int_igu_read_cam(struct qed_hwfn *p_hwfn, typedef int (*qed_int_comp_cb_t)(struct qed_hwfn *p_hwfn, void *cookie); /** - * @brief qed_int_register_cb - Register callback func for - * slowhwfn statusblock. - * - * Every protocol that uses the slowhwfn status block - * should register a callback function that will be called - * once there is an update of the sp status block. - * - * @param p_hwfn - * @param comp_cb - function to be called when there is an - * interrupt on the sp sb - * - * @param cookie - passed to the callback function - * @param sb_idx - OUT parameter which gives the chosen index - * for this protocol. - * @param p_fw_cons - pointer to the actual address of the - * consumer for this protocol. - * - * @return int + * qed_int_register_cb(): Register callback func for slowhwfn statusblock. + * + * @p_hwfn: HW device data. + * @comp_cb: Function to be called when there is an + * interrupt on the sp sb + * @cookie: Passed to the callback function + * @sb_idx: (OUT) parameter which gives the chosen index + * for this protocol. + * @p_fw_cons: Pointer to the actual address of the + * consumer for this protocol. + * + * Return: Int. + * + * Every protocol that uses the slowhwfn status block + * should register a callback function that will be called + * once there is an update of the sp status block. */ int qed_int_register_cb(struct qed_hwfn *p_hwfn, qed_int_comp_cb_t comp_cb, @@ -301,37 +315,40 @@ int qed_int_register_cb(struct qed_hwfn *p_hwfn, __le16 **p_fw_cons); /** - * @brief qed_int_unregister_cb - Unregisters callback - * function from sp sb. - * Partner of qed_int_register_cb -> should be called - * when no longer required. + * qed_int_unregister_cb(): Unregisters callback function from sp sb. + * + * @p_hwfn: HW device data. + * @pi: Producer Index. * - * @param p_hwfn - * @param pi + * Return: Int. * - * @return int + * Partner of qed_int_register_cb -> should be called + * when no longer required. */ int qed_int_unregister_cb(struct qed_hwfn *p_hwfn, u8 pi); /** - * @brief qed_int_get_sp_sb_id - Get the slowhwfn sb id. + * qed_int_get_sp_sb_id(): Get the slowhwfn sb id. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return u16 + * Return: u16. */ u16 qed_int_get_sp_sb_id(struct qed_hwfn *p_hwfn); /** - * @brief Status block cleanup. Should be called for each status - * block that will be used -> both PF / VF - * - * @param p_hwfn - * @param p_ptt - * @param igu_sb_id - igu status block id - * @param opaque - opaque fid of the sb owner. - * @param b_set - set(1) / clear(0) + * qed_int_igu_init_pure_rt_single(): Status block cleanup. + * Should be called for each status + * block that will be used -> both PF / VF. + * + * @p_hwfn: HW device data. 
+ * @p_ptt: P_ptt. + * @igu_sb_id: IGU status block id. + * @opaque: Opaque fid of the sb owner. + * @b_set: Set(1) / Clear(0). + * + * Return: Void. */ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -340,15 +357,16 @@ void qed_int_igu_init_pure_rt_single(struct qed_hwfn *p_hwfn, bool b_set); /** - * @brief qed_int_cau_conf - configure cau for a given status - * block - * - * @param p_hwfn - * @param ptt - * @param sb_phys - * @param igu_sb_id - * @param vf_number - * @param vf_valid + * qed_int_cau_conf_sb(): Configure cau for a given status block. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @sb_phys: SB Physical. + * @igu_sb_id: IGU status block id. + * @vf_number: VF number + * @vf_valid: VF valid or not. + * + * Return: Void. */ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -358,52 +376,58 @@ void qed_int_cau_conf_sb(struct qed_hwfn *p_hwfn, u8 vf_valid); /** - * @brief qed_int_alloc + * qed_int_alloc(): QED interrupt alloc. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int + * Return: Int. */ int qed_int_alloc(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief qed_int_free + * qed_int_free(): QED interrupt free. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_int_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_int_setup + * qed_int_setup(): QED interrupt setup. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_int_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Enable Interrupt & Attention for hw function + * qed_int_igu_enable(): Enable Interrupt & Attention for hw function. * - * @param p_hwfn - * @param p_ptt - * @param int_mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @int_mode: Interrut mode * - * @return int + * Return: Int. */ int qed_int_igu_enable(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_int_mode int_mode); /** - * @brief - Initialize CAU status block entry + * qed_init_cau_sb_entry(): Initialize CAU status block entry. + * + * @p_hwfn: HW device data. + * @p_sb_entry: Pointer SB entry. + * @pf_id: PF number + * @vf_number: VF number + * @vf_valid: VF valid or not. * - * @param p_hwfn - * @param p_sb_entry - * @param pf_id - * @param vf_number - * @param vf_valid + * Return: Void. */ void qed_init_cau_sb_entry(struct qed_hwfn *p_hwfn, struct cau_sb_entry *p_sb_entry, diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index dab7a5d09f87..dec2b00259d4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -34,10 +34,13 @@ void qed_iscsi_setup(struct qed_hwfn *p_hwfn); void qed_iscsi_free(struct qed_hwfn *p_hwfn); /** - * @brief - Fills provided statistics struct with statistics. + * qed_get_protocol_stats_iscsi(): Fills provided statistics + * struct with statistics. * - * @param cdev - * @param stats - points to struct that will be filled with statistics. + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * + * Return: Void. 
*/ void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, struct qed_mcp_iscsi_stats *stats); diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 8eceeebb1a7b..2ab7f3f0cf6c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -92,18 +92,18 @@ struct qed_filter_mcast { }; /** - * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue + * qed_eth_rx_queue_stop(): This ramrod closes an Rx queue. * - * @param p_hwfn - * @param p_rxq Handler of queue to close - * @param eq_completion_only If True completion will be on - * EQe, if False completion will be - * on EQe if p_hwfn opaque - * different from the RXQ opaque - * otherwise on CQe. - * @param cqe_completion If True completion will be - * receive on CQe. - * @return int + * @p_hwfn: HW device data. + * @p_rxq: Handler of queue to close + * @eq_completion_only: If True completion will be on + * EQe, if False completion will be + * on EQe if p_hwfn opaque + * different from the RXQ opaque + * otherwise on CQe. + * @cqe_completion: If True completion will be receive on CQe. + * + * Return: Int. */ int qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, @@ -111,12 +111,12 @@ qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, bool eq_completion_only, bool cqe_completion); /** - * @brief qed_eth_tx_queue_stop - closes a Tx queue + * qed_eth_tx_queue_stop(): Closes a Tx queue. * - * @param p_hwfn - * @param p_txq - handle to Tx queue needed to be closed + * @p_hwfn: HW device data. + * @p_txq: handle to Tx queue needed to be closed. * - * @return int + * Return: Int. */ int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq); @@ -205,16 +205,15 @@ int qed_sp_vport_update(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_comp_data); /** - * @brief qed_sp_vport_stop - - * - * This ramrod closes a VPort after all its RX and TX queues are terminated. - * An Assert is generated if any queues are left open. + * qed_sp_vport_stop: This ramrod closes a VPort after all its + * RX and TX queues are terminated. + * An Assert is generated if any queues are left open. * - * @param p_hwfn - * @param opaque_fid - * @param vport_id VPort ID + * @p_hwfn: HW device data. + * @opaque_fid: Opaque FID + * @vport_id: VPort ID. * - * @return int + * Return: Int. */ int qed_sp_vport_stop(struct qed_hwfn *p_hwfn, u16 opaque_fid, u8 vport_id); @@ -225,22 +224,21 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, struct qed_spq_comp_cb *p_comp_data); /** - * @brief qed_sp_rx_eth_queues_update - - * - * This ramrod updates an RX queue. It is used for setting the active state - * of the queue and updating the TPA and SGE parameters. + * qed_sp_eth_rx_queues_update(): This ramrod updates an RX queue. + * It is used for setting the active state + * of the queue and updating the TPA and + * SGE parameters. + * @p_hwfn: HW device data. + * @pp_rxq_handlers: An array of queue handlers to be updated. + * @num_rxqs: number of queues to update. + * @complete_cqe_flg: Post completion to the CQE Ring if set. + * @complete_event_flg: Post completion to the Event Ring if set. + * @comp_mode: Comp mode. + * @p_comp_data: Pointer Comp data. * - * @note At the moment - only used by non-linux VFs. + * Return: Int. * - * @param p_hwfn - * @param pp_rxq_handlers An array of queue handlers to be updated. - * @param num_rxqs number of queues to update. 
- * @param complete_cqe_flg Post completion to the CQE Ring if set
- * @param complete_event_flg Post completion to the Event Ring if set
- * @param comp_mode
- * @param p_comp_data
- *
- * @return int
+ * Note: At the moment - only used by non-linux VFs.
  */
 int
@@ -257,30 +255,32 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
 void qed_reset_vport_stats(struct qed_dev *cdev);
 /**
- * *@brief qed_arfs_mode_configure -
- *
- **Enable or disable rfs mode. It must accept atleast one of tcp or udp true
- **and atleast one of ipv4 or ipv6 true to enable rfs mode.
+ * qed_arfs_mode_configure(): Enable or disable rfs mode.
+ * It must accept at least one of tcp or udp true
+ * and at least one of ipv4 or ipv6 true to enable
+ * rfs mode.
  *
- **@param p_hwfn
- **@param p_ptt
- **@param p_cfg_params - arfs mode configuration parameters.
+ * @p_hwfn: HW device data.
+ * @p_ptt: P_ptt.
+ * @p_cfg_params: arfs mode configuration parameters.
  *
+ * Return: Void.
  */
 void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_arfs_config_params *p_cfg_params);
 /**
- * @brief - qed_configure_rfs_ntuple_filter
+ * qed_configure_rfs_ntuple_filter(): This ramrod should be used to add
+ * or remove arfs hw filter
  *
- * This ramrod should be used to add or remove arfs hw filter
+ * @p_hwfn: HW device data.
+ * @p_cb: Used for QED_SPQ_MODE_CB,where client would initialize
+ * it with cookie and callback function address, if not
+ * using this mode then client must pass NULL.
+ * @p_params: Pointer to params.
  *
- * @params p_hwfn
- * @params p_cb - Used for QED_SPQ_MODE_CB,where client would initialize
- * it with cookie and callback function address, if not
- * using this mode then client must pass NULL.
- * @params p_params
+ * Return: Void.
  */
 int
 qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
@@ -374,16 +374,17 @@ qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
 struct qed_sp_vport_start_params *p_params);
 /**
- * @brief - Starts an Rx queue, when queue_cid is already prepared
+ * qed_eth_rxq_start_ramrod(): Starts an Rx queue, when queue_cid is
+ * already prepared
  *
- * @param p_hwfn
- * @param p_cid
- * @param bd_max_bytes
- * @param bd_chain_phys_addr
- * @param cqe_pbl_addr
- * @param cqe_pbl_size
+ * @p_hwfn: HW device data.
+ * @p_cid: Pointer CID.
+ * @bd_max_bytes: Max bytes.
+ * @bd_chain_phys_addr: Chain physical address.
+ * @cqe_pbl_addr: PBL address.
+ * @cqe_pbl_size: PBL size.
  *
- * @return int
+ * Return: Int.
  */
 int
 qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
@@ -393,15 +394,16 @@ qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
 dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
 /**
- * @brief - Starts a Tx queue, where queue_cid is already prepared
+ * qed_eth_txq_start_ramrod(): Starts a Tx queue, where queue_cid is
+ * already prepared
  *
- * @param p_hwfn
- * @param p_cid
- * @param pbl_addr
- * @param pbl_size
- * @param p_pq_params - parameters for choosing the PQ for this Tx queue
+ * @p_hwfn: HW device data.
+ * @p_cid: Pointer CID.
+ * @pbl_addr: PBL address.
+ * @pbl_size: PBL size.
+ * @pq_id: Parameters for choosing the PQ for this Tx queue.
  *
- * @return int
+ * Return: Int.
  */
 int
 qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index df88d00053a2..f80f7739ff8d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -119,41 +119,41 @@ struct qed_ll2_info {
 extern const struct qed_ll2_ops qed_ll2_ops_pass;
 /**
- * @brief qed_ll2_acquire_connection - allocate resources,
- * starts rx & tx (if relevant) queues pair. Provides
- * connecion handler as output parameter.
+ * qed_ll2_acquire_connection(): Allocate resources,
+ * starts rx & tx (if relevant) queues pair.
+ * Provides connection handler as output
+ * parameter.
  *
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @data: Describes connection parameters.
  *
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param data - describes connection parameters
- * @return int
+ * Return: Int.
  */
 int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data);
 /**
- * @brief qed_ll2_establish_connection - start previously
- * allocated LL2 queues pair
+ * qed_ll2_establish_connection(): start previously allocated LL2 queues pair
  *
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param p_ptt
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_require_connection.
  *
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
  */
 int qed_ll2_establish_connection(void *cxt, u8 connection_handle);
 /**
- * @brief qed_ll2_post_rx_buffers - submit buffers to LL2 Rx queue.
+ * qed_ll2_post_rx_buffer(): Submit buffers to LL2 Rx queue.
  *
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle LL2 connection's handle obtained from
- * qed_ll2_require_connection
- * @param addr rx (physical address) buffers to submit
- * @param cookie
- * @param notify_fw produce corresponding Rx BD immediately
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: LL2 connection's handle obtained from
+ * qed_ll2_require_connection.
+ * @addr: RX (physical address) buffers to submit.
+ * @buf_len: Buffer Len.
+ * @cookie: Cookie.
+ * @notify_fw: Produce corresponding Rx BD immediately.
  *
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
  */
 int qed_ll2_post_rx_buffer(void *cxt,
 u8 connection_handle,
@@ -161,15 +161,15 @@ int qed_ll2_post_rx_buffer(void *cxt,
 u16 buf_len, void *cookie, u8 notify_fw);
 /**
- * @brief qed_ll2_prepare_tx_packet - request for start Tx BD
- * to prepare Tx packet submission to FW.
+ * qed_ll2_prepare_tx_packet(): Request for start Tx BD
+ * to prepare Tx packet submission to FW.
  *
- * @param cxt - pointer to the hw-function [opaque to some]
- * @param connection_handle
- * @param pkt - info regarding the tx packet
- * @param notify_fw - issue doorbell to fw for this packet
+ * @cxt: Pointer to the hw-function [opaque to some].
+ * @connection_handle: Connection handle.
+ * @pkt: Info regarding the tx packet.
+ * @notify_fw: Issue doorbell to fw for this packet.
  *
- * @return 0 on success, failure otherwise
+ * Return: 0 on success, failure otherwise.
*/ int qed_ll2_prepare_tx_packet(void *cxt, u8 connection_handle, @@ -177,81 +177,83 @@ int qed_ll2_prepare_tx_packet(void *cxt, bool notify_fw); /** - * @brief qed_ll2_release_connection - releases resources - * allocated for LL2 connection + * qed_ll2_release_connection(): Releases resources allocated for LL2 + * connection. * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * + * Return: Void. */ void qed_ll2_release_connection(void *cxt, u8 connection_handle); /** - * @brief qed_ll2_set_fragment_of_tx_packet - provides fragments to fill - * Tx BD of BDs requested by - * qed_ll2_prepare_tx_packet + * qed_ll2_set_fragment_of_tx_packet(): Provides fragments to fill + * Tx BD of BDs requested by + * qed_ll2_prepare_tx_packet * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle - * obtained from - * qed_ll2_require_connection - * @param addr - * @param nbytes + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @addr: Address. + * @nbytes: Number of bytes. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_set_fragment_of_tx_packet(void *cxt, u8 connection_handle, dma_addr_t addr, u16 nbytes); /** - * @brief qed_ll2_terminate_connection - stops Tx/Rx queues - * + * qed_ll2_terminate_connection(): Stops Tx/Rx queues * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle - * obtained from - * qed_ll2_require_connection + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle); /** - * @brief qed_ll2_get_stats - get LL2 queue's statistics - * + * qed_ll2_get_stats(): Get LL2 queue's statistics * - * @param cxt - pointer to the hw-function [opaque to some] - * @param connection_handle LL2 connection's handle obtained from - * qed_ll2_require_connection - * @param p_stats + * @cxt: Pointer to the hw-function [opaque to some]. + * @connection_handle: LL2 connection's handle obtained from + * qed_ll2_require_connection. + * @p_stats: Pointer Status. * - * @return 0 on success, failure otherwise + * Return: 0 on success, failure otherwise. */ int qed_ll2_get_stats(void *cxt, u8 connection_handle, struct qed_ll2_stats *p_stats); /** - * @brief qed_ll2_alloc - Allocates LL2 connections set + * qed_ll2_alloc(): Allocates LL2 connections set. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_ll2_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_ll2_setup - Inits LL2 connections set + * qed_ll2_setup(): Inits LL2 connections set. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. * */ void qed_ll2_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_ll2_free - Releases LL2 connections set + * qed_ll2_free(): Releases LL2 connections set + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. 
* */ void qed_ll2_free(struct qed_hwfn *p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h index 8edb450d0abf..352b757183e8 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h @@ -266,97 +266,97 @@ union qed_mfw_tlv_data { #define QED_NVM_CFG_OPTION_ENTITY_SEL BIT(4) /** - * @brief - returns the link params of the hw function + * qed_mcp_get_link_params(): Returns the link params of the hw function. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @returns pointer to link params + * Returns: Pointer to link params. */ -struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *); +struct qed_mcp_link_params *qed_mcp_get_link_params(struct qed_hwfn *p_hwfn); /** - * @brief - return the link state of the hw function + * qed_mcp_get_link_state(): Return the link state of the hw function. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @returns pointer to link state + * Returns: Pointer to link state. */ -struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *); +struct qed_mcp_link_state *qed_mcp_get_link_state(struct qed_hwfn *p_hwfn); /** - * @brief - return the link capabilities of the hw function + * qed_mcp_get_link_capabilities(): Return the link capabilities of the + * hw function. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @returns pointer to link capabilities + * Returns: Pointer to link capabilities. */ struct qed_mcp_link_capabilities *qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn); /** - * @brief Request the MFW to set the the link according to 'link_input'. + * qed_mcp_set_link(): Request the MFW to set the link according + * to 'link_input'. * - * @param p_hwfn - * @param p_ptt - * @param b_up - raise link if `true'. Reset link if `false'. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @b_up: Raise link if `true'. Reset link if `false'. * - * @return int + * Return: Int. */ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up); /** - * @brief Get the management firmware version value + * qed_mcp_get_mfw_ver(): Get the management firmware version value. * - * @param p_hwfn - * @param p_ptt - * @param p_mfw_ver - mfw version value - * @param p_running_bundle_id - image id in nvram; Optional. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mfw_ver: MFW version value. + * @p_running_bundle_id: Image id in nvram; Optional. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - operation was successful. */ int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mfw_ver, u32 *p_running_bundle_id); /** - * @brief Get the MBI version value + * qed_mcp_get_mbi_ver(): Get the MBI version value. * - * @param p_hwfn - * @param p_ptt - * @param p_mbi_ver - A pointer to a variable to be filled with the MBI version. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mbi_ver: A pointer to a variable to be filled with the MBI version. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - operation was successful. */ int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_mbi_ver); /** - * @brief Get media type value of the port. + * qed_mcp_get_media_type(): Get media type value of the port. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param mfw_ver - media type value + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @media_type: Media type value * - * @return int - - * 0 - Operation was successul. 
- * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *media_type); /** - * @brief Get transceiver data of the port. + * qed_mcp_get_transceiver_data(): Get transceiver data of the port. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param p_transceiver_state - transceiver state. - * @param p_transceiver_type - media type value + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_transceiver_state: Transceiver state. + * @p_tranceiver_type: Media type value. * - * @return int - - * 0 - Operation was successful. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -364,50 +364,48 @@ int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn, u32 *p_tranceiver_type); /** - * @brief Get transceiver supported speed mask. + * qed_mcp_trans_speed_mask(): Get transceiver supported speed mask. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param p_speed_mask - Bit mask of all supported speeds. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_speed_mask: Bit mask of all supported speeds. * - * @return int - - * 0 - Operation was successful. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_speed_mask); /** - * @brief Get board configuration. + * qed_mcp_get_board_config(): Get board configuration. * - * @param cdev - qed dev pointer - * @param p_ptt - * @param p_board_config - Board config. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_board_config: Board config. * - * @return int - - * 0 - Operation was successful. - * -EBUSY - Operation failed + * Return: Int - 0 - Operation was successul. + * -EBUSY - Operation failed */ int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_board_config); /** - * @brief General function for sending commands to the MCP - * mailbox. It acquire mutex lock for the entire - * operation, from sending the request until the MCP - * response. Waiting for MCP response will be checked up - * to 5 seconds every 5ms. + * qed_mcp_cmd(): General function for sending commands to the MCP + * mailbox. It acquire mutex lock for the entire + * operation, from sending the request until the MCP + * response. Waiting for MCP response will be checked up + * to 5 seconds every 5ms. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param cmd - command to be sent to the MCP. - * @param param - Optional param - * @param o_mcp_resp - The MCP response code (exclude sequence). - * @param o_mcp_param- Optional parameter provided by the MCP + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @cmd: command to be sent to the MCP. + * @param: Optional param + * @o_mcp_resp: The MCP response code (exclude sequence). + * @o_mcp_param: Optional parameter provided by the MCP * response - * @return int - 0 - operation - * was successul. + * + * Return: Int - 0 - Operation was successul. */ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -417,37 +415,39 @@ int qed_mcp_cmd(struct qed_hwfn *p_hwfn, u32 *o_mcp_param); /** - * @brief - drains the nig, allowing completion to pass in case of pauses. 
- * (Should be called only from sleepable context) + * qed_mcp_drain(): drains the nig, allowing completion to pass in + * case of pauses. + * (Should be called only from sleepable context) * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * + * Return: Int. */ int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Get the flash size value + * qed_mcp_get_flash_size(): Get the flash size value. * - * @param p_hwfn - * @param p_ptt - * @param p_flash_size - flash size in bytes to be filled. + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_flash_size: Flash size in bytes to be filled. * - * @return int - 0 - operation was successul. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *p_flash_size); /** - * @brief Send driver version to MFW + * qed_mcp_send_drv_version(): Send driver version to MFW. * - * @param p_hwfn - * @param p_ptt - * @param version - Version value - * @param name - Protocol driver name + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_ver: Version value. * - * @return int - 0 - operation was successul. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, @@ -455,146 +455,148 @@ qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn, struct qed_mcp_drv_version *p_ver); /** - * @brief Read the MFW process kill counter + * qed_get_process_kill_counter(): Read the MFW process kill counter. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return u32 + * Return: u32. */ u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Trigger a recovery process + * qed_start_recovery_process(): Trigger a recovery process. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int + * Return: Int. */ int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief A recovery handler must call this function as its first step. - * It is assumed that the handler is not run from an interrupt context. + * qed_recovery_prolog(): A recovery handler must call this function + * as its first step. + * It is assumed that the handler is not run from + * an interrupt context. * - * @param cdev - * @param p_ptt + * @cdev: Qed dev pointer. * - * @return int + * Return: int. */ int qed_recovery_prolog(struct qed_dev *cdev); /** - * @brief Notify MFW about the change in base device properties + * qed_mcp_ov_update_current_config(): Notify MFW about the change in base + * device properties * - * @param p_hwfn - * @param p_ptt - * @param client - qed client type + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @client: Qed client type. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_client client); /** - * @brief Notify MFW about the driver state + * qed_mcp_ov_update_driver_state(): Notify MFW about the driver state. * - * @param p_hwfn - * @param p_ptt - * @param drv_state - Driver state + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @drv_state: Driver state. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. 
*/ int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_driver_state drv_state); /** - * @brief Send MTU size to MFW + * qed_mcp_ov_update_mtu(): Send MTU size to MFW. * - * @param p_hwfn - * @param p_ptt - * @param mtu - MTU size + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mtu: MTU size. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 mtu); /** - * @brief Send MAC address to MFW + * qed_mcp_ov_update_mac(): Send MAC address to MFW. * - * @param p_hwfn - * @param p_ptt - * @param mac - MAC address + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mac: MAC address. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 *mac); /** - * @brief Send WOL mode to MFW + * qed_mcp_ov_update_wol(): Send WOL mode to MFW. * - * @param p_hwfn - * @param p_ptt - * @param wol - WOL mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @wol: WOL mode. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_ov_wol wol); /** - * @brief Set LED status + * qed_mcp_set_led(): Set LED status. * - * @param p_hwfn - * @param p_ptt - * @param mode - LED mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @mode: LED mode. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_set_led(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, enum qed_led_mode mode); /** - * @brief Read from nvm + * qed_mcp_nvm_read(): Read from NVM. * - * @param cdev - * @param addr - nvm offset - * @param p_buf - nvm read buffer - * @param len - buffer len + * @cdev: Qed dev pointer. + * @addr: NVM offset. + * @p_buf: NVM read buffer. + * @len: Buffer len. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len); /** - * @brief Write to nvm + * qed_mcp_nvm_write(): Write to NVM. * - * @param cdev - * @param addr - nvm offset - * @param cmd - nvm command - * @param p_buf - nvm write buffer - * @param len - buffer len + * @cdev: Qed dev pointer. + * @addr: NVM offset. + * @cmd: NVM command. + * @p_buf: NVM write buffer. + * @len: Buffer len. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_nvm_write(struct qed_dev *cdev, u32 cmd, u32 addr, u8 *p_buf, u32 len); /** - * @brief Check latest response + * qed_mcp_nvm_resp(): Check latest response. * - * @param cdev - * @param p_buf - nvm write buffer + * @cdev: Qed dev pointer. + * @p_buf: NVM write buffer. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf); @@ -604,13 +606,13 @@ struct qed_nvm_image_att { }; /** - * @brief Allows reading a whole nvram image + * qed_mcp_get_nvm_image_att(): Allows reading a whole nvram image. * - * @param p_hwfn - * @param image_id - image to get attributes for - * @param p_image_att - image attributes structure into which to fill data + * @p_hwfn: HW device data. + * @image_id: Image to get attributes for. + * @p_image_att: Image attributes structure into which to fill data. 
* - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, @@ -618,64 +620,65 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn, struct qed_nvm_image_att *p_image_att); /** - * @brief Allows reading a whole nvram image + * qed_mcp_get_nvm_image(): Allows reading a whole nvram image. * - * @param p_hwfn - * @param image_id - image requested for reading - * @param p_buffer - allocated buffer into which to fill data - * @param buffer_len - length of the allocated buffer. + * @p_hwfn: HW device data. + * @image_id: image requested for reading. + * @p_buffer: allocated buffer into which to fill data. + * @buffer_len: length of the allocated buffer. * - * @return 0 iff p_buffer now contains the nvram image. + * Return: 0 if p_buffer now contains the nvram image. */ int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn, enum qed_nvm_images image_id, u8 *p_buffer, u32 buffer_len); /** - * @brief Bist register test + * qed_mcp_bist_register_test(): Bist register test. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Bist clock test + * qed_mcp_bist_clock_test(): Bist clock test. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Bist nvm test - get number of images + * qed_mcp_bist_nvm_get_num_images(): Bist nvm test - get number of images. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param num_images - number of images if operation was + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @num_images: number of images if operation was * successful. 0 if not. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *num_images); /** - * @brief Bist nvm test - get image attributes by index + * qed_mcp_bist_nvm_get_image_att(): Bist nvm test - get image attributes + * by index. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param p_image_att - Attributes of image - * @param image_index - Index of image to get information for + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @p_image_att: Attributes of image. + * @image_index: Index of image to get information for. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -683,23 +686,26 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn, u32 image_index); /** - * @brief - Processes the TLV request from MFW i.e., get the required TLV info - * from the qed client and send it to the MFW. + * qed_mfw_process_tlv_req(): Processes the TLV request from MFW i.e., + * get the required TLV info + * from the qed client and send it to the MFW. 
* - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Send raw debug data to the MFW + * qed_mcp_send_raw_debug_data(): Send raw debug data to the MFW + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_buf: raw debug data buffer. + * @size: Buffer size. * - * @param p_hwfn - * @param p_ptt - * @param p_buf - raw debug data buffer - * @param size - buffer size + * Return : Int. */ int qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn, @@ -796,47 +802,49 @@ qed_mcp_is_ext_speed_supported(const struct qed_hwfn *p_hwfn) } /** - * @brief Initialize the interface with the MCP + * qed_mcp_cmd_init(): Initialize the interface with the MCP. * - * @param p_hwfn - HW func - * @param p_ptt - PTT required for register access + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. * - * @return int + * Return: Int. */ int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Initialize the port interface with the MCP + * qed_mcp_cmd_port_init(): Initialize the port interface with the MCP + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. * - * @param p_hwfn - * @param p_ptt * Can only be called after `num_ports_in_engines' is set */ void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Releases resources allocated during the init process. + * qed_mcp_free(): Releases resources allocated during the init process. * - * @param p_hwfn - HW func - * @param p_ptt - PTT required for register access + * @p_hwfn: HW function. * - * @return int + * Return: Int. */ int qed_mcp_free(struct qed_hwfn *p_hwfn); /** - * @brief This function is called from the DPC context. After - * pointing PTT to the mfw mb, check for events sent by the MCP - * to the driver and ack them. In case a critical event - * detected, it will be handled here, otherwise the work will be - * queued to a sleepable work-queue. + * qed_mcp_handle_events(): This function is called from the DPC context. + * After pointing PTT to the mfw mb, check for events sent by + * the MCP to the driver and ack them. In case a critical event + * detected, it will be handled here, otherwise the work will be + * queued to a sleepable work-queue. + * + * @p_hwfn: HW function. + * @p_ptt: PTT required for register access. * - * @param p_hwfn - HW function - * @param p_ptt - PTT required for register access - * @return int - 0 - operation - * was successul. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_handle_events(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -858,106 +866,111 @@ struct qed_load_req_params { }; /** - * @brief Sends a LOAD_REQ to the MFW, and in case the operation succeeds, - * returns whether this PF is the first on the engine/port or function. + * qed_mcp_load_req(): Sends a LOAD_REQ to the MFW, and in case the + * operation succeeds, returns whether this PF is + * the first on the engine/port or function. * - * @param p_hwfn - * @param p_ptt - * @param p_params + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_params: Params. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. 
*/ int qed_mcp_load_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_load_req_params *p_params); /** - * @brief Sends a LOAD_DONE message to the MFW + * qed_mcp_load_done(): Sends a LOAD_DONE message to the MFW. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Sends a UNLOAD_REQ message to the MFW + * qed_mcp_unload_req(): Sends a UNLOAD_REQ message to the MFW. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Sends a UNLOAD_DONE message to the MFW + * qed_mcp_unload_done(): Sends a UNLOAD_DONE message to the MFW * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - Operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Read the MFW mailbox into Current buffer. + * qed_mcp_read_mb(): Read the MFW mailbox into Current buffer. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Void. */ void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Ack to mfw that driver finished FLR process for VFs + * qed_mcp_ack_vf_flr(): Ack to mfw that driver finished FLR process for VFs * - * @param p_hwfn - * @param p_ptt - * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @vfs_to_ack: bit mask of all engine VFs for which the PF acks. * - * @param return int - 0 upon success. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 *vfs_to_ack); /** - * @brief - calls during init to read shmem of all function-related info. + * qed_mcp_fill_shmem_func_info(): Calls during init to read shmem of + * all function-related info. * - * @param p_hwfn + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Reset the MCP using mailbox command. + * qed_mcp_reset(): Reset the MCP using mailbox command. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Sends an NVM read command request to the MFW to get - * a buffer. + * qed_mcp_nvm_rd_cmd(): Sends an NVM read command request to the MFW to get + * a buffer. * - * @param p_hwfn - * @param p_ptt - * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or - * DRV_MSG_CODE_NVM_READ_NVRAM commands - * @param param - [0:23] - Offset [24:31] - Size - * @param o_mcp_resp - MCP response - * @param o_mcp_param - MCP response param - * @param o_txn_size - Buffer size output - * @param o_buf - Pointer to the buffer returned by the MFW. + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @cmd: (Command) DRV_MSG_CODE_NVM_GET_FILE_DATA or + * DRV_MSG_CODE_NVM_READ_NVRAM commands. + * @param: [0:23] - Offset [24:31] - Size. 
+ * @o_mcp_resp: MCP response. + * @o_mcp_param: MCP response param. + * @o_txn_size: Buffer size output. + * @o_buf: Pointer to the buffer returned by the MFW. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -967,60 +980,61 @@ int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn, u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf); /** - * @brief Read from sfp + * qed_mcp_phy_sfp_read(): Read from sfp. * - * @param p_hwfn - hw function - * @param p_ptt - PTT required for register access - * @param port - transceiver port - * @param addr - I2C address - * @param offset - offset in sfp - * @param len - buffer length - * @param p_buf - buffer to read into + * @p_hwfn: HW device data. + * @p_ptt: PTT required for register access. + * @port: transceiver port. + * @addr: I2C address. + * @offset: offset in sfp. + * @len: buffer length. + * @p_buf: buffer to read into. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf); /** - * @brief indicates whether the MFW objects [under mcp_info] are accessible + * qed_mcp_is_init(): indicates whether the MFW objects [under mcp_info] + * are accessible * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return true iff MFW is running and mcp_info is initialized + * Return: true if MFW is running and mcp_info is initialized. */ bool qed_mcp_is_init(struct qed_hwfn *p_hwfn); /** - * @brief request MFW to configure MSI-X for a VF + * qed_mcp_config_vf_msix(): Request MFW to configure MSI-X for a VF. * - * @param p_hwfn - * @param p_ptt - * @param vf_id - absolute inside engine - * @param num_sbs - number of entries to request + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @vf_id: absolute inside engine. + * @num: number of entries to request. * - * @return int + * Return: Int. */ int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u8 vf_id, u8 num); /** - * @brief - Halt the MCP. + * qed_mcp_halt(): Halt the MCP. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief - Wake up the MCP. + * qed_mcp_resume: Wake up the MCP. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param return 0 upon success. + * Return: 0 upon success. */ int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); @@ -1038,13 +1052,13 @@ int __qed_configure_pf_min_bandwidth(struct qed_hwfn *p_hwfn, int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u32 mask_parities); -/* @brief - Gets the mdump retained data from the MFW. +/* qed_mcp_mdump_get_retain(): Gets the mdump retained data from the MFW. * - * @param p_hwfn - * @param p_ptt - * @param p_mdump_retain + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_mdump_retain: mdump retain. * - * @param return 0 upon success. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn, @@ -1052,15 +1066,15 @@ qed_mcp_mdump_get_retain(struct qed_hwfn *p_hwfn, struct mdump_retain_data_stc *p_mdump_retain); /** - * @brief - Sets the MFW's max value for the given resource + * qed_mcp_set_resc_max_val(): Sets the MFW's max value for the given resource. 
* - * @param p_hwfn - * @param p_ptt - * @param res_id - * @param resc_max_val - * @param p_mcp_resp + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @res_id: RES ID. + * @resc_max_val: Resec max val. + * @p_mcp_resp: MCP Resp * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn, @@ -1069,16 +1083,17 @@ qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn, u32 resc_max_val, u32 *p_mcp_resp); /** - * @brief - Gets the MFW allocation info for the given resource + * qed_mcp_get_resc_info(): Gets the MFW allocation info for the given + * resource. * - * @param p_hwfn - * @param p_ptt - * @param res_id - * @param p_mcp_resp - * @param p_resc_num - * @param p_resc_start + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @res_id: Res ID. + * @p_mcp_resp: MCP resp. + * @p_resc_num: Resc num. + * @p_resc_start: Resc start. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, @@ -1087,13 +1102,13 @@ qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn, u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start); /** - * @brief Send eswitch mode to MFW + * qed_mcp_ov_update_eswitch(): Send eswitch mode to MFW. * - * @param p_hwfn - * @param p_ptt - * @param eswitch - eswitch mode + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @eswitch: eswitch mode. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, @@ -1113,12 +1128,12 @@ enum qed_resc_lock { }; /** - * @brief - Initiates PF FLR + * qed_mcp_initiate_pf_flr(): Initiates PF FLR. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); struct qed_resc_lock_params { @@ -1151,13 +1166,13 @@ struct qed_resc_lock_params { }; /** - * @brief Acquires MFW generic resource lock + * qed_mcp_resc_lock(): Acquires MFW generic resource lock. * - * @param p_hwfn - * @param p_ptt - * @param p_params + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_params: Params. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_resc_lock(struct qed_hwfn *p_hwfn, @@ -1175,13 +1190,13 @@ struct qed_resc_unlock_params { }; /** - * @brief Releases MFW generic resource lock + * qed_mcp_resc_unlock(): Releases MFW generic resource lock. * - * @param p_hwfn - * @param p_ptt - * @param p_params + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_params: Params. * - * @return int - 0 - operation was successful. + * Return: Int - 0 - Operation was successul. */ int qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, @@ -1189,12 +1204,15 @@ qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn, struct qed_resc_unlock_params *p_params); /** - * @brief - default initialization for lock/unlock resource structs + * qed_mcp_resc_lock_default_init(): Default initialization for + * lock/unlock resource structs. * - * @param p_lock - lock params struct to be initialized; Can be NULL - * @param p_unlock - unlock params struct to be initialized; Can be NULL - * @param resource - the requested resource - * @paral b_is_permanent - disable retries & aging when set + * @p_lock: lock params struct to be initialized; Can be NULL. 
+ * @p_unlock: unlock params struct to be initialized; Can be NULL. + * @resource: the requested resource. + * @b_is_permanent: disable retries & aging when set. + * + * Return: Void. */ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, struct qed_resc_unlock_params *p_unlock, @@ -1202,94 +1220,117 @@ void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock, resource, bool b_is_permanent); /** - * @brief - Return whether management firmware support smart AN + * qed_mcp_is_smart_an_supported(): Return whether management firmware + * support smart AN * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return bool - true if feature is supported. + * Return: bool true if feature is supported. */ bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn); /** - * @brief Learn of supported MFW features; To be done during early init + * qed_mcp_get_capabilities(): Learn of supported MFW features; + * To be done during early init. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. */ int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Inform MFW of set of features supported by driver. Should be done - * inside the content of the LOAD_REQ. + * qed_mcp_set_capabilities(): Inform MFW of set of features supported + * by driver. Should be done inside the content + * of the LOAD_REQ. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Int. */ int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Read ufp config from the shared memory. + * qed_mcp_read_ufp_config(): Read ufp config from the shared memory. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. * - * @param p_hwfn - * @param p_ptt + * Return: Void. */ void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Populate the nvm info shadow in the given hardware function + * qed_mcp_nvm_info_populate(): Populate the nvm info shadow in the given + * hardware function. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Int. */ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn); /** - * @brief Delete nvm info shadow in the given hardware function + * qed_mcp_nvm_info_free(): Delete nvm info shadow in the given + * hardware function. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn); /** - * @brief Get the engine affinity configuration. + * qed_mcp_get_engine_config(): Get the engine affinity configuration. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. */ int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Get the PPFID bitmap. + * qed_mcp_get_ppfid_bitmap(): Get the PPFID bitmap. * - * @param p_hwfn - * @param p_ptt + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * + * Return: Int. */ int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt); /** - * @brief Get NVM config attribute value. + * qed_mcp_nvm_get_cfg(): Get NVM config attribute value. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @option_id: Option ID. + * @entity_id: Entity ID. + * @flags: Flags. + * @p_buf: Buf. + * @p_len: Len. * - * @param p_hwfn - * @param p_ptt - * @param option_id - * @param entity_id - * @param flags - * @param p_buf - * @param p_len + * Return: Int. 
*/ int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, u32 *p_len); /** - * @brief Set NVM config attribute value. + * qed_mcp_nvm_set_cfg(): Set NVM config attribute value. * - * @param p_hwfn - * @param p_ptt - * @param option_id - * @param entity_id - * @param flags - * @param p_buf - * @param len + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @option_id: Option ID. + * @entity_id: Entity ID. + * @flags: Flags. + * @p_buf: Buf. + * @len: Len. + * + * Return: Int. */ int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 option_id, u8 entity_id, u16 flags, u8 *p_buf, diff --git a/drivers/net/ethernet/qlogic/qed/qed_selftest.h b/drivers/net/ethernet/qlogic/qed/qed_selftest.h index e27dd9a4547e..7a3bd749e1e4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_selftest.h +++ b/drivers/net/ethernet/qlogic/qed/qed_selftest.h @@ -6,47 +6,47 @@ #include /** - * @brief qed_selftest_memory - Perform memory test + * qed_selftest_memory(): Perform memory test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_memory(struct qed_dev *cdev); /** - * @brief qed_selftest_interrupt - Perform interrupt test + * qed_selftest_interrupt(): Perform interrupt test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_interrupt(struct qed_dev *cdev); /** - * @brief qed_selftest_register - Perform register test + * qed_selftest_register(): Perform register test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_register(struct qed_dev *cdev); /** - * @brief qed_selftest_clock - Perform clock test + * qed_selftest_clock(): Perform clock test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_clock(struct qed_dev *cdev); /** - * @brief qed_selftest_nvram - Perform nvram test + * qed_selftest_nvram(): Perform nvram test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return int + * Return: Int. */ int qed_selftest_nvram(struct qed_dev *cdev); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h index 60ff3222bf55..c5a38f3c92b0 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sp.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h @@ -31,23 +31,18 @@ struct qed_spq_comp_cb { }; /** - * @brief qed_eth_cqe_completion - handles the completion of a - * ramrod on the cqe ring + * qed_eth_cqe_completion(): handles the completion of a + * ramrod on the cqe ring. * - * @param p_hwfn - * @param cqe + * @p_hwfn: HW device data. + * @cqe: CQE. * - * @return int + * Return: Int. */ int qed_eth_cqe_completion(struct qed_hwfn *p_hwfn, struct eth_slow_path_rx_cqe *cqe); -/** - * @file - * - * QED Slow-hwfn queue interface - */ - + /* QED Slow-hwfn queue interface */ union ramrod_data { struct pf_start_ramrod_data pf_start; struct pf_update_ramrod_data pf_update; @@ -207,117 +202,128 @@ struct qed_spq { }; /** - * @brief qed_spq_post - Posts a Slow hwfn request to FW, or lacking that - * Pends it to the future list. + * qed_spq_post(): Posts a Slow hwfn request to FW, or lacking that + * Pends it to the future list. * - * @param p_hwfn - * @param p_req + * @p_hwfn: HW device data. + * @p_ent: Ent. + * @fw_return_code: Return code from firmware. * - * @return int + * Return: Int. 
*/ int qed_spq_post(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent, u8 *fw_return_code); /** - * @brief qed_spq_allocate - Alloocates & initializes the SPQ and EQ. + * qed_spq_alloc(): Alloocates & initializes the SPQ and EQ. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_spq_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_spq_setup - Reset the SPQ to its start state. + * qed_spq_setup(): Reset the SPQ to its start state. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_spq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_spq_deallocate - Deallocates the given SPQ struct. + * qed_spq_free(): Deallocates the given SPQ struct. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_spq_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_spq_get_entry - Obtain an entrry from the spq - * free pool list. - * - * + * qed_spq_get_entry(): Obtain an entrry from the spq + * free pool list. * - * @param p_hwfn - * @param pp_ent + * @p_hwfn: HW device data. + * @pp_ent: PP ENT. * - * @return int + * Return: Int. */ int qed_spq_get_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry **pp_ent); /** - * @brief qed_spq_return_entry - Return an entry to spq free - * pool list + * qed_spq_return_entry(): Return an entry to spq free pool list. * - * @param p_hwfn - * @param p_ent + * @p_hwfn: HW device data. + * @p_ent: P ENT. + * + * Return: Void. */ void qed_spq_return_entry(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent); /** - * @brief qed_eq_allocate - Allocates & initializes an EQ struct + * qed_eq_alloc(): Allocates & initializes an EQ struct. * - * @param p_hwfn - * @param num_elem number of elements in the eq + * @p_hwfn: HW device data. + * @num_elem: number of elements in the eq. * - * @return int + * Return: Int. */ int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem); /** - * @brief qed_eq_setup - Reset the EQ to its start state. + * qed_eq_setup(): Reset the EQ to its start state. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_eq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_eq_free - deallocates the given EQ struct. + * qed_eq_free(): deallocates the given EQ struct. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_eq_free(struct qed_hwfn *p_hwfn); /** - * @brief qed_eq_prod_update - update the FW with default EQ producer + * qed_eq_prod_update(): update the FW with default EQ producer. + * + * @p_hwfn: HW device data. + * @prod: Prod. * - * @param p_hwfn - * @param prod + * Return: Void. */ void qed_eq_prod_update(struct qed_hwfn *p_hwfn, u16 prod); /** - * @brief qed_eq_completion - Completes currently pending EQ elements + * qed_eq_completion(): Completes currently pending EQ elements. * - * @param p_hwfn - * @param cookie + * @p_hwfn: HW device data. + * @cookie: Cookie. * - * @return int + * Return: Int. */ int qed_eq_completion(struct qed_hwfn *p_hwfn, void *cookie); /** - * @brief qed_spq_completion - Completes a single event + * qed_spq_completion(): Completes a single event. * - * @param p_hwfn - * @param echo - echo value from cookie (used for determining completion) - * @param p_data - data from cookie (used in callback function if applicable) + * @p_hwfn: HW device data. + * @echo: echo value from cookie (used for determining completion). + * @fw_return_code: FW return code. + * @p_data: data from cookie (used in callback function if applicable). * - * @return int + * Return: Int. 
*/ int qed_spq_completion(struct qed_hwfn *p_hwfn, __le16 echo, @@ -325,44 +331,43 @@ int qed_spq_completion(struct qed_hwfn *p_hwfn, union event_ring_data *p_data); /** - * @brief qed_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ + * qed_spq_get_cid(): Given p_hwfn, return cid for the hwfn's SPQ. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return u32 - SPQ CID + * Return: u32 - SPQ CID. */ u32 qed_spq_get_cid(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_alloc - Allocates & initializes an ConsQ - * struct + * qed_consq_alloc(): Allocates & initializes an ConsQ struct. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_consq_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_setup - Reset the ConsQ to its start state. + * qed_consq_setup(): Reset the ConsQ to its start state. * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return Void. */ void qed_consq_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_consq_free - deallocates the given ConsQ struct. + * qed_consq_free(): deallocates the given ConsQ struct. + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return Void. */ void qed_consq_free(struct qed_hwfn *p_hwfn); int qed_spq_pend_post(struct qed_hwfn *p_hwfn); -/** - * @file - * - * @brief Slow-hwfn low-level commands (Ramrods) function definitions. - */ +/* Slow-hwfn low-level commands (Ramrods) function definitions. */ #define QED_SP_EQ_COMPLETION 0x01 #define QED_SP_CQE_COMPLETION 0x02 @@ -377,12 +382,15 @@ struct qed_sp_init_data { }; /** - * @brief Returns a SPQ entry to the pool / frees the entry if allocated. - * Should be called on in error flows after initializing the SPQ entry - * and before posting it. + * qed_sp_destroy_request(): Returns a SPQ entry to the pool / frees the + * entry if allocated. Should be called on in error + * flows after initializing the SPQ entry + * and before posting it. + * + * @p_hwfn: HW device data. + * @p_ent: Ent. * - * @param p_hwfn - * @param p_ent + * Return: Void. */ void qed_sp_destroy_request(struct qed_hwfn *p_hwfn, struct qed_spq_entry *p_ent); @@ -394,7 +402,14 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, struct qed_sp_init_data *p_data); /** - * @brief qed_sp_pf_start - PF Function Start Ramrod + * qed_sp_pf_start(): PF Function Start Ramrod. + * + * @p_hwfn: HW device data. + * @p_ptt: P_ptt. + * @p_tunn: P_tunn. + * @allow_npar_tx_switch: Allow NPAR TX Switch. + * + * Return: Int. * * This ramrod is sent to initialize a physical function (PF). It will * configure the function related parameters and write its completion to the @@ -404,12 +419,6 @@ int qed_sp_init_request(struct qed_hwfn *p_hwfn, * allocated by the driver on host memory and its parameters are written * to the internal RAM of the UStorm by the Function Start Ramrod. * - * @param p_hwfn - * @param p_ptt - * @param p_tunn - * @param allow_npar_tx_switch - * - * @return int */ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, @@ -418,47 +427,33 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, bool allow_npar_tx_switch); /** - * @brief qed_sp_pf_update - PF Function Update Ramrod + * qed_sp_pf_update(): PF Function Update Ramrod. * - * This ramrod updates function-related parameters. Every parameter can be - * updated independently, according to configuration flags. + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Int. * - * @return int + * This ramrod updates function-related parameters. 
Every parameter can be + * updated independently, according to configuration flags. */ int qed_sp_pf_update(struct qed_hwfn *p_hwfn); /** - * @brief qed_sp_pf_update_stag - Update firmware of new outer tag + * qed_sp_pf_update_stag(): Update firmware of new outer tag. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_sp_pf_update_stag(struct qed_hwfn *p_hwfn); /** - * @brief qed_sp_pf_stop - PF Function Stop Ramrod - * - * This ramrod is sent to close a Physical Function (PF). It is the last ramrod - * sent and the last completion written to the PFs Event Ring. This ramrod also - * deletes the context for the Slowhwfn connection on this PF. - * - * @note Not required for first packet. - * - * @param p_hwfn - * - * @return int - */ - -/** - * @brief qed_sp_pf_update_ufp - PF ufp update Ramrod + * qed_sp_pf_update_ufp(): PF ufp update Ramrod. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_sp_pf_update_ufp(struct qed_hwfn *p_hwfn); @@ -470,11 +465,11 @@ int qed_sp_pf_update_tunn_cfg(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data); /** - * @brief qed_sp_heartbeat_ramrod - Send empty Ramrod + * qed_sp_heartbeat_ramrod(): Send empty Ramrod. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_sp_heartbeat_ramrod(struct qed_hwfn *p_hwfn); diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.h b/drivers/net/ethernet/qlogic/qed/qed_sriov.h index 7ff23ef8ccc1..0a1e44d45c1a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.h +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.h @@ -251,29 +251,31 @@ extern const struct qed_iov_hv_ops qed_iov_ops_pass; #ifdef CONFIG_QED_SRIOV /** - * @brief Check if given VF ID @vfid is valid - * w.r.t. @b_enabled_only value - * if b_enabled_only = true - only enabled VF id is valid - * else any VF id less than max_vfs is valid + * qed_iov_is_valid_vfid(): Check if given VF ID @vfid is valid + * w.r.t. @b_enabled_only value + * if b_enabled_only = true - only enabled + * VF id is valid. + * else any VF id less than max_vfs is valid. * - * @param p_hwfn - * @param rel_vf_id - Relative VF ID - * @param b_enabled_only - consider only enabled VF - * @param b_non_malicious - true iff we want to validate vf isn't malicious. + * @p_hwfn: HW device data. + * @rel_vf_id: Relative VF ID. + * @b_enabled_only: consider only enabled VF. + * @b_non_malicious: true iff we want to validate vf isn't malicious. * - * @return bool - true for valid VF ID + * Return: bool - true for valid VF ID */ bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn, int rel_vf_id, bool b_enabled_only, bool b_non_malicious); /** - * @brief - Given a VF index, return index of next [including that] active VF. + * qed_iov_get_next_active_vf(): Given a VF index, return index of + * next [including that] active VF. * - * @param p_hwfn - * @param rel_vf_id + * @p_hwfn: HW device data. + * @rel_vf_id: VF ID. * - * @return MAX_NUM_VFS in case no further active VFs, otherwise index. + * Return: MAX_NUM_VFS in case no further active VFs, otherwise index. */ u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn, u16 rel_vf_id); @@ -281,83 +283,92 @@ void qed_iov_bulletin_set_udp_ports(struct qed_hwfn *p_hwfn, int vfid, u16 vxlan_port, u16 geneve_port); /** - * @brief Read sriov related information and allocated resources - * reads from configuration space, shmem, etc. 
+ * qed_iov_hw_info(): Read sriov related information and allocated resources + * reads from configuration space, shmem, etc. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_iov_hw_info(struct qed_hwfn *p_hwfn); /** - * @brief qed_add_tlv - place a given tlv on the tlv buffer at next offset + * qed_add_tlv(): place a given tlv on the tlv buffer at next offset * - * @param p_hwfn - * @param p_iov - * @param type - * @param length + * @p_hwfn: HW device data. + * @offset: offset. + * @type: Type + * @length: Length. * - * @return pointer to the newly placed tlv + * Return: pointer to the newly placed tlv */ void *qed_add_tlv(struct qed_hwfn *p_hwfn, u8 **offset, u16 type, u16 length); /** - * @brief list the types and lengths of the tlvs on the buffer + * qed_dp_tlv_list(): list the types and lengths of the tlvs on the buffer * - * @param p_hwfn - * @param tlvs_list + * @p_hwfn: HW device data. + * @tlvs_list: Tlvs_list. + * + * Return: Void. */ void qed_dp_tlv_list(struct qed_hwfn *p_hwfn, void *tlvs_list); /** - * @brief qed_iov_alloc - allocate sriov related resources + * qed_iov_alloc(): allocate sriov related resources * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_iov_alloc(struct qed_hwfn *p_hwfn); /** - * @brief qed_iov_setup - setup sriov related resources + * qed_iov_setup(): setup sriov related resources + * + * @p_hwfn: HW device data. * - * @param p_hwfn + * Return: Void. */ void qed_iov_setup(struct qed_hwfn *p_hwfn); /** - * @brief qed_iov_free - free sriov related resources + * qed_iov_free(): free sriov related resources * - * @param p_hwfn + * @p_hwfn: HW device data. + * + * Return: Void. */ void qed_iov_free(struct qed_hwfn *p_hwfn); /** - * @brief free sriov related memory that was allocated during hw_prepare + * qed_iov_free_hw_info(): free sriov related memory that was + * allocated during hw_prepare + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Void. */ void qed_iov_free_hw_info(struct qed_dev *cdev); /** - * @brief Mark structs of vfs that have been FLR-ed. + * qed_iov_mark_vf_flr(): Mark structs of vfs that have been FLR-ed. * - * @param p_hwfn - * @param disabled_vfs - bitmask of all VFs on path that were FLRed + * @p_hwfn: HW device data. + * @disabled_vfs: bitmask of all VFs on path that were FLRed * - * @return true iff one of the PF's vfs got FLRed. false otherwise. + * Return: true iff one of the PF's vfs got FLRed. false otherwise. */ bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn, u32 *disabled_vfs); /** - * @brief Search extended TLVs in request/reply buffer. + * qed_iov_search_list_tlvs(): Search extended TLVs in request/reply buffer. * - * @param p_hwfn - * @param p_tlvs_list - Pointer to tlvs list - * @param req_type - Type of TLV + * @p_hwfn: HW device data. + * @p_tlvs_list: Pointer to tlvs list + * @req_type: Type of TLV * - * @return pointer to tlv type if found, otherwise returns NULL. + * Return: pointer to tlv type if found, otherwise returns NULL. */ void *qed_iov_search_list_tlvs(struct qed_hwfn *p_hwfn, void *p_tlvs_list, u16 req_type); diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h index 60d2bb64e65f..976201fc7d4a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_vf.h +++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h @@ -688,13 +688,16 @@ struct qed_vf_iov { }; /** - * @brief VF - Set Rx/Tx coalesce per VF's relative queue. - * Coalesce value '0' will omit the configuration. 
+ * qed_vf_pf_set_coalesce(): VF - Set Rx/Tx coalesce per VF's relative queue. + * Coalesce value '0' will omit the + * configuration. * - * @param p_hwfn - * @param rx_coal - coalesce value in micro second for rx queue - * @param tx_coal - coalesce value in micro second for tx queue - * @param p_cid - queue cid + * @p_hwfn: HW device data. + * @rx_coal: coalesce value in micro second for rx queue. + * @tx_coal: coalesce value in micro second for tx queue. + * @p_cid: queue cid. + * + * Return: Int. * **/ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, @@ -702,148 +705,172 @@ int qed_vf_pf_set_coalesce(struct qed_hwfn *p_hwfn, u16 tx_coal, struct qed_queue_cid *p_cid); /** - * @brief VF - Get coalesce per VF's relative queue. + * qed_vf_pf_get_coalesce(): VF - Get coalesce per VF's relative queue. * - * @param p_hwfn - * @param p_coal - coalesce value in micro second for VF queues. - * @param p_cid - queue cid + * @p_hwfn: HW device data. + * @p_coal: coalesce value in micro second for VF queues. + * @p_cid: queue cid. * + * Return: Int. **/ int qed_vf_pf_get_coalesce(struct qed_hwfn *p_hwfn, u16 *p_coal, struct qed_queue_cid *p_cid); #ifdef CONFIG_QED_SRIOV /** - * @brief Read the VF bulletin and act on it if needed + * qed_vf_read_bulletin(): Read the VF bulletin and act on it if needed. * - * @param p_hwfn - * @param p_change - qed fills 1 iff bulletin board has changed, 0 otherwise. + * @p_hwfn: HW device data. + * @p_change: qed fills 1 iff bulletin board has changed, 0 otherwise. * - * @return enum _qed_status + * Return: enum _qed_status. */ int qed_vf_read_bulletin(struct qed_hwfn *p_hwfn, u8 *p_change); /** - * @brief Get link paramters for VF from qed + * qed_vf_get_link_params(): Get link parameters for VF from qed + * + * @p_hwfn: HW device data. + * @params: the link params structure to be filled for the VF. * - * @param p_hwfn - * @param params - the link params structure to be filled for the VF + * Return: Void. */ void qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *params); /** - * @brief Get link state for VF from qed + * qed_vf_get_link_state(): Get link state for VF from qed. + * + * @p_hwfn: HW device data. + * @link: the link state structure to be filled for the VF * - * @param p_hwfn - * @param link - the link state structure to be filled for the VF + * Return: Void. */ void qed_vf_get_link_state(struct qed_hwfn *p_hwfn, struct qed_mcp_link_state *link); /** - * @brief Get link capabilities for VF from qed + * qed_vf_get_link_caps(): Get link capabilities for VF from qed. * - * @param p_hwfn - * @param p_link_caps - the link capabilities structure to be filled for the VF + * @p_hwfn: HW device data. + * @p_link_caps: the link capabilities structure to be filled for the VF + * + * Return: Void. */ void qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, struct qed_mcp_link_capabilities *p_link_caps); /** - * @brief Get number of Rx queues allocated for VF by qed + * qed_vf_get_num_rxqs(): Get number of Rx queues allocated for VF by qed + * + * @p_hwfn: HW device data. + * @num_rxqs: allocated RX queues * - * @param p_hwfn - * @param num_rxqs - allocated RX queues + * Return: Void. */ void qed_vf_get_num_rxqs(struct qed_hwfn *p_hwfn, u8 *num_rxqs); /** - * @brief Get number of Rx queues allocated for VF by qed + * qed_vf_get_num_txqs(): Get number of Rx queues allocated for VF by qed * - * @param p_hwfn - * @param num_txqs - allocated RX queues + * @p_hwfn: HW device data. + * @num_txqs: allocated RX queues + * + * Return: Void. 
*/ void qed_vf_get_num_txqs(struct qed_hwfn *p_hwfn, u8 *num_txqs); /** - * @brief Get number of available connections [both Rx and Tx] for VF + * qed_vf_get_num_cids(): Get number of available connections + * [both Rx and Tx] for VF + * + * @p_hwfn: HW device data. + * @num_cids: allocated number of connections * - * @param p_hwfn - * @param num_cids - allocated number of connections + * Return: Void. */ void qed_vf_get_num_cids(struct qed_hwfn *p_hwfn, u8 *num_cids); /** - * @brief Get port mac address for VF + * qed_vf_get_port_mac(): Get port mac address for VF. * - * @param p_hwfn - * @param port_mac - destination location for port mac + * @p_hwfn: HW device data. + * @port_mac: destination location for port mac + * + * Return: Void. */ void qed_vf_get_port_mac(struct qed_hwfn *p_hwfn, u8 *port_mac); /** - * @brief Get number of VLAN filters allocated for VF by qed + * qed_vf_get_num_vlan_filters(): Get number of VLAN filters allocated + * for VF by qed. + * + * @p_hwfn: HW device data. + * @num_vlan_filters: allocated VLAN filters * - * @param p_hwfn - * @param num_rxqs - allocated VLAN filters + * Return: Void. */ void qed_vf_get_num_vlan_filters(struct qed_hwfn *p_hwfn, u8 *num_vlan_filters); /** - * @brief Get number of MAC filters allocated for VF by qed + * qed_vf_get_num_mac_filters(): Get number of MAC filters allocated + * for VF by qed * - * @param p_hwfn - * @param num_rxqs - allocated MAC filters + * @p_hwfn: HW device data. + * @num_mac_filters: allocated MAC filters + * + * Return: Void. */ void qed_vf_get_num_mac_filters(struct qed_hwfn *p_hwfn, u8 *num_mac_filters); /** - * @brief Check if VF can set a MAC address + * qed_vf_check_mac(): Check if VF can set a MAC address * - * @param p_hwfn - * @param mac + * @p_hwfn: HW device data. + * @mac: Mac. * - * @return bool + * Return: bool. */ bool qed_vf_check_mac(struct qed_hwfn *p_hwfn, u8 *mac); /** - * @brief Set firmware version information in dev_info from VFs acquire response tlv + * qed_vf_get_fw_version(): Set firmware version information + * in dev_info from VFs acquire response tlv + * + * @p_hwfn: HW device data. + * @fw_major: FW major. + * @fw_minor: FW minor. + * @fw_rev: FW rev. + * @fw_eng: FW eng. * - * @param p_hwfn - * @param fw_major - * @param fw_minor - * @param fw_rev - * @param fw_eng + * Return: Void. */ void qed_vf_get_fw_version(struct qed_hwfn *p_hwfn, u16 *fw_major, u16 *fw_minor, u16 *fw_rev, u16 *fw_eng); /** - * @brief hw preparation for VF - * sends ACQUIRE message + * qed_vf_hw_prepare(): hw preparation for VF sends ACQUIRE message * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return int + * Return: Int. */ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); /** - * @brief VF - start the RX Queue by sending a message to the PF - * @param p_hwfn - * @param p_cid - Only relative fields are relevant - * @param bd_max_bytes - maximum number of bytes per bd - * @param bd_chain_phys_addr - physical address of bd chain - * @param cqe_pbl_addr - physical address of pbl - * @param cqe_pbl_size - pbl size - * @param pp_prod - pointer to the producer to be - * used in fastpath + * qed_vf_pf_rxq_start(): start the RX Queue by sending a message to the PF + * + * @p_hwfn: HW device data. 
+ * @p_cid: Only relative fields are relevant + * @bd_max_bytes: maximum number of bytes per bd + * @bd_chain_phys_addr: physical address of bd chain + * @cqe_pbl_addr: physical address of pbl + * @cqe_pbl_size: pbl size + * @pp_prod: pointer to the producer to be used in fastpath * - * @return int + * Return: Int. */ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, @@ -853,18 +880,16 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, u16 cqe_pbl_size, void __iomem **pp_prod); /** - * @brief VF - start the TX queue by sending a message to the - * PF. + * qed_vf_pf_txq_start(): VF - start the TX queue by sending a message to the + * PF. * - * @param p_hwfn - * @param tx_queue_id - zero based within the VF - * @param sb - status block for this queue - * @param sb_index - index within the status block - * @param bd_chain_phys_addr - physical address of tx chain - * @param pp_doorbell - pointer to address to which to - * write the doorbell too.. + * @p_hwfn: HW device data. + * @p_cid: CID. + * @pbl_addr: PBL address. + * @pbl_size: PBL Size. + * @pp_doorbell: pointer to address to which to write the doorbell too. * - * @return int + * Return: Int. */ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, @@ -873,90 +898,91 @@ qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, u16 pbl_size, void __iomem **pp_doorbell); /** - * @brief VF - stop the RX queue by sending a message to the PF + * qed_vf_pf_rxq_stop(): VF - stop the RX queue by sending a message to the PF. * - * @param p_hwfn - * @param p_cid - * @param cqe_completion + * @p_hwfn: HW device data. + * @p_cid: CID. + * @cqe_completion: CQE Completion. * - * @return int + * Return: Int. */ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid, bool cqe_completion); /** - * @brief VF - stop the TX queue by sending a message to the PF + * qed_vf_pf_txq_stop(): VF - stop the TX queue by sending a message to the PF. * - * @param p_hwfn - * @param tx_qid + * @p_hwfn: HW device data. + * @p_cid: CID. * - * @return int + * Return: Int. */ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid); /** - * @brief VF - send a vport update command + * qed_vf_pf_vport_update(): VF - send a vport update command. * - * @param p_hwfn - * @param params + * @p_hwfn: HW device data. + * @p_params: Params * - * @return int + * Return: Int. */ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn, struct qed_sp_vport_update_params *p_params); /** + * qed_vf_pf_reset(): VF - send a close message to PF. * - * @brief VF - send a close message to PF + * @p_hwfn: HW device data. * - * @param p_hwfn - * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_reset(struct qed_hwfn *p_hwfn); /** - * @brief VF - free vf`s memories + * qed_vf_pf_release(): VF - free vf`s memories. * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_release(struct qed_hwfn *p_hwfn); /** - * @brief qed_vf_get_igu_sb_id - Get the IGU SB ID for a given + * qed_vf_get_igu_sb_id(): Get the IGU SB ID for a given * sb_id. For VFs igu sbs don't have to be contiguous * - * @param p_hwfn - * @param sb_id + * @p_hwfn: HW device data. + * @sb_id: SB ID. * - * @return INLINE u16 + * Return: INLINE u16 */ u16 qed_vf_get_igu_sb_id(struct qed_hwfn *p_hwfn, u16 sb_id); /** - * @brief Stores [or removes] a configured sb_info. + * qed_vf_set_sb_info(): Stores [or removes] a configured sb_info. + * + * @p_hwfn: HW device data. 
+ * @sb_id: zero-based SB index [for fastpath] + * @p_sb: may be NULL [during removal]. * - * @param p_hwfn - * @param sb_id - zero-based SB index [for fastpath] - * @param sb_info - may be NULL [during removal]. + * Return: Void. */ void qed_vf_set_sb_info(struct qed_hwfn *p_hwfn, u16 sb_id, struct qed_sb_info *p_sb); /** - * @brief qed_vf_pf_vport_start - perform vport start for VF. + * qed_vf_pf_vport_start(): perform vport start for VF. * - * @param p_hwfn - * @param vport_id - * @param mtu - * @param inner_vlan_removal - * @param tpa_mode - * @param max_buffers_per_cqe, - * @param only_untagged - default behavior regarding vlan acceptance + * @p_hwfn: HW device data. + * @vport_id: Vport ID. + * @mtu: MTU. + * @inner_vlan_removal: Innter VLAN removal. + * @tpa_mode: TPA mode + * @max_buffers_per_cqe: Max buffer pre CQE. + * @only_untagged: default behavior regarding vlan acceptance * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, u8 vport_id, @@ -966,11 +992,11 @@ int qed_vf_pf_vport_start(struct qed_hwfn *p_hwfn, u8 max_buffers_per_cqe, u8 only_untagged); /** - * @brief qed_vf_pf_vport_stop - stop the VF's vport + * qed_vf_pf_vport_stop(): stop the VF's vport * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_vport_stop(struct qed_hwfn *p_hwfn); @@ -981,42 +1007,49 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn, struct qed_filter_mcast *p_filter_cmd); /** - * @brief qed_vf_pf_int_cleanup - clean the SB of the VF + * qed_vf_pf_int_cleanup(): clean the SB of the VF * - * @param p_hwfn + * @p_hwfn: HW device data. * - * @return enum _qed_status + * Return: enum _qed_status */ int qed_vf_pf_int_cleanup(struct qed_hwfn *p_hwfn); /** - * @brief - return the link params in a given bulletin board + * __qed_vf_get_link_params(): return the link params in a given bulletin board * - * @param p_hwfn - * @param p_params - pointer to a struct to fill with link params - * @param p_bulletin + * @p_hwfn: HW device data. + * @p_params: pointer to a struct to fill with link params + * @p_bulletin: Bulletin. + * + * Return: Void. */ void __qed_vf_get_link_params(struct qed_hwfn *p_hwfn, struct qed_mcp_link_params *p_params, struct qed_bulletin_content *p_bulletin); /** - * @brief - return the link state in a given bulletin board + * __qed_vf_get_link_state(): return the link state in a given bulletin board + * + * @p_hwfn: HW device data. + * @p_link: pointer to a struct to fill with link state + * @p_bulletin: Bulletin. * - * @param p_hwfn - * @param p_link - pointer to a struct to fill with link state - * @param p_bulletin + * Return: Void. */ void __qed_vf_get_link_state(struct qed_hwfn *p_hwfn, struct qed_mcp_link_state *p_link, struct qed_bulletin_content *p_bulletin); /** - * @brief - return the link capabilities in a given bulletin board + * __qed_vf_get_link_caps(): return the link capabilities in a given + * bulletin board * - * @param p_hwfn - * @param p_link - pointer to a struct to fill with link capabilities - * @param p_bulletin + * @p_hwfn: HW device data. + * @p_link_caps: pointer to a struct to fill with link capabilities + * @p_bulletin: Bulletin. + * + * Return: Void. 
*/ void __qed_vf_get_link_caps(struct qed_hwfn *p_hwfn, struct qed_mcp_link_capabilities *p_link_caps, @@ -1029,9 +1062,13 @@ int qed_vf_pf_tunnel_param_update(struct qed_hwfn *p_hwfn, u32 qed_vf_hw_bar_size(struct qed_hwfn *p_hwfn, enum BAR_ID bar_id); /** - * @brief - Ask PF to update the MAC address in it's bulletin board + * qed_vf_pf_bulletin_update_mac(): Ask PF to update the MAC address in + * it's bulletin board + * + * @p_hwfn: HW device data. + * @p_mac: mac address to be updated in bulletin board * - * @param p_mac - mac address to be updated in bulletin board + * Return: Int. */ int qed_vf_pf_bulletin_update_mac(struct qed_hwfn *p_hwfn, u8 *p_mac); diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 6c22bfc16ee6..fee47c8eeff4 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -2832,10 +2832,13 @@ static void qede_get_eth_tlv_data(void *dev, void *data) } /** - * qede_io_error_detected - called when PCI error is detected + * qede_io_error_detected(): Called when PCI error is detected + * * @pdev: Pointer to PCI device * @state: The current pci connection state * + *Return: pci_ers_result_t. + * * This function is called after a PCI bus error affecting * this device has been detected. */ diff --git a/include/linux/qed/qed_chain.h b/include/linux/qed/qed_chain.h index f34dbd0db795..a84063492c71 100644 --- a/include/linux/qed/qed_chain.h +++ b/include/linux/qed/qed_chain.h @@ -268,14 +268,15 @@ static inline dma_addr_t qed_chain_get_pbl_phys(const struct qed_chain *chain) } /** - * @brief qed_chain_advance_page - + * qed_chain_advance_page(): Advance the next element across pages for a + * linked chain. * - * Advance the next element across pages for a linked chain + * @p_chain: P_chain. + * @p_next_elem: P_next_elem. + * @idx_to_inc: Idx_to_inc. + * @page_to_inc: page_to_inc. * - * @param p_chain - * @param p_next_elem - * @param idx_to_inc - * @param page_to_inc + * Return: Void. */ static inline void qed_chain_advance_page(struct qed_chain *p_chain, @@ -336,12 +337,14 @@ qed_chain_advance_page(struct qed_chain *p_chain, } while (0) /** - * @brief qed_chain_return_produced - + * qed_chain_return_produced(): A chain in which the driver "Produces" + * elements should use this API + * to indicate previous produced elements + * are now consumed. * - * A chain in which the driver "Produces" elements should use this API - * to indicate previous produced elements are now consumed. + * @p_chain: Chain. * - * @param p_chain + * Return: Void. */ static inline void qed_chain_return_produced(struct qed_chain *p_chain) { @@ -353,15 +356,15 @@ static inline void qed_chain_return_produced(struct qed_chain *p_chain) } /** - * @brief qed_chain_produce - + * qed_chain_produce(): A chain in which the driver "Produces" + * elements should use this to get a pointer to + * the next element which can be "Produced". It's driver + * responsibility to validate that the chain has room for + * new element. * - * A chain in which the driver "Produces" elements should use this to get - * a pointer to the next element which can be "Produced". It's driver - * responsibility to validate that the chain has room for new element. + * @p_chain: Chain. * - * @param p_chain - * - * @return void*, a pointer to next element + * Return: void*, a pointer to next element. 
*/ static inline void *qed_chain_produce(struct qed_chain *p_chain) { @@ -395,14 +398,11 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain) } /** - * @brief qed_chain_get_capacity - - * - * Get the maximum number of BDs in chain + * qed_chain_get_capacity(): Get the maximum number of BDs in chain * - * @param p_chain - * @param num + * @p_chain: Chain. * - * @return number of unusable BDs + * Return: number of unusable BDs. */ static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain) { @@ -410,12 +410,14 @@ static inline u32 qed_chain_get_capacity(struct qed_chain *p_chain) } /** - * @brief qed_chain_recycle_consumed - + * qed_chain_recycle_consumed(): Returns an element which was + * previously consumed; + * Increments producers so they could + * be written to FW. * - * Returns an element which was previously consumed; - * Increments producers so they could be written to FW. + * @p_chain: Chain. * - * @param p_chain + * Return: Void. */ static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain) { @@ -427,14 +429,13 @@ static inline void qed_chain_recycle_consumed(struct qed_chain *p_chain) } /** - * @brief qed_chain_consume - + * qed_chain_consume(): A Chain in which the driver utilizes data written + * by a different source (i.e., FW) should use this to + * access passed buffers. * - * A Chain in which the driver utilizes data written by a different source - * (i.e., FW) should use this to access passed buffers. + * @p_chain: Chain. * - * @param p_chain - * - * @return void*, a pointer to the next buffer written + * Return: void*, a pointer to the next buffer written. */ static inline void *qed_chain_consume(struct qed_chain *p_chain) { @@ -468,9 +469,11 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) } /** - * @brief qed_chain_reset - Resets the chain to its start state + * qed_chain_reset(): Resets the chain to its start state. + * + * @p_chain: pointer to a previously allocated chain. * - * @param p_chain pointer to a previously allocated chain + * Return Void. */ static inline void qed_chain_reset(struct qed_chain *p_chain) { @@ -519,13 +522,12 @@ static inline void qed_chain_reset(struct qed_chain *p_chain) } /** - * @brief qed_chain_get_last_elem - + * qed_chain_get_last_elem(): Returns a pointer to the last element of the + * chain. * - * Returns a pointer to the last element of the chain + * @p_chain: Chain. * - * @param p_chain - * - * @return void* + * Return: void*. */ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain) { @@ -563,10 +565,13 @@ static inline void *qed_chain_get_last_elem(struct qed_chain *p_chain) } /** - * @brief qed_chain_set_prod - sets the prod to the given value + * qed_chain_set_prod(): sets the prod to the given value. + * + * @p_chain: Chain. + * @prod_idx: Prod Idx. + * @p_prod_elem: Prod elem. * - * @param prod_idx - * @param p_prod_elem + * Return Void. */ static inline void qed_chain_set_prod(struct qed_chain *p_chain, u32 prod_idx, void *p_prod_elem) @@ -610,9 +615,11 @@ static inline void qed_chain_set_prod(struct qed_chain *p_chain, } /** - * @brief qed_chain_pbl_zero_mem - set chain memory to 0 + * qed_chain_pbl_zero_mem(): set chain memory to 0. + * + * @p_chain: Chain. * - * @param p_chain + * Return: Void. 
*/ static inline void qed_chain_pbl_zero_mem(struct qed_chain *p_chain) { diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index 850b98991670..f39451aaaeec 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -819,47 +819,47 @@ struct qed_common_cb_ops { struct qed_selftest_ops { /** - * @brief selftest_interrupt - Perform interrupt test + * selftest_interrupt(): Perform interrupt test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*selftest_interrupt)(struct qed_dev *cdev); /** - * @brief selftest_memory - Perform memory test + * selftest_memory(): Perform memory test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*selftest_memory)(struct qed_dev *cdev); /** - * @brief selftest_register - Perform register test + * selftest_register(): Perform register test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*selftest_register)(struct qed_dev *cdev); /** - * @brief selftest_clock - Perform clock test + * selftest_clock(): Perform clock test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*selftest_clock)(struct qed_dev *cdev); /** - * @brief selftest_nvram - Perform nvram test + * selftest_nvram(): Perform nvram test. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*selftest_nvram) (struct qed_dev *cdev); }; @@ -927,47 +927,53 @@ struct qed_common_ops { enum qed_hw_err_type err_type); /** - * @brief can_link_change - can the instance change the link or not + * can_link_change(): can the instance change the link or not. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return true if link-change is allowed, false otherwise. + * Return: true if link-change is allowed, false otherwise. */ bool (*can_link_change)(struct qed_dev *cdev); /** - * @brief set_link - set links according to params + * set_link(): set links according to params. * - * @param cdev - * @param params - values used to override the default link configuration + * @cdev: Qed dev pointer. + * @params: values used to override the default link configuration. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*set_link)(struct qed_dev *cdev, struct qed_link_params *params); /** - * @brief get_link - returns the current link state. + * get_link(): returns the current link state. * - * @param cdev - * @param if_link - structure to be filled with current link configuration. + * @cdev: Qed dev pointer. + * @if_link: structure to be filled with current link configuration. + * + * Return: Void. */ void (*get_link)(struct qed_dev *cdev, struct qed_link_output *if_link); /** - * @brief - drains chip in case Tx completions fail to arrive due to pause. + * drain(): drains chip in case Tx completions fail to arrive due to pause. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: Int. */ int (*drain)(struct qed_dev *cdev); /** - * @brief update_msglvl - update module debug level + * update_msglvl(): update module debug level. * - * @param cdev - * @param dp_module - * @param dp_level + * @cdev: Qed dev pointer. + * @dp_module: Debug module. + * @dp_level: Debug level. 
+ * + * Return: Void. */ void (*update_msglvl)(struct qed_dev *cdev, u32 dp_module, @@ -981,70 +987,73 @@ struct qed_common_ops { struct qed_chain *p_chain); /** - * @brief nvm_flash - Flash nvm data. + * nvm_flash(): Flash nvm data. * - * @param cdev - * @param name - file containing the data + * @cdev: Qed dev pointer. + * @name: file containing the data. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*nvm_flash)(struct qed_dev *cdev, const char *name); /** - * @brief nvm_get_image - reads an entire image from nvram + * nvm_get_image(): reads an entire image from nvram. * - * @param cdev - * @param type - type of the request nvram image - * @param buf - preallocated buffer to fill with the image - * @param len - length of the allocated buffer + * @cdev: Qed dev pointer. + * @type: type of the request nvram image. + * @buf: preallocated buffer to fill with the image. + * @len: length of the allocated buffer. * - * @return 0 on success, error otherwise + * Return: 0 on success, error otherwise. */ int (*nvm_get_image)(struct qed_dev *cdev, enum qed_nvm_images type, u8 *buf, u16 len); /** - * @brief set_coalesce - Configure Rx coalesce value in usec + * set_coalesce(): Configure Rx coalesce value in usec. * - * @param cdev - * @param rx_coal - Rx coalesce value in usec - * @param tx_coal - Tx coalesce value in usec - * @param qid - Queue index - * @param sb_id - Status Block Id + * @cdev: Qed dev pointer. + * @rx_coal: Rx coalesce value in usec. + * @tx_coal: Tx coalesce value in usec. + * @handle: Handle. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal, void *handle); /** - * @brief set_led - Configure LED mode + * set_led() - Configure LED mode. * - * @param cdev - * @param mode - LED mode + * @cdev: Qed dev pointer. + * @mode: LED mode. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*set_led)(struct qed_dev *cdev, enum qed_led_mode mode); /** - * @brief attn_clr_enable - Prevent attentions from being reasserted + * attn_clr_enable(): Prevent attentions from being reasserted. + * + * @cdev: Qed dev pointer. + * @clr_enable: Clear enable. * - * @param cdev - * @param clr_enable + * Return: Void. */ void (*attn_clr_enable)(struct qed_dev *cdev, bool clr_enable); /** - * @brief db_recovery_add - add doorbell information to the doorbell - * recovery mechanism. + * db_recovery_add(): add doorbell information to the doorbell + * recovery mechanism. * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address of where db_data is stored - * @param db_is_32b - doorbell is 32b pr 64b - * @param db_is_user - doorbell recovery addresses are user or kernel space + * @cdev: Qed dev pointer. + * @db_addr: Doorbell address. + * @db_data: Dddress of where db_data is stored. + * @db_width: Doorbell is 32b or 64b. + * @db_space: Doorbell recovery addresses are user or kernel space. + * + * Return: Int. */ int (*db_recovery_add)(struct qed_dev *cdev, void __iomem *db_addr, @@ -1053,114 +1062,130 @@ struct qed_common_ops { enum qed_db_rec_space db_space); /** - * @brief db_recovery_del - remove doorbell information from the doorbell + * db_recovery_del(): remove doorbell information from the doorbell * recovery mechanism. db_data serves as key (db_addr is not unique). * - * @param cdev - * @param db_addr - doorbell address - * @param db_data - address where db_data is stored. 
Serves as key for the - * entry to delete. + * @cdev: Qed dev pointer. + * @db_addr: Doorbell address. + * @db_data: Address where db_data is stored. Serves as key for the + * entry to delete. + * + * Return: Int. */ int (*db_recovery_del)(struct qed_dev *cdev, void __iomem *db_addr, void *db_data); /** - * @brief recovery_process - Trigger a recovery process + * recovery_process(): Trigger a recovery process. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*recovery_process)(struct qed_dev *cdev); /** - * @brief recovery_prolog - Execute the prolog operations of a recovery process + * recovery_prolog(): Execute the prolog operations of a recovery process. * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*recovery_prolog)(struct qed_dev *cdev); /** - * @brief update_drv_state - API to inform the change in the driver state. + * update_drv_state(): API to inform the change in the driver state. * - * @param cdev - * @param active + * @cdev: Qed dev pointer. + * @active: Active * + * Return: Int. */ int (*update_drv_state)(struct qed_dev *cdev, bool active); /** - * @brief update_mac - API to inform the change in the mac address + * update_mac(): API to inform the change in the mac address. * - * @param cdev - * @param mac + * @cdev: Qed dev pointer. + * @mac: MAC. * + * Return: Int. */ int (*update_mac)(struct qed_dev *cdev, u8 *mac); /** - * @brief update_mtu - API to inform the change in the mtu + * update_mtu(): API to inform the change in the mtu. * - * @param cdev - * @param mtu + * @cdev: Qed dev pointer. + * @mtu: MTU. * + * Return: Int. */ int (*update_mtu)(struct qed_dev *cdev, u16 mtu); /** - * @brief update_wol - update of changes in the WoL configuration + * update_wol(): Update of changes in the WoL configuration. + * + * @cdev: Qed dev pointer. + * @enabled: true iff WoL should be enabled. * - * @param cdev - * @param enabled - true iff WoL should be enabled. + * Return: Int. */ int (*update_wol) (struct qed_dev *cdev, bool enabled); /** - * @brief read_module_eeprom + * read_module_eeprom(): Read EEPROM. * - * @param cdev - * @param buf - buffer - * @param dev_addr - PHY device memory region - * @param offset - offset into eeprom contents to be read - * @param len - buffer length, i.e., max bytes to be read + * @cdev: Qed dev pointer. + * @buf: buffer. + * @dev_addr: PHY device memory region. + * @offset: offset into eeprom contents to be read. + * @len: buffer length, i.e., max bytes to be read. + * + * Return: Int. */ int (*read_module_eeprom)(struct qed_dev *cdev, char *buf, u8 dev_addr, u32 offset, u32 len); /** - * @brief get_affin_hwfn_idx + * get_affin_hwfn_idx(): Get affine HW function. + * + * @cdev: Qed dev pointer. * - * @param cdev + * Return: u8. */ u8 (*get_affin_hwfn_idx)(struct qed_dev *cdev); /** - * @brief read_nvm_cfg - Read NVM config attribute value. - * @param cdev - * @param buf - buffer - * @param cmd - NVM CFG command id - * @param entity_id - Entity id + * read_nvm_cfg(): Read NVM config attribute value. + * + * @cdev: Qed dev pointer. + * @buf: Buffer. + * @cmd: NVM CFG command id. + * @entity_id: Entity id. * + * Return: Int. */ int (*read_nvm_cfg)(struct qed_dev *cdev, u8 **buf, u32 cmd, u32 entity_id); /** - * @brief read_nvm_cfg - Read NVM config attribute value. 
- * @param cdev - * @param cmd - NVM CFG command id + * read_nvm_cfg_len(): Read NVM config attribute value. * - * @return config id length, 0 on error. + * @cdev: Qed dev pointer. + * @cmd: NVM CFG command id. + * + * Return: config id length, 0 on error. */ int (*read_nvm_cfg_len)(struct qed_dev *cdev, u32 cmd); /** - * @brief set_grc_config - Configure value for grc config id. - * @param cdev - * @param cfg_id - grc config id - * @param val - grc config value + * set_grc_config(): Configure value for grc config id. + * + * @cdev: Qed dev pointer. + * @cfg_id: grc config id + * @val: grc config value * + * Return: Int. */ int (*set_grc_config)(struct qed_dev *cdev, u32 cfg_id, u32 val); @@ -1397,18 +1422,16 @@ static inline u16 qed_sb_update_sb_idx(struct qed_sb_info *sb_info) } /** + * qed_sb_ack(): This function creates an update command for interrupts + * that is written to the IGU. * - * @brief This function creates an update command for interrupts that is - * written to the IGU. - * - * @param sb_info - This is the structure allocated and - * initialized per status block. Assumption is - * that it was initialized using qed_sb_init - * @param int_cmd - Enable/Disable/Nop - * @param upd_flg - whether igu consumer should be - * updated. + * @sb_info: This is the structure allocated and + * initialized per status block. Assumption is + * that it was initialized using qed_sb_init + * @int_cmd: Enable/Disable/Nop + * @upd_flg: Whether igu consumer should be updated. * - * @return inline void + * Return: inline void. */ static inline void qed_sb_ack(struct qed_sb_info *sb_info, enum igu_int_cmd int_cmd, diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h index 04180d9af560..494cdc3cd840 100644 --- a/include/linux/qed/qed_iscsi_if.h +++ b/include/linux/qed/qed_iscsi_if.h @@ -182,7 +182,7 @@ struct qed_iscsi_cb_ops { * @param stats - pointer to struck that would be filled * we stats * @return 0 on success, error otherwise. - * @change_mac Change MAC of interface + * @change_mac: Change MAC of interface * @param cdev * @param handle - the connection handle. * @param mac - new MAC to configure. diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h index ff808d248883..5b67cd03276e 100644 --- a/include/linux/qed/qed_ll2_if.h +++ b/include/linux/qed/qed_ll2_if.h @@ -208,57 +208,57 @@ enum qed_ll2_xmit_flags { struct qed_ll2_ops { /** - * @brief start - initializes ll2 + * start(): Initializes ll2. * - * @param cdev - * @param params - protocol driver configuration for the ll2. + * @cdev: Qed dev pointer. + * @params: Protocol driver configuration for the ll2. * - * @return 0 on success, otherwise error value. + * Return: 0 on success, otherwise error value. */ int (*start)(struct qed_dev *cdev, struct qed_ll2_params *params); /** - * @brief stop - stops the ll2 + * stop(): Stops the ll2 * - * @param cdev + * @cdev: Qed dev pointer. * - * @return 0 on success, otherwise error value. + * Return: 0 on success, otherwise error value. */ int (*stop)(struct qed_dev *cdev); /** - * @brief start_xmit - transmits an skb over the ll2 interface + * start_xmit(): Transmits an skb over the ll2 interface * - * @param cdev - * @param skb - * @param xmit_flags - Transmit options defined by the enum qed_ll2_xmit_flags. + * @cdev: Qed dev pointer. + * @skb: SKB. + * @xmit_flags: Transmit options defined by the enum qed_ll2_xmit_flags. * - * @return 0 on success, otherwise error value. + * Return: 0 on success, otherwise error value. 
*/ int (*start_xmit)(struct qed_dev *cdev, struct sk_buff *skb, unsigned long xmit_flags); /** - * @brief register_cb_ops - protocol driver register the callback for Rx/Tx + * register_cb_ops(): Protocol driver register the callback for Rx/Tx * packets. Should be called before `start'. * - * @param cdev - * @param cookie - to be passed to the callback functions. - * @param ops - the callback functions to register for Rx / Tx. + * @cdev: Qed dev pointer. + * @cookie: to be passed to the callback functions. + * @ops: the callback functions to register for Rx / Tx. * - * @return 0 on success, otherwise error value. + * Return: 0 on success, otherwise error value. */ void (*register_cb_ops)(struct qed_dev *cdev, const struct qed_ll2_cb_ops *ops, void *cookie); /** - * @brief get LL2 related statistics + * get_stats(): Get LL2 related statistics. * - * @param cdev - * @param stats - pointer to struct that would be filled with stats + * @cdev: Qed dev pointer. + * @stats: Pointer to struct that would be filled with stats. * - * @return 0 on success, error otherwise. + * Return: 0 on success, error otherwise. */ int (*get_stats)(struct qed_dev *cdev, struct qed_ll2_stats *stats); }; diff --git a/include/linux/qed/qed_nvmetcp_if.h b/include/linux/qed/qed_nvmetcp_if.h index 14671bc19ed1..1d51df347560 100644 --- a/include/linux/qed/qed_nvmetcp_if.h +++ b/include/linux/qed/qed_nvmetcp_if.h @@ -171,6 +171,23 @@ struct nvmetcp_task_params { * @param dest_port * @clear_all_filters: Clear all filters. * @param cdev + * @init_read_io: Init read IO. + * @task_params + * @cmd_pdu_header + * @nvme_cmd + * @sgl_task_params + * @init_write_io: Init write IO. + * @task_params + * @cmd_pdu_header + * @nvme_cmd + * @sgl_task_params + * @init_icreq_exchange: Exchange ICReq. + * @task_params + * @init_conn_req_pdu_hdr + * @tx_sgl_task_params + * @rx_sgl_task_params + * @init_task_cleanup: Init task cleanup. + * @task_params */ struct qed_nvmetcp_ops { const struct qed_common_ops *common; From 2950c5ac65b32477d2978f4a5bd8ea67f33f004c Mon Sep 17 00:00:00 2001 From: Konstantin Khorenko Date: Thu, 27 Jul 2023 18:26:09 +0300 Subject: [PATCH 215/513] qed: Fix scheduling in a tasklet while getting stats [ Upstream commit e346e231b42bcae6822a6326acfb7b741e9e6026 ] Here we've got to a situation when tasklet called usleep_range() in PTT acquire logic, thus welcome to the "scheduling while atomic" BUG(). BUG: scheduling while atomic: swapper/24/0/0x00000100 [] schedule+0x29/0x70 [] schedule_hrtimeout_range_clock+0xb2/0x150 [] schedule_hrtimeout_range+0x13/0x20 [] usleep_range+0x4f/0x70 [] qed_ptt_acquire+0x38/0x100 [qed] [] _qed_get_vport_stats+0x458/0x580 [qed] [] qed_get_vport_stats+0x1c/0xd0 [qed] [] qed_get_protocol_stats+0x93/0x100 [qed] qed_mcp_send_protocol_stats case MFW_DRV_MSG_GET_LAN_STATS: case MFW_DRV_MSG_GET_FCOE_STATS: case MFW_DRV_MSG_GET_ISCSI_STATS: case MFW_DRV_MSG_GET_RDMA_STATS: [] qed_mcp_handle_events+0x2d8/0x890 [qed] qed_int_assertion qed_int_attentions [] qed_int_sp_dpc+0xa50/0xdc0 [qed] [] tasklet_action+0x83/0x140 [] __do_softirq+0x125/0x2bb [] call_softirq+0x1c/0x30 [] do_softirq+0x65/0xa0 [] irq_exit+0x105/0x110 [] do_IRQ+0x56/0xf0 Fix this by making caller to provide the context whether it could be in atomic context flow or not when getting stats from QED driver. QED driver based on the context provided decide to schedule out or not when acquiring the PTT BAR window. 
We faced the BUG_ON() while getting vport stats, but according to the code same issue could happen for fcoe and iscsi statistics as well, so fixing them too. Fixes: 6c75424612a7 ("qed: Add support for NCSI statistics.") Fixes: 1e128c81290a ("qed: Add support for hardware offloaded FCoE.") Fixes: 2f2b2614e893 ("qed: Provide iSCSI statistics to management") Cc: Sudarsana Kalluru Cc: David Miller Cc: Manish Chopra Signed-off-by: Konstantin Khorenko Reviewed-by: Simon Horman Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/qlogic/qed/qed_dev_api.h | 16 ++++++++++++ drivers/net/ethernet/qlogic/qed/qed_fcoe.c | 19 ++++++++++---- drivers/net/ethernet/qlogic/qed/qed_fcoe.h | 17 ++++++++++-- drivers/net/ethernet/qlogic/qed/qed_hw.c | 26 ++++++++++++++++--- drivers/net/ethernet/qlogic/qed/qed_iscsi.c | 19 ++++++++++---- drivers/net/ethernet/qlogic/qed/qed_iscsi.h | 8 ++++-- drivers/net/ethernet/qlogic/qed/qed_l2.c | 19 ++++++++++---- drivers/net/ethernet/qlogic/qed/qed_l2.h | 24 +++++++++++++++++ drivers/net/ethernet/qlogic/qed/qed_main.c | 6 ++--- 9 files changed, 128 insertions(+), 26 deletions(-) diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h index f0a825b985a4..a0a766a1723c 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev_api.h +++ b/drivers/net/ethernet/qlogic/qed/qed_dev_api.h @@ -194,6 +194,22 @@ void qed_hw_remove(struct qed_dev *cdev); */ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn); +/** + * qed_ptt_acquire_context(): Allocate a PTT window honoring the context + * atomicy. + * + * @p_hwfn: HW device data. + * @is_atomic: Hint from the caller - if the func can sleep or not. + * + * Context: The function should not sleep in case is_atomic == true. + * Return: struct qed_ptt. + * + * Should be called at the entry point to the driver + * (at the beginning of an exported function). + */ +struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, + bool is_atomic); + /** * qed_ptt_release(): Release PTT Window. 
* diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c index b768f0698170..0c55249b3a35 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.c +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.c @@ -694,13 +694,14 @@ static void _qed_fcoe_get_pstats(struct qed_hwfn *p_hwfn, } static int qed_fcoe_get_stats(struct qed_hwfn *p_hwfn, - struct qed_fcoe_stats *p_stats) + struct qed_fcoe_stats *p_stats, + bool is_atomic) { struct qed_ptt *p_ptt; memset(p_stats, 0, sizeof(*p_stats)); - p_ptt = qed_ptt_acquire(p_hwfn); + p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic); if (!p_ptt) { DP_ERR(p_hwfn, "Failed to acquire ptt\n"); @@ -974,19 +975,27 @@ static int qed_fcoe_destroy_conn(struct qed_dev *cdev, QED_SPQ_MODE_EBLOCK, NULL); } +static int qed_fcoe_stats_context(struct qed_dev *cdev, + struct qed_fcoe_stats *stats, + bool is_atomic) +{ + return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic); +} + static int qed_fcoe_stats(struct qed_dev *cdev, struct qed_fcoe_stats *stats) { - return qed_fcoe_get_stats(QED_AFFIN_HWFN(cdev), stats); + return qed_fcoe_stats_context(cdev, stats, false); } void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, - struct qed_mcp_fcoe_stats *stats) + struct qed_mcp_fcoe_stats *stats, + bool is_atomic) { struct qed_fcoe_stats proto_stats; /* Retrieve FW statistics */ memset(&proto_stats, 0, sizeof(proto_stats)); - if (qed_fcoe_stats(cdev, &proto_stats)) { + if (qed_fcoe_stats_context(cdev, &proto_stats, is_atomic)) { DP_VERBOSE(cdev, QED_MSG_STORAGE, "Failed to collect FCoE statistics\n"); return; diff --git a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h index 19c85adf4ceb..214e8299ecb4 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_fcoe.h +++ b/drivers/net/ethernet/qlogic/qed/qed_fcoe.h @@ -28,8 +28,20 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn); void qed_fcoe_setup(struct qed_hwfn *p_hwfn); void qed_fcoe_free(struct qed_hwfn *p_hwfn); +/** + * qed_get_protocol_stats_fcoe(): Fills provided statistics + * struct with statistics. + * + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * @is_atomic: Hint from the caller - if the func can sleep or not. + * + * Context: The function should not sleep in case is_atomic == true. + * Return: Void. 
+ */ void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, - struct qed_mcp_fcoe_stats *stats); + struct qed_mcp_fcoe_stats *stats, + bool is_atomic); #else /* CONFIG_QED_FCOE */ static inline int qed_fcoe_alloc(struct qed_hwfn *p_hwfn) { @@ -40,7 +52,8 @@ static inline void qed_fcoe_setup(struct qed_hwfn *p_hwfn) {} static inline void qed_fcoe_free(struct qed_hwfn *p_hwfn) {} static inline void qed_get_protocol_stats_fcoe(struct qed_dev *cdev, - struct qed_mcp_fcoe_stats *stats) + struct qed_mcp_fcoe_stats *stats, + bool is_atomic) { } #endif /* CONFIG_QED_FCOE */ diff --git a/drivers/net/ethernet/qlogic/qed/qed_hw.c b/drivers/net/ethernet/qlogic/qed/qed_hw.c index 554f30b0cfd5..6263f847b6b9 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_hw.c +++ b/drivers/net/ethernet/qlogic/qed/qed_hw.c @@ -23,7 +23,10 @@ #include "qed_reg_addr.h" #include "qed_sriov.h" -#define QED_BAR_ACQUIRE_TIMEOUT 1000 +#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT 1000 +#define QED_BAR_ACQUIRE_TIMEOUT_USLEEP 1000 +#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT 100000 +#define QED_BAR_ACQUIRE_TIMEOUT_UDELAY 10 /* Invalid values */ #define QED_BAR_INVALID_OFFSET (cpu_to_le32(-1)) @@ -84,12 +87,22 @@ void qed_ptt_pool_free(struct qed_hwfn *p_hwfn) } struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn) +{ + return qed_ptt_acquire_context(p_hwfn, false); +} + +struct qed_ptt *qed_ptt_acquire_context(struct qed_hwfn *p_hwfn, bool is_atomic) { struct qed_ptt *p_ptt; - unsigned int i; + unsigned int i, count; + + if (is_atomic) + count = QED_BAR_ACQUIRE_TIMEOUT_UDELAY_CNT; + else + count = QED_BAR_ACQUIRE_TIMEOUT_USLEEP_CNT; /* Take the free PTT from the list */ - for (i = 0; i < QED_BAR_ACQUIRE_TIMEOUT; i++) { + for (i = 0; i < count; i++) { spin_lock_bh(&p_hwfn->p_ptt_pool->lock); if (!list_empty(&p_hwfn->p_ptt_pool->free_list)) { @@ -105,7 +118,12 @@ struct qed_ptt *qed_ptt_acquire(struct qed_hwfn *p_hwfn) } spin_unlock_bh(&p_hwfn->p_ptt_pool->lock); - usleep_range(1000, 2000); + + if (is_atomic) + udelay(QED_BAR_ACQUIRE_TIMEOUT_UDELAY); + else + usleep_range(QED_BAR_ACQUIRE_TIMEOUT_USLEEP, + QED_BAR_ACQUIRE_TIMEOUT_USLEEP * 2); } DP_NOTICE(p_hwfn, "PTT acquire timeout - failed to allocate PTT\n"); diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c index db926d8b3033..f11139177277 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.c +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.c @@ -1000,13 +1000,14 @@ static void _qed_iscsi_get_pstats(struct qed_hwfn *p_hwfn, } static int qed_iscsi_get_stats(struct qed_hwfn *p_hwfn, - struct qed_iscsi_stats *stats) + struct qed_iscsi_stats *stats, + bool is_atomic) { struct qed_ptt *p_ptt; memset(stats, 0, sizeof(*stats)); - p_ptt = qed_ptt_acquire(p_hwfn); + p_ptt = qed_ptt_acquire_context(p_hwfn, is_atomic); if (!p_ptt) { DP_ERR(p_hwfn, "Failed to acquire ptt\n"); return -EAGAIN; @@ -1337,9 +1338,16 @@ static int qed_iscsi_destroy_conn(struct qed_dev *cdev, QED_SPQ_MODE_EBLOCK, NULL); } +static int qed_iscsi_stats_context(struct qed_dev *cdev, + struct qed_iscsi_stats *stats, + bool is_atomic) +{ + return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats, is_atomic); +} + static int qed_iscsi_stats(struct qed_dev *cdev, struct qed_iscsi_stats *stats) { - return qed_iscsi_get_stats(QED_AFFIN_HWFN(cdev), stats); + return qed_iscsi_stats_context(cdev, stats, false); } static int qed_iscsi_change_mac(struct qed_dev *cdev, @@ -1359,13 +1367,14 @@ static int qed_iscsi_change_mac(struct qed_dev *cdev, } void 
qed_get_protocol_stats_iscsi(struct qed_dev *cdev, - struct qed_mcp_iscsi_stats *stats) + struct qed_mcp_iscsi_stats *stats, + bool is_atomic) { struct qed_iscsi_stats proto_stats; /* Retrieve FW statistics */ memset(&proto_stats, 0, sizeof(proto_stats)); - if (qed_iscsi_stats(cdev, &proto_stats)) { + if (qed_iscsi_stats_context(cdev, &proto_stats, is_atomic)) { DP_VERBOSE(cdev, QED_MSG_STORAGE, "Failed to collect ISCSI statistics\n"); return; diff --git a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h index dec2b00259d4..974cb8d26608 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_iscsi.h +++ b/drivers/net/ethernet/qlogic/qed/qed_iscsi.h @@ -39,11 +39,14 @@ void qed_iscsi_free(struct qed_hwfn *p_hwfn); * * @cdev: Qed dev pointer. * @stats: Points to struct that will be filled with statistics. + * @is_atomic: Hint from the caller - if the func can sleep or not. * + * Context: The function should not sleep in case is_atomic == true. * Return: Void. */ void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, - struct qed_mcp_iscsi_stats *stats); + struct qed_mcp_iscsi_stats *stats, + bool is_atomic); #else /* IS_ENABLED(CONFIG_QED_ISCSI) */ static inline int qed_iscsi_alloc(struct qed_hwfn *p_hwfn) { @@ -56,7 +59,8 @@ static inline void qed_iscsi_free(struct qed_hwfn *p_hwfn) {} static inline void qed_get_protocol_stats_iscsi(struct qed_dev *cdev, - struct qed_mcp_iscsi_stats *stats) {} + struct qed_mcp_iscsi_stats *stats, + bool is_atomic) {} #endif /* IS_ENABLED(CONFIG_QED_ISCSI) */ #endif diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index bc17bc36d346..6ffa6425a75a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1863,7 +1863,8 @@ static void __qed_get_vport_stats(struct qed_hwfn *p_hwfn, } static void _qed_get_vport_stats(struct qed_dev *cdev, - struct qed_eth_stats *stats) + struct qed_eth_stats *stats, + bool is_atomic) { u8 fw_vport = 0; int i; @@ -1872,10 +1873,11 @@ static void _qed_get_vport_stats(struct qed_dev *cdev, for_each_hwfn(cdev, i) { struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; - struct qed_ptt *p_ptt = IS_PF(cdev) ? qed_ptt_acquire(p_hwfn) - : NULL; + struct qed_ptt *p_ptt; bool b_get_port_stats; + p_ptt = IS_PF(cdev) ? 
qed_ptt_acquire_context(p_hwfn, is_atomic) + : NULL; if (IS_PF(cdev)) { /* The main vport index is relative first */ if (qed_fw_vport(p_hwfn, 0, &fw_vport)) { @@ -1900,6 +1902,13 @@ static void _qed_get_vport_stats(struct qed_dev *cdev, } void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) +{ + qed_get_vport_stats_context(cdev, stats, false); +} + +void qed_get_vport_stats_context(struct qed_dev *cdev, + struct qed_eth_stats *stats, + bool is_atomic) { u32 i; @@ -1908,7 +1917,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats) return; } - _qed_get_vport_stats(cdev, stats); + _qed_get_vport_stats(cdev, stats, is_atomic); if (!cdev->reset_stats) return; @@ -1960,7 +1969,7 @@ void qed_reset_vport_stats(struct qed_dev *cdev) if (!cdev->reset_stats) { DP_INFO(cdev, "Reset stats not allocated\n"); } else { - _qed_get_vport_stats(cdev, cdev->reset_stats); + _qed_get_vport_stats(cdev, cdev->reset_stats, false); cdev->reset_stats->common.link_change_count = 0; } } diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h index 2ab7f3f0cf6c..602a12a348b2 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.h +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h @@ -250,8 +250,32 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, enum spq_mode comp_mode, struct qed_spq_comp_cb *p_comp_data); +/** + * qed_get_vport_stats(): Fills provided statistics + * struct with statistics. + * + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * + * Return: Void. + */ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); +/** + * qed_get_vport_stats_context(): Fills provided statistics + * struct with statistics. + * + * @cdev: Qed dev pointer. + * @stats: Points to struct that will be filled with statistics. + * @is_atomic: Hint from the caller - if the func can sleep or not. + * + * Context: The function should not sleep in case is_atomic == true. + * Return: Void. + */ +void qed_get_vport_stats_context(struct qed_dev *cdev, + struct qed_eth_stats *stats, + bool is_atomic); + void qed_reset_vport_stats(struct qed_dev *cdev); /** diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index d10e1cd6d2ba..26700b0b4b37 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -3054,7 +3054,7 @@ void qed_get_protocol_stats(struct qed_dev *cdev, switch (type) { case QED_MCP_LAN_STATS: - qed_get_vport_stats(cdev, ð_stats); + qed_get_vport_stats_context(cdev, ð_stats, true); stats->lan_stats.ucast_rx_pkts = eth_stats.common.rx_ucast_pkts; stats->lan_stats.ucast_tx_pkts = @@ -3062,10 +3062,10 @@ void qed_get_protocol_stats(struct qed_dev *cdev, stats->lan_stats.fcs_err = -1; break; case QED_MCP_FCOE_STATS: - qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats); + qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true); break; case QED_MCP_ISCSI_STATS: - qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats); + qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true); break; default: DP_VERBOSE(cdev, QED_MSG_SP, From 6c058a1f67f0b2cbc22dedb05b02e1555d6d19b6 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 28 Jul 2023 15:03:10 +0000 Subject: [PATCH 216/513] net: annotate data-races around sk->sk_max_pacing_rate [ Upstream commit ea7f45ef77b39e72244d282e47f6cb1ef4135cd2 ] sk_getsockopt() runs locklessly. 
This means sk->sk_max_pacing_rate can be read while other threads are changing its value. Fixes: 62748f32d501 ("net: introduce SO_MAX_PACING_RATE") Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/core/sock.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/net/core/sock.c b/net/core/sock.c index cf1e437ae487..d392efa0d955 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1297,7 +1297,8 @@ int sock_setsockopt(struct socket *sock, int level, int optname, cmpxchg(&sk->sk_pacing_status, SK_PACING_NONE, SK_PACING_NEEDED); - sk->sk_max_pacing_rate = ulval; + /* Pairs with READ_ONCE() from sk_getsockopt() */ + WRITE_ONCE(sk->sk_max_pacing_rate, ulval); sk->sk_pacing_rate = min(sk->sk_pacing_rate, ulval); break; } @@ -1680,12 +1681,14 @@ int sock_getsockopt(struct socket *sock, int level, int optname, #endif case SO_MAX_PACING_RATE: + /* The READ_ONCE() pair with the WRITE_ONCE() in sk_setsockopt() */ if (sizeof(v.ulval) != sizeof(v.val) && len >= sizeof(v.ulval)) { lv = sizeof(v.ulval); - v.ulval = sk->sk_max_pacing_rate; + v.ulval = READ_ONCE(sk->sk_max_pacing_rate); } else { /* 32bit version */ - v.val = min_t(unsigned long, sk->sk_max_pacing_rate, ~0U); + v.val = min_t(unsigned long, ~0U, + READ_ONCE(sk->sk_max_pacing_rate)); } break; From 0d1047b77b237d36dedb88581582fd0bf92a02dc Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 28 Jul 2023 15:03:11 +0000 Subject: [PATCH 217/513] net: add missing READ_ONCE(sk->sk_rcvlowat) annotation [ Upstream commit e6d12bdb435d23ff6c1890c852d85408a2f496ee ] In a prior commit, I forgot to change sk_getsockopt() when reading sk->sk_rcvlowat locklessly. Fixes: eac66402d1c3 ("net: annotate sk->sk_rcvlowat lockless reads") Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/core/sock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/sock.c b/net/core/sock.c index d392efa0d955..ffd566d52eec 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1549,7 +1549,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_RCVLOWAT: - v.val = sk->sk_rcvlowat; + v.val = READ_ONCE(sk->sk_rcvlowat); break; case SO_SNDLOWAT: From 98f0d1db3a274400d480a3214d7ebb5d21f480f7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 28 Jul 2023 15:03:13 +0000 Subject: [PATCH 218/513] net: add missing READ_ONCE(sk->sk_sndbuf) annotation [ Upstream commit 74bc084327c643499474ba75df485607da37dd6e ] In a prior commit, I forgot to change sk_getsockopt() when reading sk->sk_sndbuf locklessly. Fixes: e292f05e0df7 ("tcp: annotate sk->sk_sndbuf lockless reads") Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/core/sock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/sock.c b/net/core/sock.c index ffd566d52eec..0e480ac6c4de 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1456,7 +1456,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_SNDBUF: - v.val = sk->sk_sndbuf; + v.val = READ_ONCE(sk->sk_sndbuf); break; case SO_RCVBUF: From fdd8d8d54d6a096cea6b44a7e3822f43d502cf75 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 28 Jul 2023 15:03:14 +0000 Subject: [PATCH 219/513] net: add missing READ_ONCE(sk->sk_rcvbuf) annotation [ Upstream commit b4b553253091cafe9ec38994acf42795e073bef5 ] In a prior commit, I forgot to change sk_getsockopt() when reading sk->sk_rcvbuf locklessly. 
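The annotation pattern is the same across this run of sock.c fixes: the writer publishes the field with WRITE_ONCE() and every lockless reader in sk_getsockopt() uses READ_ONCE(). A minimal userspace sketch of the idea, with macros that only emulate the kernel helpers (an assumption, not the kernel implementation):

#include <stdio.h>

#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

static int sk_rcvbuf = 87380;

static void setsockopt_path(int val)
{
	/* writer side: one marked store publishes the new value */
	WRITE_ONCE(sk_rcvbuf, val);
}

static int getsockopt_path(void)
{
	/* lockless reader: marked load, pairs with the store above */
	return READ_ONCE(sk_rcvbuf);
}

int main(void)
{
	setsockopt_path(212992);
	printf("rcvbuf=%d\n", getsockopt_path());
	return 0;
}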
Fixes: ebb3b78db7bf ("tcp: annotate sk->sk_rcvbuf lockless reads") Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/core/sock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/sock.c b/net/core/sock.c index 0e480ac6c4de..59abfb2c9226 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1460,7 +1460,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, break; case SO_RCVBUF: - v.val = sk->sk_rcvbuf; + v.val = READ_ONCE(sk->sk_rcvbuf); break; case SO_REUSEADDR: From e934c50c48e2861652735aadc9f3feac9920764d Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 28 Jul 2023 15:03:16 +0000 Subject: [PATCH 220/513] net: add missing data-race annotations around sk->sk_peek_off [ Upstream commit 11695c6e966b0ec7ed1d16777d294cef865a5c91 ] sk_getsockopt() runs locklessly, thus we need to annotate the read of sk->sk_peek_off. While we are at it, add corresponding annotations to sk_set_peek_off() and unix_set_peek_off(). Fixes: b9bb53f3836f ("sock: convert sk_peek_offset functions to WRITE_ONCE") Signed-off-by: Eric Dumazet Cc: Willem de Bruijn Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/core/sock.c | 4 ++-- net/unix/af_unix.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/net/core/sock.c b/net/core/sock.c index 59abfb2c9226..0cca1ccf1499 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1643,7 +1643,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, if (!sock->ops->set_peek_off) return -EOPNOTSUPP; - v.val = sk->sk_peek_off; + v.val = READ_ONCE(sk->sk_peek_off); break; case SO_NOFCS: v.val = sock_flag(sk, SOCK_NOFCS); @@ -2901,7 +2901,7 @@ EXPORT_SYMBOL(__sk_mem_reclaim); int sk_set_peek_off(struct sock *sk, int val) { - sk->sk_peek_off = val; + WRITE_ONCE(sk->sk_peek_off, val); return 0; } EXPORT_SYMBOL_GPL(sk_set_peek_off); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index d326540e4938..7a076d5017d1 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -717,7 +717,7 @@ static int unix_set_peek_off(struct sock *sk, int val) if (mutex_lock_interruptible(&u->iolock)) return -EINTR; - sk->sk_peek_off = val; + WRITE_ONCE(sk->sk_peek_off, val); mutex_unlock(&u->iolock); return 0; From 2c55d4941518a0cb55f3d28aef753bffc435f005 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 28 Jul 2023 15:03:17 +0000 Subject: [PATCH 221/513] net: add missing data-race annotation for sk_ll_usec [ Upstream commit e5f0d2dd3c2faa671711dac6d3ff3cef307bcfe3 ] In a prior commit I forgot that sk_getsockopt() reads sk->sk_ll_usec without holding a lock. Fixes: 0dbffbb5335a ("net: annotate data race around sk_ll_usec") Signed-off-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/core/sock.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/core/sock.c b/net/core/sock.c index 0cca1ccf1499..1f9401d757cb 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -1673,7 +1673,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname, #ifdef CONFIG_NET_RX_BUSY_POLL case SO_BUSY_POLL: - v.val = sk->sk_ll_usec; + v.val = READ_ONCE(sk->sk_ll_usec); break; case SO_PREFER_BUSY_POLL: v.val = READ_ONCE(sk->sk_prefer_busy_poll); From f04f6d9b3b060f7e11219a65a76da65f1489e391 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Fri, 28 Jul 2023 17:07:05 -0700 Subject: [PATCH 222/513] net/sched: taprio: Limit TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME to INT_MAX. 
[ Upstream commit e739718444f7bf2fa3d70d101761ad83056ca628 ] syzkaller found zero division error [0] in div_s64_rem() called from get_cycle_time_elapsed(), where sched->cycle_time is the divisor. We have tests in parse_taprio_schedule() so that cycle_time will never be 0, and actually cycle_time is not 0 in get_cycle_time_elapsed(). The problem is that the types of divisor are different; cycle_time is s64, but the argument of div_s64_rem() is s32. syzkaller fed this input and 0x100000000 is cast to s32 to be 0. @TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME={0xc, 0x8, 0x100000000} We use s64 for cycle_time to cast it to ktime_t, so let's keep it and set max for cycle_time. While at it, we prevent overflow in setup_txtime() and add another test in parse_taprio_schedule() to check if cycle_time overflows. Also, we add a new tdc test case for this issue. [0]: divide error: 0000 [#1] PREEMPT SMP KASAN NOPTI CPU: 1 PID: 103 Comm: kworker/1:3 Not tainted 6.5.0-rc1-00330-g60cc1f7d0605 #3 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014 Workqueue: ipv6_addrconf addrconf_dad_work RIP: 0010:div_s64_rem include/linux/math64.h:42 [inline] RIP: 0010:get_cycle_time_elapsed net/sched/sch_taprio.c:223 [inline] RIP: 0010:find_entry_to_transmit+0x252/0x7e0 net/sched/sch_taprio.c:344 Code: 3c 02 00 0f 85 5e 05 00 00 48 8b 4c 24 08 4d 8b bd 40 01 00 00 48 8b 7c 24 48 48 89 c8 4c 29 f8 48 63 f7 48 99 48 89 74 24 70 <48> f7 fe 48 29 d1 48 8d 04 0f 49 89 cc 48 89 44 24 20 49 8d 85 10 RSP: 0018:ffffc90000acf260 EFLAGS: 00010206 RAX: 177450e0347560cf RBX: 0000000000000000 RCX: 177450e0347560cf RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000100000000 RBP: 0000000000000056 R08: 0000000000000000 R09: ffffed10020a0934 R10: ffff8880105049a7 R11: ffff88806cf3a520 R12: ffff888010504800 R13: ffff88800c00d800 R14: ffff8880105049a0 R15: 0000000000000000 FS: 0000000000000000(0000) GS:ffff88806cf00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f0edf84f0e8 CR3: 000000000d73c002 CR4: 0000000000770ee0 PKRU: 55555554 Call Trace: get_packet_txtime net/sched/sch_taprio.c:508 [inline] taprio_enqueue_one+0x900/0xff0 net/sched/sch_taprio.c:577 taprio_enqueue+0x378/0xae0 net/sched/sch_taprio.c:658 dev_qdisc_enqueue+0x46/0x170 net/core/dev.c:3732 __dev_xmit_skb net/core/dev.c:3821 [inline] __dev_queue_xmit+0x1b2f/0x3000 net/core/dev.c:4169 dev_queue_xmit include/linux/netdevice.h:3088 [inline] neigh_resolve_output net/core/neighbour.c:1552 [inline] neigh_resolve_output+0x4a7/0x780 net/core/neighbour.c:1532 neigh_output include/net/neighbour.h:544 [inline] ip6_finish_output2+0x924/0x17d0 net/ipv6/ip6_output.c:135 __ip6_finish_output+0x620/0xaa0 net/ipv6/ip6_output.c:196 ip6_finish_output net/ipv6/ip6_output.c:207 [inline] NF_HOOK_COND include/linux/netfilter.h:292 [inline] ip6_output+0x206/0x410 net/ipv6/ip6_output.c:228 dst_output include/net/dst.h:458 [inline] NF_HOOK.constprop.0+0xea/0x260 include/linux/netfilter.h:303 ndisc_send_skb+0x872/0xe80 net/ipv6/ndisc.c:508 ndisc_send_ns+0xb5/0x130 net/ipv6/ndisc.c:666 addrconf_dad_work+0xc14/0x13f0 net/ipv6/addrconf.c:4175 process_one_work+0x92c/0x13a0 kernel/workqueue.c:2597 worker_thread+0x60f/0x1240 kernel/workqueue.c:2748 kthread+0x2fe/0x3f0 kernel/kthread.c:389 ret_from_fork+0x2c/0x50 arch/x86/entry/entry_64.S:308 Modules linked in: Fixes: 4cfd5779bd6e ("taprio: Add support for txtime-assist mode") Reported-by: syzkaller Signed-off-by: Kuniyuki Iwashima Co-developed-by: Eric 
Dumazet Co-developed-by: Pedro Tammela Acked-by: Vinicius Costa Gomes Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/sched/sch_taprio.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index e203deacc953..e40b4425eb6b 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -780,6 +780,11 @@ static const struct nla_policy entry_policy[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { [TCA_TAPRIO_SCHED_ENTRY_INTERVAL] = { .type = NLA_U32 }, }; +static struct netlink_range_validation_signed taprio_cycle_time_range = { + .min = 0, + .max = INT_MAX, +}; + static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { [TCA_TAPRIO_ATTR_PRIOMAP] = { .len = sizeof(struct tc_mqprio_qopt) @@ -788,7 +793,8 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { [TCA_TAPRIO_ATTR_SCHED_BASE_TIME] = { .type = NLA_S64 }, [TCA_TAPRIO_ATTR_SCHED_SINGLE_ENTRY] = { .type = NLA_NESTED }, [TCA_TAPRIO_ATTR_SCHED_CLOCKID] = { .type = NLA_S32 }, - [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = { .type = NLA_S64 }, + [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME] = + NLA_POLICY_FULL_RANGE_SIGNED(NLA_S64, &taprio_cycle_time_range), [TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME_EXTENSION] = { .type = NLA_S64 }, [TCA_TAPRIO_ATTR_FLAGS] = { .type = NLA_U32 }, [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 }, @@ -923,6 +929,11 @@ static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb, return -EINVAL; } + if (cycle < 0 || cycle > INT_MAX) { + NL_SET_ERR_MSG(extack, "'cycle_time' is too big"); + return -EINVAL; + } + new->cycle_time = cycle; } @@ -1127,7 +1138,7 @@ static void setup_txtime(struct taprio_sched *q, struct sched_gate_list *sched, ktime_t base) { struct sched_entry *entry; - u32 interval = 0; + u64 interval = 0; list_for_each_entry(entry, &sched->entries, list) { entry->next_txtime = ktime_add_ns(base, interval); From b58d34068fd9f96bfc7d389988dfaf9a92a8fe00 Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Sat, 29 Jul 2023 17:51:07 +0800 Subject: [PATCH 223/513] bpf, cpumap: Handle skb as well when clean up ptr_ring [ Upstream commit 7c62b75cd1a792e14b037fa4f61f9b18914e7de1 ] The following warning was reported when running xdp_redirect_cpu with both skb-mode and stress-mode enabled: ------------[ cut here ]------------ Incorrect XDP memory type (-2128176192) usage WARNING: CPU: 7 PID: 1442 at net/core/xdp.c:405 Modules linked in: CPU: 7 PID: 1442 Comm: kworker/7:0 Tainted: G 6.5.0-rc2+ #1 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996) Workqueue: events __cpu_map_entry_free RIP: 0010:__xdp_return+0x1e4/0x4a0 ...... Call Trace: ? show_regs+0x65/0x70 ? __warn+0xa5/0x240 ? __xdp_return+0x1e4/0x4a0 ...... xdp_return_frame+0x4d/0x150 __cpu_map_entry_free+0xf9/0x230 process_one_work+0x6b0/0xb80 worker_thread+0x96/0x720 kthread+0x1a5/0x1f0 ret_from_fork+0x3a/0x70 ret_from_fork_asm+0x1b/0x30 The reason for the warning is twofold. One is due to the kthread cpu_map_kthread_run() is stopped prematurely. Another one is __cpu_map_ring_cleanup() doesn't handle skb mode and treats skbs in ptr_ring as XDP frames. Prematurely-stopped kthread will be fixed by the preceding patch and ptr_ring will be empty when __cpu_map_ring_cleanup() is called. But as the comments in __cpu_map_ring_cleanup() said, handling and freeing skbs in ptr_ring as well to "catch any broken behaviour gracefully". 
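The cleanup depends on the low pointer bit that generic cpumap sets on skb entries in the ring so they can be told apart from xdp_frames. A standalone sketch of that tagging scheme, with the helpers reimplemented here for illustration (they mirror, but are not, the kernel's __ptr_test_bit()/__ptr_clear_bit()):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SKB_TAG 0x1UL	/* bit 0 is free because allocations are word aligned */

static void *tag_skb(void *p)  { return (void *)((uintptr_t)p | SKB_TAG); }
static int is_skb(void *p)     { return (uintptr_t)p & SKB_TAG; }
static void *untag(void *p)    { return (void *)((uintptr_t)p & ~SKB_TAG); }

int main(void)
{
	long *obj = malloc(sizeof(*obj));
	void *ring_entry = tag_skb(obj);	/* what the skb path would enqueue */

	if (is_skb(ring_entry))
		puts("skb entry: clear the tag and free it as an skb");
	else
		puts("xdp_frame entry: return the frame");

	free(untag(ring_entry));
	return 0;
}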
Fixes: 11941f8a8536 ("bpf: cpumap: Implement generic cpumap") Signed-off-by: Hou Tao Acked-by: Jesper Dangaard Brouer Link: https://lore.kernel.org/r/20230729095107.1722450-3-houtao@huaweicloud.com Signed-off-by: Martin KaFai Lau Signed-off-by: Sasha Levin --- kernel/bpf/cpumap.c | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index db6221773e43..17f52efba1c8 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -133,11 +133,17 @@ static void __cpu_map_ring_cleanup(struct ptr_ring *ring) * invoked cpu_map_kthread_stop(). Catch any broken behaviour * gracefully and warn once. */ - struct xdp_frame *xdpf; + void *ptr; - while ((xdpf = ptr_ring_consume(ring))) - if (WARN_ON_ONCE(xdpf)) - xdp_return_frame(xdpf); + while ((ptr = ptr_ring_consume(ring))) { + WARN_ON_ONCE(1); + if (unlikely(__ptr_test_bit(0, &ptr))) { + __ptr_clear_bit(0, &ptr); + kfree_skb(ptr); + continue; + } + xdp_return_frame(ptr); + } } static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) From 262430dfc618509246e07acd26211cb4cca79ecc Mon Sep 17 00:00:00 2001 From: valis Date: Sat, 29 Jul 2023 08:32:00 -0400 Subject: [PATCH 224/513] net/sched: cls_u32: No longer copy tcf_result on update to avoid use-after-free [ Upstream commit 3044b16e7c6fe5d24b1cdbcf1bd0a9d92d1ebd81 ] When u32_change() is called on an existing filter, the whole tcf_result struct is always copied into the new instance of the filter. This causes a problem when updating a filter bound to a class, as tcf_unbind_filter() is always called on the old instance in the success path, decreasing filter_cnt of the still referenced class and allowing it to be deleted, leading to a use-after-free. Fix this by no longer copying the tcf_result struct from the old filter. Fixes: de5df63228fc ("net: sched: cls_u32 changes to knode must appear atomic to readers") Reported-by: valis Reported-by: M A Ramdhan Signed-off-by: valis Signed-off-by: Jamal Hadi Salim Reviewed-by: Victor Nogueira Reviewed-by: Pedro Tammela Reviewed-by: M A Ramdhan Link: https://lore.kernel.org/r/20230729123202.72406-2-jhs@mojatatu.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/sched/cls_u32.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c index e4e38aa75111..17edcf1d1c3b 100644 --- a/net/sched/cls_u32.c +++ b/net/sched/cls_u32.c @@ -812,7 +812,6 @@ static struct tc_u_knode *u32_init_knode(struct net *net, struct tcf_proto *tp, new->ifindex = n->ifindex; new->fshift = n->fshift; - new->res = n->res; new->flags = n->flags; RCU_INIT_POINTER(new->ht_down, ht); From 9edf7955025a602ab6bcc94d923c436e160a10e3 Mon Sep 17 00:00:00 2001 From: valis Date: Sat, 29 Jul 2023 08:32:01 -0400 Subject: [PATCH 225/513] net/sched: cls_fw: No longer copy tcf_result on update to avoid use-after-free [ Upstream commit 76e42ae831991c828cffa8c37736ebfb831ad5ec ] When fw_change() is called on an existing filter, the whole tcf_result struct is always copied into the new instance of the filter. This causes a problem when updating a filter bound to a class, as tcf_unbind_filter() is always called on the old instance in the success path, decreasing filter_cnt of the still referenced class and allowing it to be deleted, leading to a use-after-free. Fix this by no longer copying the tcf_result struct from the old filter. 
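The reference imbalance is the same in the cls_u32, cls_fw and cls_route fixes of this batch. A toy refcount model of the bug, using simplified stand-ins rather than the real tc structures:

#include <stdio.h>

struct tcls { int filter_cnt; };	/* stands in for the bound class */

static void bind_filter(struct tcls *c)   { c->filter_cnt++; }
static void unbind_filter(struct tcls *c) { c->filter_cnt--; }

int main(void)
{
	struct tcls cls = { .filter_cnt = 0 };
	int new_filter_copied_res;

	bind_filter(&cls);		/* old filter is bound to the class */

	new_filter_copied_res = 1;	/* buggy update: res copied, no new bind */
	unbind_filter(&cls);		/* success path unbinds the old filter */

	if (new_filter_copied_res && cls.filter_cnt == 0)
		puts("filter_cnt is 0 while the new filter still points at the class");

	return 0;
}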
Fixes: e35a8ee5993b ("net: sched: fw use RCU") Reported-by: valis Reported-by: Bing-Jhong Billy Jheng Signed-off-by: valis Signed-off-by: Jamal Hadi Salim Reviewed-by: Victor Nogueira Reviewed-by: Pedro Tammela Reviewed-by: M A Ramdhan Link: https://lore.kernel.org/r/20230729123202.72406-3-jhs@mojatatu.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/sched/cls_fw.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index ea52c320f67c..a2f53aee3909 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -265,7 +265,6 @@ static int fw_change(struct net *net, struct sk_buff *in_skb, return -ENOBUFS; fnew->id = f->id; - fnew->res = f->res; fnew->ifindex = f->ifindex; fnew->tp = f->tp; From 79c3d81c9ad140957b081c91908d7e2964dc603f Mon Sep 17 00:00:00 2001 From: valis Date: Sat, 29 Jul 2023 08:32:02 -0400 Subject: [PATCH 226/513] net/sched: cls_route: No longer copy tcf_result on update to avoid use-after-free [ Upstream commit b80b829e9e2c1b3f7aae34855e04d8f6ecaf13c8 ] When route4_change() is called on an existing filter, the whole tcf_result struct is always copied into the new instance of the filter. This causes a problem when updating a filter bound to a class, as tcf_unbind_filter() is always called on the old instance in the success path, decreasing filter_cnt of the still referenced class and allowing it to be deleted, leading to a use-after-free. Fix this by no longer copying the tcf_result struct from the old filter. Fixes: 1109c00547fc ("net: sched: RCU cls_route") Reported-by: valis Reported-by: Bing-Jhong Billy Jheng Signed-off-by: valis Signed-off-by: Jamal Hadi Salim Reviewed-by: Victor Nogueira Reviewed-by: Pedro Tammela Reviewed-by: M A Ramdhan Link: https://lore.kernel.org/r/20230729123202.72406-4-jhs@mojatatu.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/sched/cls_route.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/sched/cls_route.c b/net/sched/cls_route.c index 48712bc51bda..194468d0355a 100644 --- a/net/sched/cls_route.c +++ b/net/sched/cls_route.c @@ -511,7 +511,6 @@ static int route4_change(struct net *net, struct sk_buff *in_skb, if (fold) { f->id = fold->id; f->iif = fold->iif; - f->res = fold->res; f->handle = fold->handle; f->tp = fold->tp; From 5c534640a7da6d47243deab1d22400c306763439 Mon Sep 17 00:00:00 2001 From: Tomas Glozar Date: Fri, 28 Jul 2023 08:44:11 +0200 Subject: [PATCH 227/513] bpf: sockmap: Remove preempt_disable in sock_map_sk_acquire [ Upstream commit 13d2618b48f15966d1adfe1ff6a1985f5eef40ba ] Disabling preemption in sock_map_sk_acquire conflicts with GFP_ATOMIC allocation later in sk_psock_init_link on PREEMPT_RT kernels, since GFP_ATOMIC might sleep on RT (see bpf: Make BPF and PREEMPT_RT co-exist patchset notes for details). This causes calling bpf_map_update_elem on BPF_MAP_TYPE_SOCKMAP maps to BUG (sleeping function called from invalid context) on RT kernels. preempt_disable was introduced together with lock_sk and rcu_read_lock in commit 99ba2b5aba24e ("bpf: sockhash, disallow bpf_tcp_close and update in parallel"), probably to match disabled migration of BPF programs, and is no longer necessary. Remove preempt_disable to fix BUG in sock_map_update_common on RT. 
Signed-off-by: Tomas Glozar Reviewed-by: Jakub Sitnicki Link: https://lore.kernel.org/all/20200224140131.461979697@linutronix.de/ Fixes: 99ba2b5aba24 ("bpf: sockhash, disallow bpf_tcp_close and update in parallel") Reviewed-by: John Fastabend Link: https://lore.kernel.org/r/20230728064411.305576-1-tglozar@redhat.com Signed-off-by: Paolo Abeni Signed-off-by: Sasha Levin --- net/core/sock_map.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 86b4e8909ad1..85d3c62bdfa6 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -117,7 +117,6 @@ static void sock_map_sk_acquire(struct sock *sk) __acquires(&sk->sk_lock.slock) { lock_sock(sk); - preempt_disable(); rcu_read_lock(); } @@ -125,7 +124,6 @@ static void sock_map_sk_release(struct sock *sk) __releases(&sk->sk_lock.slock) { rcu_read_unlock(); - preempt_enable(); release_sock(sk); } From 3761ff4f8670023c2071975149825869b439bfd6 Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Thu, 15 Sep 2022 19:42:14 +0800 Subject: [PATCH 228/513] net: ll_temac: Switch to use dev_err_probe() helper [ Upstream commit 75ae8c284c00dc3584b7c173f6fcf96ee15bd02c ] dev_err() can be replace with dev_err_probe() which will check if error code is -EPROBE_DEFER. Signed-off-by: Yang Yingliang Signed-off-by: David S. Miller Stable-dep-of: ef45e8400f5b ("net: ll_temac: fix error checking of irq_of_parse_and_map()") Signed-off-by: Sasha Levin --- drivers/net/ethernet/xilinx/ll_temac_main.c | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 2ab29efa6b6e..303de9293fc7 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1556,16 +1556,12 @@ static int temac_probe(struct platform_device *pdev) } /* Error handle returned DMA RX and TX interrupts */ - if (lp->rx_irq < 0) { - if (lp->rx_irq != -EPROBE_DEFER) - dev_err(&pdev->dev, "could not get DMA RX irq\n"); - return lp->rx_irq; - } - if (lp->tx_irq < 0) { - if (lp->tx_irq != -EPROBE_DEFER) - dev_err(&pdev->dev, "could not get DMA TX irq\n"); - return lp->tx_irq; - } + if (lp->rx_irq < 0) + return dev_err_probe(&pdev->dev, lp->rx_irq, + "could not get DMA RX irq\n"); + if (lp->tx_irq < 0) + return dev_err_probe(&pdev->dev, lp->tx_irq, + "could not get DMA TX irq\n"); if (temac_np) { /* Retrieve the MAC address */ From 6cecfdf65053340f3dd08941ed35229262cc70b3 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 31 Jul 2023 10:42:32 +0300 Subject: [PATCH 229/513] net: ll_temac: fix error checking of irq_of_parse_and_map() [ Upstream commit ef45e8400f5bb66b03cc949f76c80e2a118447de ] Most kernel functions return negative error codes but some irq functions return zero on error. In this code irq_of_parse_and_map(), returns zero and platform_get_irq() returns negative error codes. We need to handle both cases appropriately. 
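A small userspace sketch of normalizing the two error conventions the patch has to cope with, zero-on-failure versus negative errno; it is purely illustrative and mirrors the "irq ?: -EINVAL" idiom used in the fix:

#include <errno.h>
#include <stdio.h>

static int normalize_irq(int irq)
{
	/* <= 0 covers both "0 means the lookup failed" and a negative errno */
	if (irq <= 0)
		return irq ? irq : -EINVAL;
	return irq;
}

int main(void)
{
	printf("%d %d %d\n",
	       normalize_irq(0),	/* -> -EINVAL */
	       normalize_irq(-ENXIO),	/* -> -ENXIO  */
	       normalize_irq(42));	/* -> 42      */
	return 0;
}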
Fixes: 8425c41d1ef7 ("net: ll_temac: Extend support to non-device-tree platforms") Signed-off-by: Dan Carpenter Acked-by: Esben Haabendal Reviewed-by: Yang Yingliang Reviewed-by: Harini Katakam Link: https://lore.kernel.org/r/3d0aef75-06e0-45a5-a2a6-2cc4738d4143@moroto.mountain Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/xilinx/ll_temac_main.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 303de9293fc7..b4db50c9e703 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1556,12 +1556,16 @@ static int temac_probe(struct platform_device *pdev) } /* Error handle returned DMA RX and TX interrupts */ - if (lp->rx_irq < 0) - return dev_err_probe(&pdev->dev, lp->rx_irq, + if (lp->rx_irq <= 0) { + rc = lp->rx_irq ?: -EINVAL; + return dev_err_probe(&pdev->dev, rc, "could not get DMA RX irq\n"); - if (lp->tx_irq < 0) - return dev_err_probe(&pdev->dev, lp->tx_irq, + } + if (lp->tx_irq <= 0) { + rc = lp->tx_irq ?: -EINVAL; + return dev_err_probe(&pdev->dev, rc, "could not get DMA TX irq\n"); + } if (temac_np) { /* Retrieve the MAC address */ From 766c9dd00c5f1b7cd983dffe9c0ea0fb951a1cca Mon Sep 17 00:00:00 2001 From: Yuanjun Gong Date: Mon, 31 Jul 2023 17:05:35 +0800 Subject: [PATCH 230/513] net: korina: handle clk prepare error in korina_probe() [ Upstream commit 0b6291ad1940c403734312d0e453e8dac9148f69 ] in korina_probe(), the return value of clk_prepare_enable() should be checked since it might fail. we can use devm_clk_get_optional_enabled() instead of devm_clk_get_optional() and clk_prepare_enable() to automatically handle the error. Fixes: e4cd854ec487 ("net: korina: Get mdio input clock via common clock framework") Signed-off-by: Yuanjun Gong Link: https://lore.kernel.org/r/20230731090535.21416-1-ruc_gongyuanjun@163.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/korina.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c index df9a8eefa007..916e85039b61 100644 --- a/drivers/net/ethernet/korina.c +++ b/drivers/net/ethernet/korina.c @@ -1301,11 +1301,10 @@ static int korina_probe(struct platform_device *pdev) else if (of_get_ethdev_address(pdev->dev.of_node, dev) < 0) eth_hw_addr_random(dev); - clk = devm_clk_get_optional(&pdev->dev, "mdioclk"); + clk = devm_clk_get_optional_enabled(&pdev->dev, "mdioclk"); if (IS_ERR(clk)) return PTR_ERR(clk); if (clk) { - clk_prepare_enable(clk); lp->mii_clock_freq = clk_get_rate(clk); } else { lp->mii_clock_freq = 200000000; /* max possible input clk */ From 193333229aace56f83607890652811c9ba7ed781 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 31 Jul 2023 11:48:32 +0100 Subject: [PATCH 231/513] net: netsec: Ignore 'phy-mode' on SynQuacer in DT mode [ Upstream commit f3bb7759a924713bc54d15f6d0d70733b5935fad ] As documented in acd7aaf51b20 ("netsec: ignore 'phy-mode' device property on ACPI systems") the SocioNext SynQuacer platform ships with firmware defining the PHY mode as RGMII even though the physical configuration of the PHY is for TX and RX delays. Since bbc4d71d63549bc ("net: phy: realtek: fix rtl8211e rx/tx delay config") this has caused misconfiguration of the PHY, rendering the network unusable. This was worked around for ACPI by ignoring the phy-mode property but the system is also used with DT. 
For DT instead if we're running on a SynQuacer force a working PHY mode, as well as the standard EDK2 firmware with DT there are also some of these systems that use u-boot and might not initialise the PHY if not netbooting. Newer firmware imagaes for at least EDK2 are available from Linaro so print a warning when doing this. Fixes: 533dd11a12f6 ("net: socionext: Add Synquacer NetSec driver") Signed-off-by: Mark Brown Acked-by: Ard Biesheuvel Acked-by: Ilias Apalodimas Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/20230731-synquacer-net-v3-1-944be5f06428@kernel.org Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/socionext/netsec.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/drivers/net/ethernet/socionext/netsec.c b/drivers/net/ethernet/socionext/netsec.c index 6b8013fb17c3..eb59e8abe691 100644 --- a/drivers/net/ethernet/socionext/netsec.c +++ b/drivers/net/ethernet/socionext/netsec.c @@ -1851,6 +1851,17 @@ static int netsec_of_probe(struct platform_device *pdev, return err; } + /* + * SynQuacer is physically configured with TX and RX delays + * but the standard firmware claimed otherwise for a long + * time, ignore it. + */ + if (of_machine_is_compatible("socionext,developer-box") && + priv->phy_interface != PHY_INTERFACE_MODE_RGMII_ID) { + dev_warn(&pdev->dev, "Outdated firmware reports incorrect PHY mode, overriding\n"); + priv->phy_interface = PHY_INTERFACE_MODE_RGMII_ID; + } + priv->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); if (!priv->phy_np) { dev_err(&pdev->dev, "missing required property 'phy-handle'\n"); From a0da2684db18dead3bcee12fb185e596e3d63c2b Mon Sep 17 00:00:00 2001 From: Lin Ma Date: Tue, 1 Aug 2023 09:32:48 +0800 Subject: [PATCH 232/513] net: dcb: choose correct policy to parse DCB_ATTR_BCN [ Upstream commit 31d49ba033095f6e8158c60f69714a500922e0c3 ] The dcbnl_bcn_setcfg uses erroneous policy to parse tb[DCB_ATTR_BCN], which is introduced in commit 859ee3c43812 ("DCB: Add support for DCB BCN"). Please see the comment in below code static int dcbnl_bcn_setcfg(...) { ... ret = nla_parse_nested_deprecated(..., dcbnl_pfc_up_nest, .. ) // !!! dcbnl_pfc_up_nest for attributes // DCB_PFC_UP_ATTR_0 to DCB_PFC_UP_ATTR_ALL in enum dcbnl_pfc_up_attrs ... for (i = DCB_BCN_ATTR_RP_0; i <= DCB_BCN_ATTR_RP_7; i++) { // !!! DCB_BCN_ATTR_RP_0 to DCB_BCN_ATTR_RP_7 in enum dcbnl_bcn_attrs ... value_byte = nla_get_u8(data[i]); ... } ... for (i = DCB_BCN_ATTR_BCNA_0; i <= DCB_BCN_ATTR_RI; i++) { // !!! DCB_BCN_ATTR_BCNA_0 to DCB_BCN_ATTR_RI in enum dcbnl_bcn_attrs ... value_int = nla_get_u32(data[i]); ... } ... } That is, the nla_parse_nested_deprecated uses dcbnl_pfc_up_nest attributes to parse nlattr defined in dcbnl_pfc_up_attrs. But the following access code fetch each nlattr as dcbnl_bcn_attrs attributes. By looking up the associated nla_policy for dcbnl_bcn_attrs. We can find the beginning part of these two policies are "same". static const struct nla_policy dcbnl_pfc_up_nest[...] = { [DCB_PFC_UP_ATTR_0] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_1] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_2] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_3] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_4] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_5] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_6] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_7] = {.type = NLA_U8}, [DCB_PFC_UP_ATTR_ALL] = {.type = NLA_FLAG}, }; static const struct nla_policy dcbnl_bcn_nest[...] 
= { [DCB_BCN_ATTR_RP_0] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_1] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_2] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_3] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_4] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_5] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_6] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_7] = {.type = NLA_U8}, [DCB_BCN_ATTR_RP_ALL] = {.type = NLA_FLAG}, // from here is somewhat different [DCB_BCN_ATTR_BCNA_0] = {.type = NLA_U32}, ... [DCB_BCN_ATTR_ALL] = {.type = NLA_FLAG}, }; Therefore, the current code is buggy and this nla_parse_nested_deprecated could overflow the dcbnl_pfc_up_nest and use the adjacent nla_policy to parse attributes from DCB_BCN_ATTR_BCNA_0. Hence use the correct policy dcbnl_bcn_nest to parse the nested tb[DCB_ATTR_BCN] TLV. Fixes: 859ee3c43812 ("DCB: Add support for DCB BCN") Signed-off-by: Lin Ma Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230801013248.87240-1-linma@zju.edu.cn Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/dcb/dcbnl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index dc4fb699b56c..d2981e89d363 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -946,7 +946,7 @@ static int dcbnl_bcn_setcfg(struct net_device *netdev, struct nlmsghdr *nlh, return -EOPNOTSUPP; ret = nla_parse_nested_deprecated(data, DCB_BCN_ATTR_MAX, - tb[DCB_ATTR_BCN], dcbnl_pfc_up_nest, + tb[DCB_ATTR_BCN], dcbnl_bcn_nest, NULL); if (ret) return ret; From 64e3affee2881bb22df7ce45dd1f1fd7990e382b Mon Sep 17 00:00:00 2001 From: Alexandra Winter Date: Tue, 1 Aug 2023 10:00:16 +0200 Subject: [PATCH 233/513] s390/qeth: Don't call dev_close/dev_open (DOWN/UP) [ Upstream commit 1cfef80d4c2b2c599189f36f36320b205d9447d9 ] dev_close() and dev_open() are issued to change the interface state to DOWN or UP (dev->flags IFF_UP). When the netdev is set DOWN it loses e.g its Ipv6 addresses and routes. We don't want this in cases of device recovery (triggered by hardware or software) or when the qeth device is set offline. Setting a qeth device offline or online and device recovery actions call netif_device_detach() and/or netif_device_attach(). That will reset or set the LOWER_UP indication i.e. change the dev->state Bit __LINK_STATE_PRESENT. That is enough to e.g. cause bond failovers, and still preserves the interface settings that are handled by the network stack. Don't call dev_open() nor dev_close() from the qeth device driver. Let the network stack handle this. Fixes: d4560150cb47 ("s390/qeth: call dev_close() during recovery") Signed-off-by: Alexandra Winter Reviewed-by: Wenjia Zhang Signed-off-by: David S. 
Miller Signed-off-by: Sasha Levin --- drivers/s390/net/qeth_core.h | 1 - drivers/s390/net/qeth_core_main.c | 2 -- drivers/s390/net/qeth_l2_main.c | 9 ++++++--- drivers/s390/net/qeth_l3_main.c | 8 +++++--- 4 files changed, 11 insertions(+), 9 deletions(-) diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index a5aa0bdc61d6..e8c360879883 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h @@ -717,7 +717,6 @@ struct qeth_card_info { u16 chid; u8 ids_valid:1; /* cssid,iid,chid */ u8 dev_addr_is_registered:1; - u8 open_when_online:1; u8 promisc_mode:1; u8 use_v1_blkt:1; u8 is_vm_nic:1; diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index e9807d2996a9..62e7576bff53 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c @@ -5459,8 +5459,6 @@ int qeth_set_offline(struct qeth_card *card, const struct qeth_discipline *disc, qeth_clear_ipacmd_list(card); rtnl_lock(); - card->info.open_when_online = card->dev->flags & IFF_UP; - dev_close(card->dev); netif_device_detach(card->dev); netif_carrier_off(card->dev); rtnl_unlock(); diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d694e3ff8086..7cdf3274cf96 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -2373,9 +2373,12 @@ static int qeth_l2_set_online(struct qeth_card *card, bool carrier_ok) qeth_enable_hw_features(dev); qeth_l2_enable_brport_features(card); - if (card->info.open_when_online) { - card->info.open_when_online = 0; - dev_open(dev, NULL); + if (netif_running(dev)) { + local_bh_disable(); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); + qeth_l2_set_rx_mode(dev); } rtnl_unlock(); } diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 6fd3e288f059..93f55c734802 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -2029,9 +2029,11 @@ static int qeth_l3_set_online(struct qeth_card *card, bool carrier_ok) netif_device_attach(dev); qeth_enable_hw_features(dev); - if (card->info.open_when_online) { - card->info.open_when_online = 0; - dev_open(dev, NULL); + if (netif_running(dev)) { + local_bh_disable(); + napi_schedule(&card->napi); + /* kick-start the NAPI softirq: */ + local_bh_enable(); } rtnl_unlock(); } From 1bb54a21f4d9b88442f8c3307c780e2db64417e4 Mon Sep 17 00:00:00 2001 From: Yue Haibing Date: Tue, 1 Aug 2023 14:43:18 +0800 Subject: [PATCH 234/513] ip6mr: Fix skb_under_panic in ip6mr_cache_report() [ Upstream commit 30e0191b16e8a58e4620fa3e2839ddc7b9d4281c ] skbuff: skb_under_panic: text:ffffffff88771f69 len:56 put:-4 head:ffff88805f86a800 data:ffff887f5f86a850 tail:0x88 end:0x2c0 dev:pim6reg ------------[ cut here ]------------ kernel BUG at net/core/skbuff.c:192! 
invalid opcode: 0000 [#1] PREEMPT SMP KASAN CPU: 2 PID: 22968 Comm: kworker/2:11 Not tainted 6.5.0-rc3-00044-g0a8db05b571a #236 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014 Workqueue: ipv6_addrconf addrconf_dad_work RIP: 0010:skb_panic+0x152/0x1d0 Call Trace: skb_push+0xc4/0xe0 ip6mr_cache_report+0xd69/0x19b0 reg_vif_xmit+0x406/0x690 dev_hard_start_xmit+0x17e/0x6e0 __dev_queue_xmit+0x2d6a/0x3d20 vlan_dev_hard_start_xmit+0x3ab/0x5c0 dev_hard_start_xmit+0x17e/0x6e0 __dev_queue_xmit+0x2d6a/0x3d20 neigh_connected_output+0x3ed/0x570 ip6_finish_output2+0x5b5/0x1950 ip6_finish_output+0x693/0x11c0 ip6_output+0x24b/0x880 NF_HOOK.constprop.0+0xfd/0x530 ndisc_send_skb+0x9db/0x1400 ndisc_send_rs+0x12a/0x6c0 addrconf_dad_completed+0x3c9/0xea0 addrconf_dad_work+0x849/0x1420 process_one_work+0xa22/0x16e0 worker_thread+0x679/0x10c0 ret_from_fork+0x28/0x60 ret_from_fork_asm+0x11/0x20 When setup a vlan device on dev pim6reg, DAD ns packet may sent on reg_vif_xmit(). reg_vif_xmit() ip6mr_cache_report() skb_push(skb, -skb_network_offset(pkt));//skb_network_offset(pkt) is 4 And skb_push declared as: void *skb_push(struct sk_buff *skb, unsigned int len); skb->data -= len; //0xffff88805f86a84c - 0xfffffffc = 0xffff887f5f86a850 skb->data is set to 0xffff887f5f86a850, which is invalid mem addr, lead to skb_push() fails. Fixes: 14fb64e1f449 ("[IPV6] MROUTE: Support PIM-SM (SSM).") Signed-off-by: Yue Haibing Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/ipv6/ip6mr.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index 91f1c5f56d5f..ee094645c7ce 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -1068,7 +1068,7 @@ static int ip6mr_cache_report(struct mr_table *mrt, struct sk_buff *pkt, And all this only to mangle msg->im6_msgtype and to set msg->im6_mbz to "mbz" :-) */ - skb_push(skb, -skb_network_offset(pkt)); + __skb_pull(skb, skb_network_offset(pkt)); skb_push(skb, sizeof(*msg)); skb_reset_transport_header(skb); From 32ef2c0c6cf11a076f0280a7866b9abc47821e19 Mon Sep 17 00:00:00 2001 From: Benjamin Poirier Date: Mon, 31 Jul 2023 16:02:08 -0400 Subject: [PATCH 235/513] vxlan: Fix nexthop hash size [ Upstream commit 0756384fb1bd38adb2ebcfd1307422f433a1d772 ] The nexthop code expects a 31 bit hash, such as what is returned by fib_multipath_hash() and rt6_multipath_hash(). Passing the 32 bit hash returned by skb_get_hash() can lead to problems related to the fact that 'int hash' is a negative number when the MSB is set. In the case of hash threshold nexthop groups, nexthop_select_path_hthr() will disproportionately select the first nexthop group entry. 
In the case of resilient nexthop groups, nexthop_select_path_res() may do an out of bounds access in nh_buckets[], for example: hash = -912054133 num_nh_buckets = 2 bucket_index = 65535 which leads to the following panic: BUG: unable to handle page fault for address: ffffc900025910c8 PGD 100000067 P4D 100000067 PUD 10026b067 PMD 0 Oops: 0002 [#1] PREEMPT SMP KASAN NOPTI CPU: 4 PID: 856 Comm: kworker/4:3 Not tainted 6.5.0-rc2+ #34 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.2-debian-1.16.2-1 04/01/2014 Workqueue: ipv6_addrconf addrconf_dad_work RIP: 0010:nexthop_select_path+0x197/0xbf0 Code: c1 e4 05 be 08 00 00 00 4c 8b 35 a4 14 7e 01 4e 8d 6c 25 00 4a 8d 7c 25 08 48 01 dd e8 c2 25 15 ff 49 8d 7d 08 e8 39 13 15 ff <4d> 89 75 08 48 89 ef e8 7d 12 15 ff 48 8b 5d 00 e8 14 55 2f 00 85 RSP: 0018:ffff88810c36f260 EFLAGS: 00010246 RAX: 0000000000000000 RBX: 00000000002000c0 RCX: ffffffffaf02dd77 RDX: dffffc0000000000 RSI: 0000000000000008 RDI: ffffc900025910c8 RBP: ffffc900025910c0 R08: 0000000000000001 R09: fffff520004b2219 R10: ffffc900025910cf R11: 31392d2068736168 R12: 00000000002000c0 R13: ffffc900025910c0 R14: 00000000fffef608 R15: ffff88811840e900 FS: 0000000000000000(0000) GS:ffff8881f7000000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffffc900025910c8 CR3: 0000000129d00000 CR4: 0000000000750ee0 PKRU: 55555554 Call Trace: ? __die+0x23/0x70 ? page_fault_oops+0x1ee/0x5c0 ? __pfx_is_prefetch.constprop.0+0x10/0x10 ? __pfx_page_fault_oops+0x10/0x10 ? search_bpf_extables+0xfe/0x1c0 ? fixup_exception+0x3b/0x470 ? exc_page_fault+0xf6/0x110 ? asm_exc_page_fault+0x26/0x30 ? nexthop_select_path+0x197/0xbf0 ? nexthop_select_path+0x197/0xbf0 ? lock_is_held_type+0xe7/0x140 vxlan_xmit+0x5b2/0x2340 ? __lock_acquire+0x92b/0x3370 ? __pfx_vxlan_xmit+0x10/0x10 ? __pfx___lock_acquire+0x10/0x10 ? __pfx_register_lock_class+0x10/0x10 ? skb_network_protocol+0xce/0x2d0 ? dev_hard_start_xmit+0xca/0x350 ? __pfx_vxlan_xmit+0x10/0x10 dev_hard_start_xmit+0xca/0x350 __dev_queue_xmit+0x513/0x1e20 ? __pfx___dev_queue_xmit+0x10/0x10 ? __pfx_lock_release+0x10/0x10 ? mark_held_locks+0x44/0x90 ? skb_push+0x4c/0x80 ? eth_header+0x81/0xe0 ? __pfx_eth_header+0x10/0x10 ? neigh_resolve_output+0x215/0x310 ? ip6_finish_output2+0x2ba/0xc90 ip6_finish_output2+0x2ba/0xc90 ? lock_release+0x236/0x3e0 ? ip6_mtu+0xbb/0x240 ? __pfx_ip6_finish_output2+0x10/0x10 ? find_held_lock+0x83/0xa0 ? lock_is_held_type+0xe7/0x140 ip6_finish_output+0x1ee/0x780 ip6_output+0x138/0x460 ? __pfx_ip6_output+0x10/0x10 ? __pfx___lock_acquire+0x10/0x10 ? __pfx_ip6_finish_output+0x10/0x10 NF_HOOK.constprop.0+0xc0/0x420 ? __pfx_NF_HOOK.constprop.0+0x10/0x10 ? ndisc_send_skb+0x2c0/0x960 ? __pfx_lock_release+0x10/0x10 ? __local_bh_enable_ip+0x93/0x110 ? lock_is_held_type+0xe7/0x140 ndisc_send_skb+0x4be/0x960 ? __pfx_ndisc_send_skb+0x10/0x10 ? mark_held_locks+0x65/0x90 ? find_held_lock+0x83/0xa0 ndisc_send_ns+0xb0/0x110 ? __pfx_ndisc_send_ns+0x10/0x10 addrconf_dad_work+0x631/0x8e0 ? lock_acquire+0x180/0x3f0 ? __pfx_addrconf_dad_work+0x10/0x10 ? mark_held_locks+0x24/0x90 process_one_work+0x582/0x9c0 ? __pfx_process_one_work+0x10/0x10 ? __pfx_do_raw_spin_lock+0x10/0x10 ? mark_held_locks+0x24/0x90 worker_thread+0x93/0x630 ? __kthread_parkme+0xdc/0x100 ? __pfx_worker_thread+0x10/0x10 kthread+0x1a5/0x1e0 ? __pfx_kthread+0x10/0x10 ret_from_fork+0x34/0x60 ? __pfx_kthread+0x10/0x10 ret_from_fork_asm+0x1b/0x30 RIP: 0000:0x0 Code: Unable to access opcode bytes at 0xffffffffffffffd6. 
RSP: 0000:0000000000000000 EFLAGS: 00000000 ORIG_RAX: 0000000000000000 RAX: 0000000000000000 RBX: 0000000000000000 RCX: 0000000000000000 RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000000000000000 RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: 0000000000000000 R13: 0000000000000000 R14: 0000000000000000 R15: 0000000000000000 Modules linked in: CR2: ffffc900025910c8 ---[ end trace 0000000000000000 ]--- RIP: 0010:nexthop_select_path+0x197/0xbf0 Code: c1 e4 05 be 08 00 00 00 4c 8b 35 a4 14 7e 01 4e 8d 6c 25 00 4a 8d 7c 25 08 48 01 dd e8 c2 25 15 ff 49 8d 7d 08 e8 39 13 15 ff <4d> 89 75 08 48 89 ef e8 7d 12 15 ff 48 8b 5d 00 e8 14 55 2f 00 85 RSP: 0018:ffff88810c36f260 EFLAGS: 00010246 RAX: 0000000000000000 RBX: 00000000002000c0 RCX: ffffffffaf02dd77 RDX: dffffc0000000000 RSI: 0000000000000008 RDI: ffffc900025910c8 RBP: ffffc900025910c0 R08: 0000000000000001 R09: fffff520004b2219 R10: ffffc900025910cf R11: 31392d2068736168 R12: 00000000002000c0 R13: ffffc900025910c0 R14: 00000000fffef608 R15: ffff88811840e900 FS: 0000000000000000(0000) GS:ffff8881f7000000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffffffffffffffd6 CR3: 0000000129d00000 CR4: 0000000000750ee0 PKRU: 55555554 Kernel panic - not syncing: Fatal exception in interrupt Kernel Offset: 0x2ca00000 from 0xffffffff81000000 (relocation range: 0xffffffff80000000-0xffffffffbfffffff) ---[ end Kernel panic - not syncing: Fatal exception in interrupt ]--- Fix this problem by ensuring the MSB of hash is 0 using a right shift - the same approach used in fib_multipath_hash() and rt6_multipath_hash(). Fixes: 1274e1cc4226 ("vxlan: ecmp support for mac fdb entries") Signed-off-by: Benjamin Poirier Reviewed-by: Ido Schimmel Reviewed-by: Simon Horman Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- include/net/vxlan.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/net/vxlan.h b/include/net/vxlan.h index cf1d870f7b9a..e149a0b6f9a3 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -497,12 +497,12 @@ static inline void vxlan_flag_attr_error(int attrtype, } static inline bool vxlan_fdb_nh_path_select(struct nexthop *nh, - int hash, + u32 hash, struct vxlan_rdst *rdst) { struct fib_nh_common *nhc; - nhc = nexthop_path_fdb_result(nh, hash); + nhc = nexthop_path_fdb_result(nh, hash >> 1); if (unlikely(!nhc)) return false; From c999fb1039dd977aec7a99036fc0f1b51ef27b71 Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Mon, 31 Jul 2023 14:58:40 +0300 Subject: [PATCH 236/513] net/mlx5: fs_core: Make find_closest_ft more generic [ Upstream commit 618d28a535a0582617465d14e05f3881736a2962 ] As find_closest_ft_recursive is called to find the closest FT, the first parameter of find_closest_ft can be changed from fs_prio to fs_node. Thus this function is extended to find the closest FT for the nodes of any type, not only prios, but also the sub namespaces. 
Signed-off-by: Jianbo Liu Signed-off-by: Leon Romanovsky Link: https://lore.kernel.org/r/d3962c2b443ec8dde7a740dc742a1f052d5e256c.1690803944.git.leonro@nvidia.com Signed-off-by: Jakub Kicinski Stable-dep-of: c635ca45a7a2 ("net/mlx5: fs_core: Skip the FTs in the same FS_TYPE_PRIO_CHAINS fs_prio") Signed-off-by: Sasha Levin --- .../net/ethernet/mellanox/mlx5/core/fs_core.c | 29 +++++++++---------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index cb3f9de3d00b..8b1a5f563759 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -818,18 +818,17 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root, return ft; } -/* If reverse is false then return the first flow table in next priority of - * prio in the tree, else return the last flow table in the previous priority - * of prio in the tree. +/* If reverse is false then return the first flow table next to the passed node + * in the tree, else return the last flow table before the node in the tree. */ -static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool reverse) +static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool reverse) { struct mlx5_flow_table *ft = NULL; struct fs_node *curr_node; struct fs_node *parent; - parent = prio->node.parent; - curr_node = &prio->node; + parent = node->parent; + curr_node = node; while (!ft && parent) { ft = find_closest_ft_recursive(parent, &curr_node->list, reverse); curr_node = parent; @@ -839,15 +838,15 @@ static struct mlx5_flow_table *find_closest_ft(struct fs_prio *prio, bool revers } /* Assuming all the tree is locked by mutex chain lock */ -static struct mlx5_flow_table *find_next_chained_ft(struct fs_prio *prio) +static struct mlx5_flow_table *find_next_chained_ft(struct fs_node *node) { - return find_closest_ft(prio, false); + return find_closest_ft(node, false); } /* Assuming all the tree is locked by mutex chain lock */ -static struct mlx5_flow_table *find_prev_chained_ft(struct fs_prio *prio) +static struct mlx5_flow_table *find_prev_chained_ft(struct fs_node *node) { - return find_closest_ft(prio, true); + return find_closest_ft(node, true); } static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft, @@ -859,7 +858,7 @@ static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft, next_ns = flow_act->action & MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_NS; fs_get_obj(prio, next_ns ? ft->ns->node.parent : ft->node.parent); - return find_next_chained_ft(prio); + return find_next_chained_ft(&prio->node); } static int connect_fts_in_prio(struct mlx5_core_dev *dev, @@ -890,7 +889,7 @@ static int connect_prev_fts(struct mlx5_core_dev *dev, { struct mlx5_flow_table *prev_ft; - prev_ft = find_prev_chained_ft(prio); + prev_ft = find_prev_chained_ft(&prio->node); if (prev_ft) { struct fs_prio *prev_prio; @@ -1036,7 +1035,7 @@ static int connect_flow_table(struct mlx5_core_dev *dev, struct mlx5_flow_table if (err) return err; - next_ft = first_ft ? first_ft : find_next_chained_ft(prio); + next_ft = first_ft ? first_ft : find_next_chained_ft(&prio->node); err = connect_fwd_rules(dev, ft, next_ft); if (err) return err; @@ -1111,7 +1110,7 @@ static struct mlx5_flow_table *__mlx5_create_flow_table(struct mlx5_flow_namespa tree_init_node(&ft->node, del_hw_flow_table, del_sw_flow_table); next_ft = unmanaged ? 
ft_attr->next_ft : - find_next_chained_ft(fs_prio); + find_next_chained_ft(&fs_prio->node); ft->def_miss_action = ns->def_miss_action; ft->ns = ns; err = root->cmds->create_flow_table(root, ft, ft_attr->max_fte, next_ft); @@ -2086,7 +2085,7 @@ static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft) if (!list_is_last(&ft->node.list, &prio->node.children)) return list_next_entry(ft, node.list); - return find_next_chained_ft(prio); + return find_next_chained_ft(&prio->node); } static int update_root_ft_destroy(struct mlx5_flow_table *ft) From d6d9d0f5a5e00027bd9ec24235969fc4e896f195 Mon Sep 17 00:00:00 2001 From: Jianbo Liu Date: Mon, 31 Jul 2023 14:58:41 +0300 Subject: [PATCH 237/513] net/mlx5: fs_core: Skip the FTs in the same FS_TYPE_PRIO_CHAINS fs_prio [ Upstream commit c635ca45a7a2023904a1f851e99319af7b87017d ] In the cited commit, new type of FS_TYPE_PRIO_CHAINS fs_prio was added to support multiple parallel namespaces for multi-chains. And we skip all the flow tables under the fs_node of this type unconditionally, when searching for the next or previous flow table to connect for a new table. As this search function is also used for find new root table when the old one is being deleted, it will skip the entire FS_TYPE_PRIO_CHAINS fs_node next to the old root. However, new root table should be chosen from it if there is any table in it. Fix it by skipping only the flow tables in the same FS_TYPE_PRIO_CHAINS fs_node when finding the closest FT for a fs_node. Besides, complete the connecting from FTs of previous priority of prio because there should be multiple prevs after this fs_prio type is introduced. And also the next FT should be chosen from the first flow table next to the prio in the same FS_TYPE_PRIO_CHAINS fs_prio, if this prio is the first child. Fixes: 328edb499f99 ("net/mlx5: Split FDB fast path prio to multiple namespaces") Signed-off-by: Jianbo Liu Reviewed-by: Paul Blakey Signed-off-by: Leon Romanovsky Link: https://lore.kernel.org/r/7a95754df479e722038996c97c97b062b372591f.1690803944.git.leonro@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- .../net/ethernet/mellanox/mlx5/core/fs_core.c | 80 +++++++++++++++++-- 1 file changed, 72 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 8b1a5f563759..161ad2ae4019 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -802,7 +802,7 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root, struct fs_node *iter = list_entry(start, struct fs_node, list); struct mlx5_flow_table *ft = NULL; - if (!root || root->type == FS_TYPE_PRIO_CHAINS) + if (!root) return NULL; list_for_each_advance_continue(iter, &root->children, reverse) { @@ -818,19 +818,42 @@ static struct mlx5_flow_table *find_closest_ft_recursive(struct fs_node *root, return ft; } +static struct fs_node *find_prio_chains_parent(struct fs_node *parent, + struct fs_node **child) +{ + struct fs_node *node = NULL; + + while (parent && parent->type != FS_TYPE_PRIO_CHAINS) { + node = parent; + parent = parent->parent; + } + + if (child) + *child = node; + + return parent; +} + /* If reverse is false then return the first flow table next to the passed node * in the tree, else return the last flow table before the node in the tree. + * If skip is true, skip the flow tables in the same prio_chains prio. 
*/ -static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool reverse) +static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool reverse, + bool skip) { + struct fs_node *prio_chains_parent = NULL; struct mlx5_flow_table *ft = NULL; struct fs_node *curr_node; struct fs_node *parent; + if (skip) + prio_chains_parent = find_prio_chains_parent(node, NULL); parent = node->parent; curr_node = node; while (!ft && parent) { - ft = find_closest_ft_recursive(parent, &curr_node->list, reverse); + if (parent != prio_chains_parent) + ft = find_closest_ft_recursive(parent, &curr_node->list, + reverse); curr_node = parent; parent = curr_node->parent; } @@ -840,13 +863,13 @@ static struct mlx5_flow_table *find_closest_ft(struct fs_node *node, bool revers /* Assuming all the tree is locked by mutex chain lock */ static struct mlx5_flow_table *find_next_chained_ft(struct fs_node *node) { - return find_closest_ft(node, false); + return find_closest_ft(node, false, true); } /* Assuming all the tree is locked by mutex chain lock */ static struct mlx5_flow_table *find_prev_chained_ft(struct fs_node *node) { - return find_closest_ft(node, true); + return find_closest_ft(node, true, true); } static struct mlx5_flow_table *find_next_fwd_ft(struct mlx5_flow_table *ft, @@ -882,21 +905,55 @@ static int connect_fts_in_prio(struct mlx5_core_dev *dev, return 0; } +static struct mlx5_flow_table *find_closet_ft_prio_chains(struct fs_node *node, + struct fs_node *parent, + struct fs_node **child, + bool reverse) +{ + struct mlx5_flow_table *ft; + + ft = find_closest_ft(node, reverse, false); + + if (ft && parent == find_prio_chains_parent(&ft->node, child)) + return ft; + + return NULL; +} + /* Connect flow tables from previous priority of prio to ft */ static int connect_prev_fts(struct mlx5_core_dev *dev, struct mlx5_flow_table *ft, struct fs_prio *prio) { + struct fs_node *prio_parent, *parent = NULL, *child, *node; struct mlx5_flow_table *prev_ft; + int err = 0; + + prio_parent = find_prio_chains_parent(&prio->node, &child); + + /* return directly if not under the first sub ns of prio_chains prio */ + if (prio_parent && !list_is_first(&child->list, &prio_parent->children)) + return 0; prev_ft = find_prev_chained_ft(&prio->node); - if (prev_ft) { + while (prev_ft) { struct fs_prio *prev_prio; fs_get_obj(prev_prio, prev_ft->node.parent); - return connect_fts_in_prio(dev, prev_prio, ft); + err = connect_fts_in_prio(dev, prev_prio, ft); + if (err) + break; + + if (!parent) { + parent = find_prio_chains_parent(&prev_prio->node, &child); + if (!parent) + break; + } + + node = child; + prev_ft = find_closet_ft_prio_chains(node, parent, &child, true); } - return 0; + return err; } static int update_root_ft_create(struct mlx5_flow_table *ft, struct fs_prio @@ -2079,12 +2136,19 @@ EXPORT_SYMBOL(mlx5_del_flow_rules); /* Assuming prio->node.children(flow tables) is sorted by level */ static struct mlx5_flow_table *find_next_ft(struct mlx5_flow_table *ft) { + struct fs_node *prio_parent, *child; struct fs_prio *prio; fs_get_obj(prio, ft->node.parent); if (!list_is_last(&ft->node.list, &prio->node.children)) return list_next_entry(ft, node.list); + + prio_parent = find_prio_chains_parent(&prio->node, &child); + + if (prio_parent && list_is_first(&child->list, &prio_parent->children)) + return find_closest_ft(&prio->node, false, false); + return find_next_chained_ft(&prio->node); } From b0acbcf1e7a1cbe194b2536ceedeb0f879dfba3e Mon Sep 17 00:00:00 2001 From: Jonas Gorski Date: Wed, 2 Aug 2023 11:23:56 
+0200
Subject: [PATCH 238/513] prestera: fix fallback to previous version on same major version

[ Upstream commit b755c25fbcd568821a3bb0e0d5c2daa5fcb00bba ]

When both the supported and the previous version have the same major
version, and the firmwares are missing, the driver ends in a loop
requesting the same (previous) version over and over again:

[ 76.327413] Prestera DX 0000:01:00.0: missing latest mrvl/prestera/mvsw_prestera_fw-v4.1.img firmware, fall-back to previous 4.0 version
[ 76.339802] Prestera DX 0000:01:00.0: missing latest mrvl/prestera/mvsw_prestera_fw-v4.0.img firmware, fall-back to previous 4.0 version
[ 76.352162] Prestera DX 0000:01:00.0: missing latest mrvl/prestera/mvsw_prestera_fw-v4.0.img firmware, fall-back to previous 4.0 version
[ 76.364502] Prestera DX 0000:01:00.0: missing latest mrvl/prestera/mvsw_prestera_fw-v4.0.img firmware, fall-back to previous 4.0 version
[ 76.376848] Prestera DX 0000:01:00.0: missing latest mrvl/prestera/mvsw_prestera_fw-v4.0.img firmware, fall-back to previous 4.0 version
[ 76.389183] Prestera DX 0000:01:00.0: missing latest mrvl/prestera/mvsw_prestera_fw-v4.0.img firmware, fall-back to previous 4.0 version
[ 76.401522] Prestera DX 0000:01:00.0: missing latest mrvl/prestera/mvsw_prestera_fw-v4.0.img firmware, fall-back to previous 4.0 version
[ 76.413860] Prestera DX 0000:01:00.0: missing latest mrvl/prestera/mvsw_prestera_fw-v4.0.img firmware, fall-back to previous 4.0 version
[ 76.426199] Prestera DX 0000:01:00.0: missing latest mrvl/prestera/mvsw_prestera_fw-v4.0.img firmware, fall-back to previous 4.0 version
...

Fix this by inverting the check to verify that we aren't yet at the
previous version, and also check the minor version. This also catches the
case where both versions are the same, as it was after commit bb5dbf2cc64d
("net: marvell: prestera: add firmware v4.0 support").
With this fix applied: [ 88.499622] Prestera DX 0000:01:00.0: missing latest mrvl/prestera/mvsw_prestera_fw-v4.1.img firmware, fall-back to previous 4.0 version [ 88.511995] Prestera DX 0000:01:00.0: failed to request previous firmware: mrvl/prestera/mvsw_prestera_fw-v4.0.img [ 88.522403] Prestera DX: probe of 0000:01:00.0 failed with error -2 Fixes: 47f26018a414 ("net: marvell: prestera: try to load previous fw version") Signed-off-by: Jonas Gorski Acked-by: Elad Nachman Reviewed-by: Jesse Brandeburg Acked-by: Taras Chornyi Link: https://lore.kernel.org/r/20230802092357.163944-1-jonas.gorski@bisdn.de Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/ethernet/marvell/prestera/prestera_pci.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/marvell/prestera/prestera_pci.c b/drivers/net/ethernet/marvell/prestera/prestera_pci.c index a8d7b889ebee..6bef633aa633 100644 --- a/drivers/net/ethernet/marvell/prestera/prestera_pci.c +++ b/drivers/net/ethernet/marvell/prestera/prestera_pci.c @@ -645,7 +645,8 @@ static int prestera_fw_get(struct prestera_fw *fw) err = request_firmware_direct(&fw->bin, fw_path, fw->dev.dev); if (err) { - if (ver_maj == PRESTERA_SUPP_FW_MAJ_VER) { + if (ver_maj != PRESTERA_PREV_FW_MAJ_VER || + ver_min != PRESTERA_PREV_FW_MIN_VER) { ver_maj = PRESTERA_PREV_FW_MAJ_VER; ver_min = PRESTERA_PREV_FW_MIN_VER; From 6f6bd67f4894bcda5ff96b8397b51097fe02493a Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 2 Aug 2023 13:14:55 +0000 Subject: [PATCH 239/513] tcp_metrics: fix addr_same() helper [ Upstream commit e6638094d7af6c7b9dcca05ad009e79e31b4f670 ] Because v4 and v6 families use separate inetpeer trees (respectively net->ipv4.peers and net->ipv6.peers), inetpeer_addr_cmp(a, b) assumes a & b share the same family. tcp_metrics use a common hash table, where entries can have different families. We must therefore make sure to not call inetpeer_addr_cmp() if the families do not match. Fixes: d39d14ffa24c ("net: Add helper function to compare inetpeer addresses") Signed-off-by: Eric Dumazet Reviewed-by: David Ahern Reviewed-by: Kuniyuki Iwashima Link: https://lore.kernel.org/r/20230802131500.1478140-2-edumazet@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/ipv4/tcp_metrics.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index d58e672be31c..6a06d2e0e11d 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -78,7 +78,7 @@ static void tcp_metric_set(struct tcp_metrics_block *tm, static bool addr_same(const struct inetpeer_addr *a, const struct inetpeer_addr *b) { - return inetpeer_addr_cmp(a, b) == 0; + return (a->family == b->family) && !inetpeer_addr_cmp(a, b); } struct tcpm_hash_bucket { From 9a7367cbe33d548ca3641a298475c7fa7909ac3b Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 2 Aug 2023 13:14:56 +0000 Subject: [PATCH 240/513] tcp_metrics: annotate data-races around tm->tcpm_stamp [ Upstream commit 949ad62a5d5311d36fce2e14fe5fed3f936da51c ] tm->tcpm_stamp can be read or written locklessly. Add needed READ_ONCE()/WRITE_ONCE() to document this. Also constify tcpm_check_stamp() dst argument. 
Fixes: 51c5d0c4b169 ("tcp: Maintain dynamic metrics in local cache.") Signed-off-by: Eric Dumazet Reviewed-by: David Ahern Reviewed-by: Kuniyuki Iwashima Link: https://lore.kernel.org/r/20230802131500.1478140-3-edumazet@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/ipv4/tcp_metrics.c | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 6a06d2e0e11d..cac654834090 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -97,7 +97,7 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, u32 msval; u32 val; - tm->tcpm_stamp = jiffies; + WRITE_ONCE(tm->tcpm_stamp, jiffies); val = 0; if (dst_metric_locked(dst, RTAX_RTT)) @@ -131,9 +131,15 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, #define TCP_METRICS_TIMEOUT (60 * 60 * HZ) -static void tcpm_check_stamp(struct tcp_metrics_block *tm, struct dst_entry *dst) +static void tcpm_check_stamp(struct tcp_metrics_block *tm, + const struct dst_entry *dst) { - if (tm && unlikely(time_after(jiffies, tm->tcpm_stamp + TCP_METRICS_TIMEOUT))) + unsigned long limit; + + if (!tm) + return; + limit = READ_ONCE(tm->tcpm_stamp) + TCP_METRICS_TIMEOUT; + if (unlikely(time_after(jiffies, limit))) tcpm_suck_dst(tm, dst, false); } @@ -174,7 +180,8 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, oldest = deref_locked(tcp_metrics_hash[hash].chain); for (tm = deref_locked(oldest->tcpm_next); tm; tm = deref_locked(tm->tcpm_next)) { - if (time_before(tm->tcpm_stamp, oldest->tcpm_stamp)) + if (time_before(READ_ONCE(tm->tcpm_stamp), + READ_ONCE(oldest->tcpm_stamp))) oldest = tm; } tm = oldest; @@ -434,7 +441,7 @@ void tcp_update_metrics(struct sock *sk) tp->reordering); } } - tm->tcpm_stamp = jiffies; + WRITE_ONCE(tm->tcpm_stamp, jiffies); out_unlock: rcu_read_unlock(); } @@ -647,7 +654,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg, } if (nla_put_msecs(msg, TCP_METRICS_ATTR_AGE, - jiffies - tm->tcpm_stamp, + jiffies - READ_ONCE(tm->tcpm_stamp), TCP_METRICS_ATTR_PAD) < 0) goto nla_put_failure; From d3184bea4ace15f893a59fbfeaf3ef1e614f0bd0 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 2 Aug 2023 13:14:57 +0000 Subject: [PATCH 241/513] tcp_metrics: annotate data-races around tm->tcpm_lock [ Upstream commit 285ce119a3c6c4502585936650143e54c8692788 ] tm->tcpm_lock can be read or written locklessly. Add needed READ_ONCE()/WRITE_ONCE() to document this. 
Fixes: 51c5d0c4b169 ("tcp: Maintain dynamic metrics in local cache.") Signed-off-by: Eric Dumazet Reviewed-by: David Ahern Reviewed-by: Kuniyuki Iwashima Link: https://lore.kernel.org/r/20230802131500.1478140-4-edumazet@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/ipv4/tcp_metrics.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index cac654834090..07bce467c71a 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -59,7 +59,8 @@ static inline struct net *tm_net(struct tcp_metrics_block *tm) static bool tcp_metric_locked(struct tcp_metrics_block *tm, enum tcp_metric_index idx) { - return tm->tcpm_lock & (1 << idx); + /* Paired with WRITE_ONCE() in tcpm_suck_dst() */ + return READ_ONCE(tm->tcpm_lock) & (1 << idx); } static u32 tcp_metric_get(struct tcp_metrics_block *tm, @@ -110,7 +111,8 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, val |= 1 << TCP_METRIC_CWND; if (dst_metric_locked(dst, RTAX_REORDERING)) val |= 1 << TCP_METRIC_REORDERING; - tm->tcpm_lock = val; + /* Paired with READ_ONCE() in tcp_metric_locked() */ + WRITE_ONCE(tm->tcpm_lock, val); msval = dst_metric_raw(dst, RTAX_RTT); tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC; From e842a68667d4f406da2571a7527bad74767bc496 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 2 Aug 2023 13:14:58 +0000 Subject: [PATCH 242/513] tcp_metrics: annotate data-races around tm->tcpm_vals[] [ Upstream commit 8c4d04f6b443869d25e59822f7cec88d647028a9 ] tm->tcpm_vals[] values can be read or written locklessly. Add needed READ_ONCE()/WRITE_ONCE() to document this, and force use of tcp_metric_get() and tcp_metric_set() Fixes: 51c5d0c4b169 ("tcp: Maintain dynamic metrics in local cache.") Signed-off-by: Eric Dumazet Reviewed-by: David Ahern Reviewed-by: Kuniyuki Iwashima Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/ipv4/tcp_metrics.c | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 07bce467c71a..0c309fd4e04b 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -63,17 +63,19 @@ static bool tcp_metric_locked(struct tcp_metrics_block *tm, return READ_ONCE(tm->tcpm_lock) & (1 << idx); } -static u32 tcp_metric_get(struct tcp_metrics_block *tm, +static u32 tcp_metric_get(const struct tcp_metrics_block *tm, enum tcp_metric_index idx) { - return tm->tcpm_vals[idx]; + /* Paired with WRITE_ONCE() in tcp_metric_set() */ + return READ_ONCE(tm->tcpm_vals[idx]); } static void tcp_metric_set(struct tcp_metrics_block *tm, enum tcp_metric_index idx, u32 val) { - tm->tcpm_vals[idx] = val; + /* Paired with READ_ONCE() in tcp_metric_get() */ + WRITE_ONCE(tm->tcpm_vals[idx], val); } static bool addr_same(const struct inetpeer_addr *a, @@ -115,13 +117,16 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, WRITE_ONCE(tm->tcpm_lock, val); msval = dst_metric_raw(dst, RTAX_RTT); - tm->tcpm_vals[TCP_METRIC_RTT] = msval * USEC_PER_MSEC; + tcp_metric_set(tm, TCP_METRIC_RTT, msval * USEC_PER_MSEC); msval = dst_metric_raw(dst, RTAX_RTTVAR); - tm->tcpm_vals[TCP_METRIC_RTTVAR] = msval * USEC_PER_MSEC; - tm->tcpm_vals[TCP_METRIC_SSTHRESH] = dst_metric_raw(dst, RTAX_SSTHRESH); - tm->tcpm_vals[TCP_METRIC_CWND] = dst_metric_raw(dst, RTAX_CWND); - tm->tcpm_vals[TCP_METRIC_REORDERING] = dst_metric_raw(dst, RTAX_REORDERING); + tcp_metric_set(tm, TCP_METRIC_RTTVAR, msval * USEC_PER_MSEC); + tcp_metric_set(tm, 
TCP_METRIC_SSTHRESH, + dst_metric_raw(dst, RTAX_SSTHRESH)); + tcp_metric_set(tm, TCP_METRIC_CWND, + dst_metric_raw(dst, RTAX_CWND)); + tcp_metric_set(tm, TCP_METRIC_REORDERING, + dst_metric_raw(dst, RTAX_REORDERING)); if (fastopen_clear) { tm->tcpm_fastopen.mss = 0; tm->tcpm_fastopen.syn_loss = 0; @@ -667,7 +672,7 @@ static int tcp_metrics_fill_info(struct sk_buff *msg, if (!nest) goto nla_put_failure; for (i = 0; i < TCP_METRIC_MAX_KERNEL + 1; i++) { - u32 val = tm->tcpm_vals[i]; + u32 val = tcp_metric_get(tm, i); if (!val) continue; From 4517782e1bc35d8d974653f6c382bbad56160ed2 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 2 Aug 2023 13:14:59 +0000 Subject: [PATCH 243/513] tcp_metrics: annotate data-races around tm->tcpm_net [ Upstream commit d5d986ce42c71a7562d32c4e21e026b0f87befec ] tm->tcpm_net can be read or written locklessly. Instead of changing write_pnet() and read_pnet() and potentially hurt performance, add the needed READ_ONCE()/WRITE_ONCE() in tm_net() and tcpm_new(). Fixes: 849e8a0ca8d5 ("tcp_metrics: Add a field tcpm_net and verify it matches on lookup") Signed-off-by: Eric Dumazet Reviewed-by: David Ahern Reviewed-by: Kuniyuki Iwashima Link: https://lore.kernel.org/r/20230802131500.1478140-6-edumazet@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/ipv4/tcp_metrics.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 0c309fd4e04b..88b0d952a288 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -40,7 +40,7 @@ struct tcp_fastopen_metrics { struct tcp_metrics_block { struct tcp_metrics_block __rcu *tcpm_next; - possible_net_t tcpm_net; + struct net *tcpm_net; struct inetpeer_addr tcpm_saddr; struct inetpeer_addr tcpm_daddr; unsigned long tcpm_stamp; @@ -51,9 +51,10 @@ struct tcp_metrics_block { struct rcu_head rcu_head; }; -static inline struct net *tm_net(struct tcp_metrics_block *tm) +static inline struct net *tm_net(const struct tcp_metrics_block *tm) { - return read_pnet(&tm->tcpm_net); + /* Paired with the WRITE_ONCE() in tcpm_new() */ + return READ_ONCE(tm->tcpm_net); } static bool tcp_metric_locked(struct tcp_metrics_block *tm, @@ -197,7 +198,9 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, if (!tm) goto out_unlock; } - write_pnet(&tm->tcpm_net, net); + /* Paired with the READ_ONCE() in tm_net() */ + WRITE_ONCE(tm->tcpm_net, net); + tm->tcpm_saddr = *saddr; tm->tcpm_daddr = *daddr; From dac3827253940013a0840f09603adfe7bddc4ae7 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 2 Aug 2023 13:15:00 +0000 Subject: [PATCH 244/513] tcp_metrics: fix data-race in tcpm_suck_dst() vs fastopen [ Upstream commit ddf251fa2bc1d3699eec0bae6ed0bc373b8fda79 ] Whenever tcpm_new() reclaims an old entry, tcpm_suck_dst() would overwrite data that could be read from tcp_fastopen_cache_get() or tcp_metrics_fill_info(). We need to acquire fastopen_seqlock to maintain consistency. For newly allocated objects, tcpm_new() can switch to kzalloc() to avoid an extra fastopen_seqlock acquisition. 
Fixes: 1fe4c481ba63 ("net-tcp: Fast Open client - cookie cache") Signed-off-by: Eric Dumazet Cc: Yuchung Cheng Reviewed-by: Kuniyuki Iwashima Link: https://lore.kernel.org/r/20230802131500.1478140-7-edumazet@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/ipv4/tcp_metrics.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c index 88b0d952a288..5df97aaac252 100644 --- a/net/ipv4/tcp_metrics.c +++ b/net/ipv4/tcp_metrics.c @@ -93,6 +93,7 @@ static struct tcpm_hash_bucket *tcp_metrics_hash __read_mostly; static unsigned int tcp_metrics_hash_log __read_mostly; static DEFINE_SPINLOCK(tcp_metrics_lock); +static DEFINE_SEQLOCK(fastopen_seqlock); static void tcpm_suck_dst(struct tcp_metrics_block *tm, const struct dst_entry *dst, @@ -129,11 +130,13 @@ static void tcpm_suck_dst(struct tcp_metrics_block *tm, tcp_metric_set(tm, TCP_METRIC_REORDERING, dst_metric_raw(dst, RTAX_REORDERING)); if (fastopen_clear) { + write_seqlock(&fastopen_seqlock); tm->tcpm_fastopen.mss = 0; tm->tcpm_fastopen.syn_loss = 0; tm->tcpm_fastopen.try_exp = 0; tm->tcpm_fastopen.cookie.exp = false; tm->tcpm_fastopen.cookie.len = 0; + write_sequnlock(&fastopen_seqlock); } } @@ -194,7 +197,7 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, } tm = oldest; } else { - tm = kmalloc(sizeof(*tm), GFP_ATOMIC); + tm = kzalloc(sizeof(*tm), GFP_ATOMIC); if (!tm) goto out_unlock; } @@ -204,7 +207,7 @@ static struct tcp_metrics_block *tcpm_new(struct dst_entry *dst, tm->tcpm_saddr = *saddr; tm->tcpm_daddr = *daddr; - tcpm_suck_dst(tm, dst, true); + tcpm_suck_dst(tm, dst, reclaim); if (likely(!reclaim)) { tm->tcpm_next = tcp_metrics_hash[hash].chain; @@ -556,8 +559,6 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst) return ret; } -static DEFINE_SEQLOCK(fastopen_seqlock); - void tcp_fastopen_cache_get(struct sock *sk, u16 *mss, struct tcp_fastopen_cookie *cookie) { From 212a9a3c67bee1707a5fa4437bd0638d7124443d Mon Sep 17 00:00:00 2001 From: Steffen Maier Date: Mon, 24 Jul 2023 16:51:56 +0200 Subject: [PATCH 245/513] scsi: zfcp: Defer fc_rport blocking until after ADISC response commit e65851989001c0c9ba9177564b13b38201c0854c upstream. Storage devices are free to send RSCNs, e.g. for internal state changes. If this happens on all connected paths, zfcp risks temporarily losing all paths at the same time. This has strong requirements on multipath configuration such as "no_path_retry queue". Avoid such situations by deferring fc_rport blocking until after the ADISC response, when any actual state change of the remote port became clear. The already existing port recovery triggers explicitly block the fc_rport. The triggers are: on ADISC reject or timeout (typical cable pull case), and on ADISC indicating that the remote port has changed its WWPN or the port is meanwhile no longer open. As a side effect, this also removes a confusing direct function call to another work item function zfcp_scsi_rport_work() instead of scheduling that other work item. It was probably done that way to have the rport block side effect immediate and synchronous to the caller. Fixes: a2fa0aede07c ("[SCSI] zfcp: Block FC transport rports early on errors") Cc: stable@vger.kernel.org #v2.6.30+ Reviewed-by: Benjamin Block Reviewed-by: Fedor Loshakov Signed-off-by: Steffen Maier Link: https://lore.kernel.org/r/20230724145156.3920244-1-maier@linux.ibm.com Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/s390/scsi/zfcp_fc.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index b61acbb09be3..d323f9985c48 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c @@ -534,8 +534,7 @@ static void zfcp_fc_adisc_handler(void *data) /* re-init to undo drop from zfcp_fc_adisc() */ port->d_id = ntoh24(adisc_resp->adisc_port_id); - /* port is good, unblock rport without going through erp */ - zfcp_scsi_schedule_rport_register(port); + /* port is still good, nothing to do */ out: atomic_andnot(ZFCP_STATUS_PORT_LINK_TEST, &port->status); put_device(&port->dev); @@ -595,9 +594,6 @@ void zfcp_fc_link_test_work(struct work_struct *work) int retval; set_worker_desc("zadisc%16llx", port->wwpn); /* < WORKER_DESC_LEN=24 */ - get_device(&port->dev); - port->rport_task = RPORT_DEL; - zfcp_scsi_rport_work(&port->rport_work); /* only issue one test command at one time per port */ if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST) From e5f5b4a89809630c24cf6f7bd8026a3e94a5c94e Mon Sep 17 00:00:00 2001 From: Michael Kelley Date: Thu, 20 Jul 2023 14:05:02 -0700 Subject: [PATCH 246/513] scsi: storvsc: Limit max_sectors for virtual Fibre Channel devices commit 010c1e1c5741365dbbf44a5a5bb9f30192875c4c upstream. The Hyper-V host is queried to get the max transfer size that it supports, and this value is used to set max_sectors for the synthetic SCSI controller. However, this max transfer size may be too large for virtual Fibre Channel devices, which are limited to 512 Kbytes. If a larger transfer size is used with a vFC device, Hyper-V always returns an error, and storvsc logs a message like this where the SRB status and SCSI status are both zero: hv_storvsc : tag#197 cmd 0x8a status: scsi 0x0 srb 0x0 hv 0xc0000001 Add logic to limit the max transfer size to 512 Kbytes for vFC devices. Fixes: 1d3e0980782f ("scsi: storvsc: Correct reporting of Hyper-V I/O size limits") Cc: stable@vger.kernel.org Signed-off-by: Michael Kelley Link: https://lore.kernel.org/r/1689887102-32806-1-git-send-email-mikelley@microsoft.com Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/storvsc_drv.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index c9b1500c2ab8..e78cfda035a1 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -406,6 +406,7 @@ static void storvsc_on_channel_callback(void *context); #define STORVSC_FC_MAX_LUNS_PER_TARGET 255 #define STORVSC_FC_MAX_TARGETS 128 #define STORVSC_FC_MAX_CHANNELS 8 +#define STORVSC_FC_MAX_XFER_SIZE ((u32)(512 * 1024)) #define STORVSC_IDE_MAX_LUNS_PER_TARGET 64 #define STORVSC_IDE_MAX_TARGETS 1 @@ -2071,6 +2072,9 @@ static int storvsc_probe(struct hv_device *device, * protecting it from any weird value. */ max_xfer_bytes = round_down(stor_device->max_transfer_bytes, HV_HYP_PAGE_SIZE); + if (is_fc) + max_xfer_bytes = min(max_xfer_bytes, STORVSC_FC_MAX_XFER_SIZE); + /* max_hw_sectors_kb */ host->max_sectors = max_xfer_bytes >> 9; /* From cd6872f2cf56626b5ae1f46589ad2b6aec281b7f Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Tue, 1 Aug 2023 19:14:24 +0200 Subject: [PATCH 247/513] libceph: fix potential hang in ceph_osdc_notify() commit e6e2843230799230fc5deb8279728a7218b0d63c upstream. 
If the cluster becomes unavailable, ceph_osdc_notify() may hang even with osd_request_timeout option set because linger_notify_finish_wait() waits for MWatchNotify NOTIFY_COMPLETE message with no associated OSD request in flight -- it's completely asynchronous. Introduce an additional timeout, derived from the specified notify timeout. While at it, switch both waits to killable which is more correct. Cc: stable@vger.kernel.org Signed-off-by: Ilya Dryomov Reviewed-by: Dongsheng Yang Reviewed-by: Xiubo Li Signed-off-by: Greg Kroah-Hartman --- net/ceph/osd_client.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c index f6b7436458ae..0c5e0d2c609e 100644 --- a/net/ceph/osd_client.c +++ b/net/ceph/osd_client.c @@ -3330,17 +3330,24 @@ static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq) int ret; dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); - ret = wait_for_completion_interruptible(&lreq->reg_commit_wait); + ret = wait_for_completion_killable(&lreq->reg_commit_wait); return ret ?: lreq->reg_commit_error; } -static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq) +static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq, + unsigned long timeout) { - int ret; + long left; dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id); - ret = wait_for_completion_interruptible(&lreq->notify_finish_wait); - return ret ?: lreq->notify_finish_error; + left = wait_for_completion_killable_timeout(&lreq->notify_finish_wait, + ceph_timeout_jiffies(timeout)); + if (left <= 0) + left = left ?: -ETIMEDOUT; + else + left = lreq->notify_finish_error; /* completed */ + + return left; } /* @@ -4890,7 +4897,8 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc, linger_submit(lreq); ret = linger_reg_commit_wait(lreq); if (!ret) - ret = linger_notify_finish_wait(lreq); + ret = linger_notify_finish_wait(lreq, + msecs_to_jiffies(2 * timeout * MSEC_PER_SEC)); else dout("lreq %p failed to initiate notify %d\n", lreq, ret); From 98b521d10e73d43ac3867463a7e8ad55225a544a Mon Sep 17 00:00:00 2001 From: Ross Maynard Date: Mon, 31 Jul 2023 15:42:04 +1000 Subject: [PATCH 248/513] USB: zaurus: Add ID for A-300/B-500/C-700 commit b99225b4fe297d07400f9e2332ecd7347b224f8d upstream. The SL-A300, B500/5600, and C700 devices no longer auto-load because of "usbnet: Remove over-broad module alias from zaurus." This patch adds IDs for those 3 devices. 
Link: https://bugzilla.kernel.org/show_bug.cgi?id=217632 Fixes: 16adf5d07987 ("usbnet: Remove over-broad module alias from zaurus.") Signed-off-by: Ross Maynard Cc: stable@vger.kernel.org Acked-by: Greg Kroah-Hartman Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/69b5423b-2013-9fc9-9569-58e707d9bafb@bigpond.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/cdc_ether.c | 21 +++++++++++++++++++++ drivers/net/usb/zaurus.c | 21 +++++++++++++++++++++ 2 files changed, 42 insertions(+) diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 3497b5a286ea..695e4efdc011 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -617,9 +617,23 @@ static const struct usb_device_id products[] = { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, + .idProduct = 0x8005, /* A-300 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, .idProduct = 0x8006, /* B-500/SL-5600 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8006, /* B-500/SL-5600 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, @@ -627,6 +641,13 @@ static const struct usb_device_id products[] = { .idProduct = 0x8007, /* C-700 */ ZAURUS_MASTER_INTERFACE, .driver_info = 0, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8007, /* C-700 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = 0, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, diff --git a/drivers/net/usb/zaurus.c b/drivers/net/usb/zaurus.c index 7984f2157d22..df3617c4c44e 100644 --- a/drivers/net/usb/zaurus.c +++ b/drivers/net/usb/zaurus.c @@ -289,9 +289,23 @@ static const struct usb_device_id products [] = { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x04DD, + .idProduct = 0x8005, /* A-300 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, .idProduct = 0x8006, /* B-500/SL-5600 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8006, /* B-500/SL-5600 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, @@ -299,6 +313,13 @@ static const struct usb_device_id products [] = { .idProduct = 0x8007, /* C-700 */ ZAURUS_MASTER_INTERFACE, .driver_info = ZAURUS_PXA_INFO, +}, { + .match_flags = USB_DEVICE_ID_MATCH_INT_INFO + | USB_DEVICE_ID_MATCH_DEVICE, + .idVendor = 0x04DD, + .idProduct = 0x8007, /* C-700 */ + ZAURUS_FAKE_INTERFACE, + .driver_info = (unsigned long)&bogus_mdlm_info, }, { .match_flags = USB_DEVICE_ID_MATCH_INT_INFO | USB_DEVICE_ID_MATCH_DEVICE, From 287c2c8677eddd5e0480374875abb3a3f433f1bb Mon Sep 17 00:00:00 2001 From: Xiubo Li Date: Tue, 25 Jul 2023 12:03:59 +0800 Subject: [PATCH 249/513] ceph: defer stopping mdsc delayed_work commit e7e607bd00481745550389a29ecabe33e13d67cf upstream. 
Flushing the dirty buffer may take a long time if the cluster is overloaded or if there is network issue. So we should ping the MDSs periodically to keep alive, else the MDS will blocklist the kclient. Cc: stable@vger.kernel.org Link: https://tracker.ceph.com/issues/61843 Signed-off-by: Xiubo Li Reviewed-by: Milind Changire Signed-off-by: Ilya Dryomov Signed-off-by: Greg Kroah-Hartman --- fs/ceph/mds_client.c | 4 ++-- fs/ceph/mds_client.h | 5 +++++ fs/ceph/super.c | 10 ++++++++++ 3 files changed, 17 insertions(+), 2 deletions(-) diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c index 0dc8871a4b66..a0b6ae02a70b 100644 --- a/fs/ceph/mds_client.c +++ b/fs/ceph/mds_client.c @@ -4607,7 +4607,7 @@ static void delayed_work(struct work_struct *work) dout("mdsc delayed_work\n"); - if (mdsc->stopping) + if (mdsc->stopping >= CEPH_MDSC_STOPPING_FLUSHED) return; mutex_lock(&mdsc->mutex); @@ -4786,7 +4786,7 @@ void send_flush_mdlog(struct ceph_mds_session *s) void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) { dout("pre_umount\n"); - mdsc->stopping = 1; + mdsc->stopping = CEPH_MDSC_STOPPING_BEGIN; ceph_mdsc_iterate_sessions(mdsc, send_flush_mdlog, true); ceph_mdsc_iterate_sessions(mdsc, lock_unlock_session, false); diff --git a/fs/ceph/mds_client.h b/fs/ceph/mds_client.h index 2667350eb72c..cd943842f0a3 100644 --- a/fs/ceph/mds_client.h +++ b/fs/ceph/mds_client.h @@ -370,6 +370,11 @@ struct cap_wait { int want; }; +enum { + CEPH_MDSC_STOPPING_BEGIN = 1, + CEPH_MDSC_STOPPING_FLUSHED = 2, +}; + /* * mds client state */ diff --git a/fs/ceph/super.c b/fs/ceph/super.c index 202ddde3d62a..1723ec21cd47 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -1227,6 +1227,16 @@ static void ceph_kill_sb(struct super_block *s) ceph_mdsc_pre_umount(fsc->mdsc); flush_fs_workqueues(fsc); + /* + * Though the kill_anon_super() will finally trigger the + * sync_filesystem() anyway, we still need to do it here + * and then bump the stage of shutdown to stop the work + * queue as earlier as possible. + */ + sync_filesystem(s); + + fsc->mdsc->stopping = CEPH_MDSC_STOPPING_FLUSHED; + kill_anon_super(s); fsc->client->extra_mon_dispatch = NULL; From b8f029fc4075987afc760587c3c255d3fee8f943 Mon Sep 17 00:00:00 2001 From: Krzysztof Kozlowski Date: Wed, 19 Jul 2023 08:16:52 +0200 Subject: [PATCH 250/513] firmware: arm_scmi: Drop OF node reference in the transport channel setup commit da042eb4f061a0b54aedadcaa15391490c48e1ad upstream. The OF node reference obtained from of_parse_phandle() should be dropped if node is not compatible with arm,scmi-shmem. 
Fixes: 507cd4d2c5eb ("firmware: arm_scmi: Add compatibility checks for shmem node") Cc: Signed-off-by: Krzysztof Kozlowski Reviewed-by: Cristian Marussi Link: https://lore.kernel.org/r/20230719061652.8850-1-krzysztof.kozlowski@linaro.org Signed-off-by: Sudeep Holla Signed-off-by: Greg Kroah-Hartman --- drivers/firmware/arm_scmi/mailbox.c | 4 +++- drivers/firmware/arm_scmi/smc.c | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/firmware/arm_scmi/mailbox.c b/drivers/firmware/arm_scmi/mailbox.c index ed9b83aee8bd..d1400de17eca 100644 --- a/drivers/firmware/arm_scmi/mailbox.c +++ b/drivers/firmware/arm_scmi/mailbox.c @@ -106,8 +106,10 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, return -ENOMEM; shmem = of_parse_phandle(cdev->of_node, "shmem", idx); - if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) + if (!of_device_is_compatible(shmem, "arm,scmi-shmem")) { + of_node_put(shmem); return -ENXIO; + } ret = of_address_to_resource(shmem, 0, &res); of_node_put(shmem); diff --git a/drivers/firmware/arm_scmi/smc.c b/drivers/firmware/arm_scmi/smc.c index 4effecc3bb46..ea1caf70e8df 100644 --- a/drivers/firmware/arm_scmi/smc.c +++ b/drivers/firmware/arm_scmi/smc.c @@ -76,8 +76,10 @@ static int smc_chan_setup(struct scmi_chan_info *cinfo, struct device *dev, return -ENOMEM; np = of_parse_phandle(cdev->of_node, "shmem", 0); - if (!of_device_is_compatible(np, "arm,scmi-shmem")) + if (!of_device_is_compatible(np, "arm,scmi-shmem")) { + of_node_put(np); return -ENXIO; + } ret = of_address_to_resource(np, 0, &res); of_node_put(np); From a74878207b02060c5feaf88b5566208ed08eb78d Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Sat, 5 Aug 2023 00:06:43 +0200 Subject: [PATCH 251/513] x86/CPU/AMD: Do not leak quotient data after a division by 0 commit 77245f1c3c6495521f6a3af082696ee2f8ce3921 upstream. Under certain circumstances, an integer division by 0 which faults, can leave stale quotient data from a previous division operation on Zen1 microarchitectures. Do a dummy division 0/1 before returning from the #DE exception handler in order to avoid any leaks of potentially sensitive data. 
Signed-off-by: Borislav Petkov (AMD) Cc: Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/cpufeatures.h | 1 + arch/x86/include/asm/processor.h | 2 ++ arch/x86/kernel/cpu/amd.c | 19 +++++++++++++++++++ arch/x86/kernel/traps.c | 2 ++ 4 files changed, 24 insertions(+) diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 608ffc45fc0e..d6089072ee41 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -465,4 +465,5 @@ /* BUG word 2 */ #define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */ +#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */ #endif /* _ASM_X86_CPUFEATURES_H */ diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index 747ccc2ae383..aec714ea8230 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -804,10 +804,12 @@ extern u16 get_llc_id(unsigned int cpu); extern u32 amd_get_nodes_per_socket(void); extern u32 amd_get_highest_perf(void); extern bool cpu_has_ibpb_brtype_microcode(void); +extern void amd_clear_divider(void); #else static inline u32 amd_get_nodes_per_socket(void) { return 0; } static inline u32 amd_get_highest_perf(void) { return 0; } static inline bool cpu_has_ibpb_brtype_microcode(void) { return false; } +static inline void amd_clear_divider(void) { } #endif static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 1b90eb6ea503..77f4dfb0662e 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -75,6 +75,10 @@ static const int amd_zenbleed[] = AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); +static const int amd_div0[] = + AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x00, 0x0, 0x2f, 0xf), + AMD_MODEL_RANGE(0x17, 0x50, 0x0, 0x5f, 0xf)); + static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum) { int osvw_id = *erratum++; @@ -1140,6 +1144,11 @@ static void init_amd(struct cpuinfo_x86 *c) check_null_seg_clears_base(c); zenbleed_check(c); + + if (cpu_has_amd_erratum(c, amd_div0)) { + pr_notice_once("AMD Zen1 DIV0 bug detected. Disable SMT for full protection.\n"); + setup_force_cpu_bug(X86_BUG_DIV0); + } } #ifdef CONFIG_X86_32 @@ -1300,3 +1309,13 @@ void amd_check_microcode(void) { on_each_cpu(zenbleed_check_cpu, NULL, 1); } + +/* + * Issue a DIV 0/1 insn to clear any division data from previous DIV + * operations. + */ +void noinstr amd_clear_divider(void) +{ + asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) + :: "a" (0), "d" (0), "r" (1)); +} diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index ca47080e3774..3361d32d090f 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -202,6 +202,8 @@ DEFINE_IDTENTRY(exc_divide_error) { do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, FPE_INTDIV, error_get_trap_addr(regs)); + + amd_clear_divider(); } DEFINE_IDTENTRY(exc_overflow) From 8a34a242cf03211cc89f68308d149b793f63c479 Mon Sep 17 00:00:00 2001 From: gaoming Date: Wed, 5 Jul 2023 15:15:15 +0800 Subject: [PATCH 252/513] exfat: use kvmalloc_array/kvfree instead of kmalloc_array/kfree commit daf60d6cca26e50d65dac374db92e58de745ad26 upstream. The call stack shown below is a scenario in the Linux 4.19 kernel. Allocating memory failed where exfat fs use kmalloc_array due to system memory fragmentation, while the u-disk was inserted without recognition. 
Devices such as u-disk using the exfat file system are pluggable and may
be inserted into the system at any time. However, long-term running
systems cannot guarantee the continuity of physical memory. Therefore,
it's necessary to address this issue.

Binder:2632_6: page allocation failure: order:4, mode:0x6040c0(GFP_KERNEL|__GFP_COMP), nodemask=(null)
Call trace:
[242178.097582] dump_backtrace+0x0/0x4
[242178.097589] dump_stack+0xf4/0x134
[242178.097598] warn_alloc+0xd8/0x144
[242178.097603] __alloc_pages_nodemask+0x1364/0x1384
[242178.097608] kmalloc_order+0x2c/0x510
[242178.097612] kmalloc_order_trace+0x40/0x16c
[242178.097618] __kmalloc+0x360/0x408
[242178.097624] load_alloc_bitmap+0x160/0x284
[242178.097628] exfat_fill_super+0xa3c/0xe7c
[242178.097635] mount_bdev+0x2e8/0x3a0
[242178.097638] exfat_fs_mount+0x40/0x50
[242178.097643] mount_fs+0x138/0x2e8
[242178.097649] vfs_kern_mount+0x90/0x270
[242178.097655] do_mount+0x798/0x173c
[242178.097659] ksys_mount+0x114/0x1ac
[242178.097665] __arm64_sys_mount+0x24/0x34
[242178.097671] el0_svc_common+0xb8/0x1b8
[242178.097676] el0_svc_handler+0x74/0x90
[242178.097681] el0_svc+0x8/0x340

By analyzing the exfat code, we found that continuous physical memory is
not required here, so kvmalloc_array can be used to solve this problem.

Cc: stable@vger.kernel.org
Signed-off-by: gaoming
Signed-off-by: Namjae Jeon
Signed-off-by: Greg Kroah-Hartman
---
 fs/exfat/balloc.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/fs/exfat/balloc.c b/fs/exfat/balloc.c
index e2113e0a848c..1dce6b4e9088 100644
--- a/fs/exfat/balloc.c
+++ b/fs/exfat/balloc.c
@@ -69,7 +69,7 @@ static int exfat_allocate_bitmap(struct super_block *sb,
 	}
 	sbi->map_sectors = ((need_map_size - 1) >> (sb->s_blocksize_bits)) + 1;
-	sbi->vol_amap = kmalloc_array(sbi->map_sectors,
+	sbi->vol_amap = kvmalloc_array(sbi->map_sectors,
 				sizeof(struct buffer_head *), GFP_KERNEL);
 	if (!sbi->vol_amap)
 		return -ENOMEM;
@@ -84,7 +84,7 @@ static int exfat_allocate_bitmap(struct super_block *sb,
 		while (j < i)
 			brelse(sbi->vol_amap[j++]);
-		kfree(sbi->vol_amap);
+		kvfree(sbi->vol_amap);
 		sbi->vol_amap = NULL;
 		return -EIO;
 	}
@@ -138,7 +138,7 @@ void exfat_free_bitmap(struct exfat_sb_info *sbi)
 	for (i = 0; i < sbi->map_sectors; i++)
 		__brelse(sbi->vol_amap[i]);
-	kfree(sbi->vol_amap);
+	kvfree(sbi->vol_amap);
 }
 
 int exfat_set_bitmap(struct inode *inode, unsigned int clu, bool sync)

From 1c33ca1e197478b315cfe9242648b980dcfd5dbd Mon Sep 17 00:00:00 2001
From: Sungjong Seo
Date: Fri, 14 Jul 2023 17:43:54 +0900
Subject: [PATCH 253/513] exfat: release s_lock before calling dir_emit()

commit ff84772fd45d486e4fc78c82e2f70ce5333543e6 upstream.

There is a potential deadlock reported by syzbot as below:

======================================================
WARNING: possible circular locking dependency detected
6.4.0-next-20230707-syzkaller #0 Not tainted
------------------------------------------------------
syz-executor330/5073 is trying to acquire lock:
ffff8880218527a0 (&mm->mmap_lock){++++}-{3:3}, at: mmap_read_lock_killable include/linux/mmap_lock.h:151 [inline]
ffff8880218527a0 (&mm->mmap_lock){++++}-{3:3}, at: get_mmap_lock_carefully mm/memory.c:5293 [inline]
ffff8880218527a0 (&mm->mmap_lock){++++}-{3:3}, at: lock_mm_and_find_vma+0x369/0x510 mm/memory.c:5344
but task is already holding lock:
ffff888019f760e0 (&sbi->s_lock){+.+.}-{3:3}, at: exfat_iterate+0x117/0xb50 fs/exfat/dir.c:232
which lock already depends on the new lock.
Chain exists of: &mm->mmap_lock --> mapping.invalidate_lock#3 --> &sbi->s_lock Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock(&sbi->s_lock); lock(mapping.invalidate_lock#3); lock(&sbi->s_lock); rlock(&mm->mmap_lock); Let's try to avoid above potential deadlock condition by moving dir_emit*() out of sbi->s_lock coverage. Fixes: ca06197382bd ("exfat: add directory operations") Cc: stable@vger.kernel.org #v5.7+ Reported-by: syzbot+1741a5d9b79989c10bdc@syzkaller.appspotmail.com Link: https://lore.kernel.org/lkml/00000000000078ee7e060066270b@google.com/T/#u Tested-by: syzbot+1741a5d9b79989c10bdc@syzkaller.appspotmail.com Signed-off-by: Sungjong Seo Signed-off-by: Namjae Jeon Signed-off-by: Greg Kroah-Hartman --- fs/exfat/dir.c | 27 ++++++++++++--------------- 1 file changed, 12 insertions(+), 15 deletions(-) diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index 3940a56902dd..8475a8653c3a 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -211,7 +211,10 @@ static void exfat_free_namebuf(struct exfat_dentry_namebuf *nb) exfat_init_namebuf(nb); } -/* skip iterating emit_dots when dir is empty */ +/* + * Before calling dir_emit*(), sbi->s_lock should be released + * because page fault can occur in dir_emit*(). + */ #define ITER_POS_FILLED_DOTS (2) static int exfat_iterate(struct file *filp, struct dir_context *ctx) { @@ -226,11 +229,10 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx) int err = 0, fake_offset = 0; exfat_init_namebuf(nb); - mutex_lock(&EXFAT_SB(sb)->s_lock); cpos = ctx->pos; if (!dir_emit_dots(filp, ctx)) - goto unlock; + goto out; if (ctx->pos == ITER_POS_FILLED_DOTS) { cpos = 0; @@ -242,16 +244,18 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx) /* name buffer should be allocated before use */ err = exfat_alloc_namebuf(nb); if (err) - goto unlock; + goto out; get_new: + mutex_lock(&EXFAT_SB(sb)->s_lock); + if (ei->flags == ALLOC_NO_FAT_CHAIN && cpos >= i_size_read(inode)) goto end_of_dir; err = exfat_readdir(inode, &cpos, &de); if (err) { /* - * At least we tried to read a sector. Move cpos to next sector - * position (should be aligned). + * At least we tried to read a sector. + * Move cpos to next sector position (should be aligned). */ if (err == -EIO) { cpos += 1 << (sb->s_blocksize_bits); @@ -274,16 +278,10 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx) inum = iunique(sb, EXFAT_ROOT_INO); } - /* - * Before calling dir_emit(), sb_lock should be released. - * Because page fault can occur in dir_emit() when the size - * of buffer given from user is larger than one page size. - */ mutex_unlock(&EXFAT_SB(sb)->s_lock); if (!dir_emit(ctx, nb->lfn, strlen(nb->lfn), inum, (de.attr & ATTR_SUBDIR) ? DT_DIR : DT_REG)) - goto out_unlocked; - mutex_lock(&EXFAT_SB(sb)->s_lock); + goto out; ctx->pos = cpos; goto get_new; @@ -291,9 +289,8 @@ static int exfat_iterate(struct file *filp, struct dir_context *ctx) if (!cpos && fake_offset) cpos = ITER_POS_FILLED_DOTS; ctx->pos = cpos; -unlock: mutex_unlock(&EXFAT_SB(sb)->s_lock); -out_unlocked: +out: /* * To improve performance, free namebuf after unlock sb_lock. * If namebuf is not allocated, this function do nothing From b0875c583e41a7a245dde87463d81324988b22e1 Mon Sep 17 00:00:00 2001 From: Olivier Maignial Date: Fri, 23 Jun 2023 17:33:36 +0200 Subject: [PATCH 254/513] mtd: spinand: toshiba: Fix ecc_get_status commit 8544cda94dae6be3f1359539079c68bb731428b1 upstream. Reading ECC status is failing. 
tx58cxgxsxraix_ecc_get_status() is using on-stack buffer for SPINAND_GET_FEATURE_OP() output. It is not suitable for DMA needs of spi-mem. Fix this by using the spi-mem operations dedicated buffer spinand->scratchbuf. See spinand->scratchbuf: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/include/linux/mtd/spinand.h?h=v6.3#n418 spi_mem_check_op(): https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/spi/spi-mem.c?h=v6.3#n199 Fixes: 10949af1681d ("mtd: spinand: Add initial support for Toshiba TC58CVG2S0H") Cc: stable@vger.kernel.org Signed-off-by: Olivier Maignial Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/DB4P250MB1032553D05FBE36DEE0D311EFE23A@DB4P250MB1032.EURP250.PROD.OUTLOOK.COM Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/spi/toshiba.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mtd/nand/spi/toshiba.c b/drivers/mtd/nand/spi/toshiba.c index 7380b1ebaccd..a80427c13121 100644 --- a/drivers/mtd/nand/spi/toshiba.c +++ b/drivers/mtd/nand/spi/toshiba.c @@ -73,7 +73,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand, { struct nand_device *nand = spinand_to_nand(spinand); u8 mbf = 0; - struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, &mbf); + struct spi_mem_op op = SPINAND_GET_FEATURE_OP(0x30, spinand->scratchbuf); switch (status & STATUS_ECC_MASK) { case STATUS_ECC_NO_BITFLIPS: @@ -92,7 +92,7 @@ static int tx58cxgxsxraix_ecc_get_status(struct spinand_device *spinand, if (spi_mem_exec_op(spinand->spimem, &op)) return nanddev_get_ecc_conf(nand)->strength; - mbf >>= 4; + mbf = *(spinand->scratchbuf) >> 4; if (WARN_ON(mbf > nanddev_get_ecc_conf(nand)->strength || !mbf)) return nanddev_get_ecc_conf(nand)->strength; From b92c88009da12efaca21ac3af74497d42e79d864 Mon Sep 17 00:00:00 2001 From: Arseniy Krasnov Date: Wed, 5 Jul 2023 09:52:10 +0300 Subject: [PATCH 255/513] mtd: rawnand: meson: fix OOB available bytes for ECC commit 7e6b04f9238eab0f684fafd158c1f32ea65b9eaa upstream. It is incorrect to calculate number of OOB bytes for ECC engine using some "already known" ECC step size (1024 bytes here). Number of such bytes for ECC engine must be whole OOB except 2 bytes for bad block marker, while proper ECC step size and strength will be selected by ECC logic. 
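For a concrete feel of the arithmetic described above, here is a small stand-alone sketch (not part of the patch; the page and OOB sizes are example values only). Per-1024-byte-sector accounting withholds 2 bytes per sector from the ECC engine, while only the 2-byte bad block marker actually has to stay reserved.

#include <stdio.h>

int main(void)
{
        unsigned int writesize = 4096;  /* example page size */
        unsigned int oobsize   = 256;   /* example OOB size  */

        unsigned int nsectors = writesize / 1024;

        /* old calculation: reserves 2 bytes for every 1024-byte sector */
        unsigned int avail_old = oobsize - 2 * nsectors;        /* 248 */

        /* fixed calculation: whole OOB minus the 2-byte bad block marker */
        unsigned int avail_new = oobsize - 2;                   /* 254 */

        printf("OOB bytes offered to the ECC engine: old=%u new=%u\n",
               avail_old, avail_new);
        return 0;
}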
Fixes: 8fae856c5350 ("mtd: rawnand: meson: add support for Amlogic NAND flash controller") Cc: Signed-off-by: Arseniy Krasnov Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20230705065211.293500-1-AVKrasnov@sberdevices.ru Signed-off-by: Greg Kroah-Hartman --- drivers/mtd/nand/raw/meson_nand.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/mtd/nand/raw/meson_nand.c b/drivers/mtd/nand/raw/meson_nand.c index bb256a3bb9be..9d441965321a 100644 --- a/drivers/mtd/nand/raw/meson_nand.c +++ b/drivers/mtd/nand/raw/meson_nand.c @@ -1180,7 +1180,6 @@ static int meson_nand_attach_chip(struct nand_chip *nand) struct meson_nfc *nfc = nand_get_controller_data(nand); struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand); struct mtd_info *mtd = nand_to_mtd(nand); - int nsectors = mtd->writesize / 1024; int ret; if (!mtd->name) { @@ -1198,7 +1197,7 @@ static int meson_nand_attach_chip(struct nand_chip *nand) nand->options |= NAND_NO_SUBPAGE_WRITE; ret = nand_ecc_choose_conf(nand, nfc->data->ecc_caps, - mtd->oobsize - 2 * nsectors); + mtd->oobsize - 2); if (ret) { dev_err(nfc->dev, "failed to ECC init\n"); return -EINVAL; From adacc3a954fa882bb400aeae2033310a1072c899 Mon Sep 17 00:00:00 2001 From: Dinh Nguyen Date: Tue, 11 Jul 2023 15:44:30 -0500 Subject: [PATCH 256/513] arm64: dts: stratix10: fix incorrect I2C property for SCL signal commit db66795f61354c373ecdadbdae1ed253a96c47cb upstream. The correct dts property for the SCL falling time is "i2c-scl-falling-time-ns". Fixes: c8da1d15b8a4 ("arm64: dts: stratix10: i2c clock running out of spec") Cc: stable@vger.kernel.org Signed-off-by: Dinh Nguyen Signed-off-by: Greg Kroah-Hartman --- arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts | 2 +- arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts index 46e558ab7729..f0e8af12442a 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk.dts @@ -129,7 +129,7 @@ status = "okay"; clock-frequency = <100000>; i2c-sda-falling-time-ns = <890>; /* hcnt */ - i2c-sdl-falling-time-ns = <890>; /* lcnt */ + i2c-scl-falling-time-ns = <890>; /* lcnt */ adc@14 { compatible = "lltc,ltc2497"; diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts index f9b4a39683cf..92ac3c86ebd5 100644 --- a/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts +++ b/arch/arm64/boot/dts/altera/socfpga_stratix10_socdk_nand.dts @@ -162,7 +162,7 @@ status = "okay"; clock-frequency = <100000>; i2c-sda-falling-time-ns = <890>; /* hcnt */ - i2c-sdl-falling-time-ns = <890>; /* lcnt */ + i2c-scl-falling-time-ns = <890>; /* lcnt */ adc@14 { compatible = "lltc,ltc2497"; From 4ed3eed99ee6137cf6682621657f0e7699957f56 Mon Sep 17 00:00:00 2001 From: Laszlo Ersek Date: Mon, 31 Jul 2023 18:42:36 +0200 Subject: [PATCH 257/513] net: tun_chr_open(): set sk_uid from current_fsuid() commit 9bc3047374d5bec163e83e743709e23753376f0c upstream. Commit a096ccca6e50 initializes the "sk_uid" field in the protocol socket (struct sock) from the "/dev/net/tun" device node's owner UID. Per original commit 86741ec25462 ("net: core: Add a UID field to struct sock.", 2016-11-04), that's wrong: the idea is to cache the UID of the userspace process that creates the socket. 
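As an illustration of that distinction (a sketch only, not the driver code; the real change is the sock_init_data_uid() argument swap in the hunk further below):

#include <linux/cred.h>
#include <linux/fs.h>

/* Simplified open handler: the uid recorded on the socket must describe
 * the process calling open(), not whoever owns the device node.
 */
static int example_chr_open(struct inode *inode, struct file *file)
{
        kuid_t node_owner = inode->i_uid;       /* who owns /dev/net/tun */
        kuid_t opener     = current_fsuid();    /* who is calling open() */

        /* pass 'opener' on, e.g.:
         * sock_init_data_uid(&tfile->socket, &tfile->sk, opener);
         */
        (void)node_owner;
        return 0;
}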
Commit 86741ec25462 mentions socket() and accept(); with "tun", the action that creates the socket is open("/dev/net/tun"). Therefore the device node's owner UID is irrelevant. In most cases, "/dev/net/tun" will be owned by root, so in practice, commit a096ccca6e50 has no observable effect: - before, "sk_uid" would be zero, due to undefined behavior (CVE-2023-1076), - after, "sk_uid" would be zero, due to "/dev/net/tun" being owned by root. What matters is the (fs)UID of the process performing the open(), so cache that in "sk_uid". Cc: Eric Dumazet Cc: Lorenzo Colitti Cc: Paolo Abeni Cc: Pietro Borrello Cc: netdev@vger.kernel.org Cc: stable@vger.kernel.org Fixes: a096ccca6e50 ("tun: tun_chr_open(): correctly initialize socket uid") Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2173435 Signed-off-by: Laszlo Ersek Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/tun.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 30eea8270c9b..924bdae314c8 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -3411,7 +3411,7 @@ static int tun_chr_open(struct inode *inode, struct file * file) tfile->socket.file = file; tfile->socket.ops = &tun_socket_ops; - sock_init_data_uid(&tfile->socket, &tfile->sk, inode->i_uid); + sock_init_data_uid(&tfile->socket, &tfile->sk, current_fsuid()); tfile->sk.sk_write_space = tun_sock_write_space; tfile->sk.sk_sndbuf = INT_MAX; From 32ca6a55e10ed9736672565d64771f6ea74e4341 Mon Sep 17 00:00:00 2001 From: Laszlo Ersek Date: Mon, 31 Jul 2023 18:42:37 +0200 Subject: [PATCH 258/513] net: tap_open(): set sk_uid from current_fsuid() commit 5c9241f3ceab3257abe2923a59950db0dc8bb737 upstream. Commit 66b2c338adce initializes the "sk_uid" field in the protocol socket (struct sock) from the "/dev/tapX" device node's owner UID. Per original commit 86741ec25462 ("net: core: Add a UID field to struct sock.", 2016-11-04), that's wrong: the idea is to cache the UID of the userspace process that creates the socket. Commit 86741ec25462 mentions socket() and accept(); with "tap", the action that creates the socket is open("/dev/tapX"). Therefore the device node's owner UID is irrelevant. In most cases, "/dev/tapX" will be owned by root, so in practice, commit 66b2c338adce has no observable effect: - before, "sk_uid" would be zero, due to undefined behavior (CVE-2023-1076), - after, "sk_uid" would be zero, due to "/dev/tapX" being owned by root. What matters is the (fs)UID of the process performing the open(), so cache that in "sk_uid". Cc: Eric Dumazet Cc: Lorenzo Colitti Cc: Paolo Abeni Cc: Pietro Borrello Cc: netdev@vger.kernel.org Cc: stable@vger.kernel.org Fixes: 66b2c338adce ("tap: tap_open(): correctly initialize socket uid") Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=2173435 Signed-off-by: Laszlo Ersek Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/tap.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/tap.c b/drivers/net/tap.c index 663ce0e09c2d..bdb05d246b86 100644 --- a/drivers/net/tap.c +++ b/drivers/net/tap.c @@ -523,7 +523,7 @@ static int tap_open(struct inode *inode, struct file *file) q->sock.state = SS_CONNECTED; q->sock.file = file; q->sock.ops = &tap_socket_ops; - sock_init_data_uid(&q->sock, &q->sk, inode->i_uid); + sock_init_data_uid(&q->sock, &q->sk, current_fsuid()); q->sk.sk_write_space = tap_sock_write_space; q->sk.sk_destruct = tap_sock_destruct; q->flags = IFF_VNET_HDR | IFF_NO_PI | IFF_TAP; From 7978bcca4c1fae7a5a3a0e50d52a25e8c48bd1c1 Mon Sep 17 00:00:00 2001 From: Paul Fertser Date: Mon, 5 Jun 2023 10:34:07 +0300 Subject: [PATCH 259/513] wifi: mt76: mt7615: do not advertise 5 GHz on first phy of MT7615D (DBDC) commit 421033deb91521aa6a9255e495cb106741a52275 upstream. On DBDC devices the first (internal) phy is only capable of using 2.4 GHz band, and the 5 GHz band is exposed via a separate phy object, so avoid the false advertising. Reported-by: Rani Hod Closes: https://github.com/openwrt/openwrt/pull/12361 Fixes: 7660a1bd0c22 ("mt76: mt7615: register ext_phy if DBDC is detected") Cc: stable@vger.kernel.org Signed-off-by: Paul Fertser Reviewed-by: Simon Horman Acked-by: Felix Fietkau Signed-off-by: Kalle Valo Link: https://lore.kernel.org/r/20230605073408.8699-1-fercerpav@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c index 6dbaaf95ee38..2092aa373ab3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/eeprom.c @@ -123,12 +123,12 @@ mt7615_eeprom_parse_hw_band_cap(struct mt7615_dev *dev) case MT_EE_5GHZ: dev->mphy.cap.has_5ghz = true; break; - case MT_EE_2GHZ: - dev->mphy.cap.has_2ghz = true; - break; case MT_EE_DBDC: dev->dbdc_support = true; fallthrough; + case MT_EE_2GHZ: + dev->mphy.cap.has_2ghz = true; + break; default: dev->mphy.cap.has_2ghz = true; dev->mphy.cap.has_5ghz = true; From ae07cfe2b099e29f83c5fa3f625b7f9db12eb606 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Tue, 1 Aug 2023 19:14:24 +0200 Subject: [PATCH 260/513] rbd: prevent busy loop when requesting exclusive lock commit 9d01e07fd1bfb4daae156ab528aa196f5ac2b2bc upstream. Due to rbd_try_acquire_lock() effectively swallowing all but EBLOCKLISTED error from rbd_try_lock() ("request lock anyway") and rbd_request_lock() returning ETIMEDOUT error not only for an actual notify timeout but also when the lock owner doesn't respond, a busy loop inside of rbd_acquire_lock() between rbd_try_acquire_lock() and rbd_request_lock() is possible. Requesting the lock on EBUSY error (returned by get_lock_owner_info() if an incompatible lock or invalid lock owner is detected) makes very little sense. The same goes for ETIMEDOUT error (might pop up pretty much anywhere if osd_request_timeout option is set) and many others. Just fail I/O requests on rbd_dev->acquiring_list immediately on any error from rbd_try_lock(). 
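To make that retry policy concrete, a schematic sketch (the *_sketch() helpers are stand-ins for rbd_try_lock() and rbd_request_lock(), not real functions): only the "lock is held by a live owner" outcome keeps going; every error fails the queued I/O instead of looping.

static int try_lock_sketch(void)     { return 0; } /* stand-in: 0 locked, >0 owner alive, <0 error */
static int request_lock_sketch(void) { return 0; } /* stand-in: ask the owner to release */

static int acquire_exclusive_lock_sketch(void)
{
        int ret = try_lock_sketch();

        if (ret < 0)
                return ret;                     /* fail queued I/O at once, no busy loop */
        if (ret > 0)
                return request_lock_sketch();   /* only the "owner alive" case requests */
        return 0;                               /* lock acquired */
}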
Cc: stable@vger.kernel.org # 588159009d5b: rbd: retrieve and check lock owner twice before blocklisting Cc: stable@vger.kernel.org Signed-off-by: Ilya Dryomov Reviewed-by: Dongsheng Yang Signed-off-by: Greg Kroah-Hartman --- drivers/block/rbd.c | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c index c269c552a43a..fe8bdbf4616b 100644 --- a/drivers/block/rbd.c +++ b/drivers/block/rbd.c @@ -3677,7 +3677,7 @@ static int rbd_lock(struct rbd_device *rbd_dev) ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie, RBD_LOCK_TAG, "", 0); - if (ret) + if (ret && ret != -EEXIST) return ret; __rbd_lock(rbd_dev, cookie); @@ -3880,7 +3880,7 @@ static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev) &rbd_dev->header_oloc, RBD_LOCK_NAME, &lock_type, &lock_tag, &lockers, &num_lockers); if (ret) { - rbd_warn(rbd_dev, "failed to retrieve lockers: %d", ret); + rbd_warn(rbd_dev, "failed to get header lockers: %d", ret); return ERR_PTR(ret); } @@ -3942,8 +3942,10 @@ static int find_watcher(struct rbd_device *rbd_dev, ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc, &watchers, &num_watchers); - if (ret) + if (ret) { + rbd_warn(rbd_dev, "failed to get watchers: %d", ret); return ret; + } sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie); for (i = 0; i < num_watchers; i++) { @@ -3987,8 +3989,12 @@ static int rbd_try_lock(struct rbd_device *rbd_dev) locker = refreshed_locker = NULL; ret = rbd_lock(rbd_dev); - if (ret != -EBUSY) + if (!ret) + goto out; + if (ret != -EBUSY) { + rbd_warn(rbd_dev, "failed to lock header: %d", ret); goto out; + } /* determine if the current lock holder is still alive */ locker = get_lock_owner_info(rbd_dev); @@ -4091,11 +4097,8 @@ static int rbd_try_acquire_lock(struct rbd_device *rbd_dev) ret = rbd_try_lock(rbd_dev); if (ret < 0) { - rbd_warn(rbd_dev, "failed to lock header: %d", ret); - if (ret == -EBLOCKLISTED) - goto out; - - ret = 1; /* request lock anyway */ + rbd_warn(rbd_dev, "failed to acquire lock: %d", ret); + goto out; } if (ret > 0) { up_write(&rbd_dev->lock_rwsem); @@ -6631,12 +6634,11 @@ static int rbd_add_acquire_lock(struct rbd_device *rbd_dev) cancel_delayed_work_sync(&rbd_dev->lock_dwork); if (!ret) ret = -ETIMEDOUT; - } - if (ret) { - rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret); - return ret; + rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret); } + if (ret) + return ret; /* * The lock may have been released by now, unless automatic lock From c81bdf8f9f2b002d217c3d5357cdea9f2b82ff90 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 25 Jul 2023 10:42:06 +0200 Subject: [PATCH 261/513] bpf: Disable preemption in bpf_event_output commit d62cc390c2e99ae267ffe4b8d7e2e08b6c758c32 upstream. We received report [1] of kernel crash, which is caused by using nesting protection without disabled preemption. The bpf_event_output can be called by programs executed by bpf_prog_run_array_cg function that disabled migration but keeps preemption enabled. This can cause task to be preempted by another one inside the nesting protection and lead eventually to two tasks using same perf_sample_data buffer and cause crashes like: BUG: kernel NULL pointer dereference, address: 0000000000000001 #PF: supervisor instruction fetch in kernel mode #PF: error_code(0x0010) - not-present page ... ? perf_output_sample+0x12a/0x9a0 ? 
finish_task_switch.isra.0+0x81/0x280 ? perf_event_output+0x66/0xa0 ? bpf_event_output+0x13a/0x190 ? bpf_event_output_data+0x22/0x40 ? bpf_prog_dfc84bbde731b257_cil_sock4_connect+0x40a/0xacb ? xa_load+0x87/0xe0 ? __cgroup_bpf_run_filter_sock_addr+0xc1/0x1a0 ? release_sock+0x3e/0x90 ? sk_setsockopt+0x1a1/0x12f0 ? udp_pre_connect+0x36/0x50 ? inet_dgram_connect+0x93/0xa0 ? __sys_connect+0xb4/0xe0 ? udp_setsockopt+0x27/0x40 ? __pfx_udp_push_pending_frames+0x10/0x10 ? __sys_setsockopt+0xdf/0x1a0 ? __x64_sys_connect+0xf/0x20 ? do_syscall_64+0x3a/0x90 ? entry_SYSCALL_64_after_hwframe+0x72/0xdc Fixing this by disabling preemption in bpf_event_output. [1] https://github.com/cilium/cilium/issues/26756 Cc: stable@vger.kernel.org Reported-by: Oleg "livelace" Popov Closes: https://github.com/cilium/cilium/issues/26756 Fixes: 2a916f2f546c ("bpf: Use migrate_disable/enable in array macros and cgroup/lirc code.") Acked-by: Hou Tao Signed-off-by: Jiri Olsa Link: https://lore.kernel.org/r/20230725084206.580930-3-jolsa@kernel.org Signed-off-by: Alexei Starovoitov Signed-off-by: Greg Kroah-Hartman --- kernel/trace/bpf_trace.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c index 8b3531172d8e..6352a41380e5 100644 --- a/kernel/trace/bpf_trace.c +++ b/kernel/trace/bpf_trace.c @@ -662,7 +662,6 @@ static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_misc_sds); u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy) { - int nest_level = this_cpu_inc_return(bpf_event_output_nest_level); struct perf_raw_frag frag = { .copy = ctx_copy, .size = ctx_size, @@ -679,8 +678,12 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, }; struct perf_sample_data *sd; struct pt_regs *regs; + int nest_level; u64 ret; + preempt_disable(); + nest_level = this_cpu_inc_return(bpf_event_output_nest_level); + if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(bpf_misc_sds.sds))) { ret = -EBUSY; goto out; @@ -695,6 +698,7 @@ u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, ret = __bpf_perf_event_output(regs, map, flags, sd); out: this_cpu_dec(bpf_event_output_nest_level); + preempt_enable(); return ret; } From ef0d07c66843160c3358bdc2c553bcfe43216f85 Mon Sep 17 00:00:00 2001 From: Aleksa Sarai Date: Sun, 6 Aug 2023 02:11:58 +1000 Subject: [PATCH 262/513] open: make RESOLVE_CACHED correctly test for O_TMPFILE commit a0fc452a5d7fed986205539259df1d60546f536c upstream. O_TMPFILE is actually __O_TMPFILE|O_DIRECTORY. This means that the old fast-path check for RESOLVE_CACHED would reject all users passing O_DIRECTORY with -EAGAIN, when in fact the intended test was to check for __O_TMPFILE. 
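A tiny userspace demonstration of the flag arithmetic (illustrative only): because O_TMPFILE is __O_TMPFILE|O_DIRECTORY, masking with the full O_TMPFILE value also matches an ordinary directory open, while masking with only the tmpfile-specific bit does not.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        int dir_flags = O_RDONLY | O_DIRECTORY;         /* plain directory open */
        int tmpfile_bit = O_TMPFILE & ~O_DIRECTORY;     /* i.e. __O_TMPFILE */

        printf("test against O_TMPFILE matches a directory open: %d\n",
               (dir_flags & O_TMPFILE) != 0);            /* prints 1 */
        printf("test against __O_TMPFILE matches a directory open: %d\n",
               (dir_flags & tmpfile_bit) != 0);          /* prints 0 */
        return 0;
}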
Cc: stable@vger.kernel.org # v5.12+ Fixes: 99668f618062 ("fs: expose LOOKUP_CACHED through openat2() RESOLVE_CACHED") Signed-off-by: Aleksa Sarai Message-Id: <20230806-resolve_cached-o_tmpfile-v1-1-7ba16308465e@cyphar.com> Signed-off-by: Christian Brauner Signed-off-by: Greg Kroah-Hartman --- fs/open.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/open.c b/fs/open.c index e93c33069055..159a2765b7eb 100644 --- a/fs/open.c +++ b/fs/open.c @@ -1126,7 +1126,7 @@ inline int build_open_flags(const struct open_how *how, struct open_flags *op) lookup_flags |= LOOKUP_IN_ROOT; if (how->resolve & RESOLVE_CACHED) { /* Don't bother even trying for create/truncate/tmpfile open */ - if (flags & (O_TRUNC | O_CREAT | O_TMPFILE)) + if (flags & (O_TRUNC | O_CREAT | __O_TMPFILE)) return -EAGAIN; lookup_flags |= LOOKUP_CACHED; } From 8089eb93d6787dbf348863e935698b4610d90321 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Mon, 24 Jul 2023 10:42:29 +0800 Subject: [PATCH 263/513] drm/ttm: check null pointer before accessing when swapping MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 2dedcf414bb01b8d966eb445db1d181d92304fb2 upstream. Add a check to avoid null pointer dereference as below: [ 90.002283] general protection fault, probably for non-canonical address 0xdffffc0000000000: 0000 [#1] PREEMPT SMP KASAN NOPTI [ 90.002292] KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007] [ 90.002346] ? exc_general_protection+0x159/0x240 [ 90.002352] ? asm_exc_general_protection+0x26/0x30 [ 90.002357] ? ttm_bo_evict_swapout_allowable+0x322/0x5e0 [ttm] [ 90.002365] ? ttm_bo_evict_swapout_allowable+0x42e/0x5e0 [ttm] [ 90.002373] ttm_bo_swapout+0x134/0x7f0 [ttm] [ 90.002383] ? __pfx_ttm_bo_swapout+0x10/0x10 [ttm] [ 90.002391] ? lock_acquire+0x44d/0x4f0 [ 90.002398] ? ttm_device_swapout+0xa5/0x260 [ttm] [ 90.002412] ? lock_acquired+0x355/0xa00 [ 90.002416] ? do_raw_spin_trylock+0xb6/0x190 [ 90.002421] ? __pfx_lock_acquired+0x10/0x10 [ 90.002426] ? ttm_global_swapout+0x25/0x210 [ttm] [ 90.002442] ttm_device_swapout+0x198/0x260 [ttm] [ 90.002456] ? __pfx_ttm_device_swapout+0x10/0x10 [ttm] [ 90.002472] ttm_global_swapout+0x75/0x210 [ttm] [ 90.002486] ttm_tt_populate+0x187/0x3f0 [ttm] [ 90.002501] ttm_bo_handle_move_mem+0x437/0x590 [ttm] [ 90.002517] ttm_bo_validate+0x275/0x430 [ttm] [ 90.002530] ? __pfx_ttm_bo_validate+0x10/0x10 [ttm] [ 90.002544] ? kasan_save_stack+0x33/0x60 [ 90.002550] ? kasan_set_track+0x25/0x30 [ 90.002554] ? __kasan_kmalloc+0x8f/0xa0 [ 90.002558] ? amdgpu_gtt_mgr_new+0x81/0x420 [amdgpu] [ 90.003023] ? ttm_resource_alloc+0xf6/0x220 [ttm] [ 90.003038] amdgpu_bo_pin_restricted+0x2dd/0x8b0 [amdgpu] [ 90.003210] ? __x64_sys_ioctl+0x131/0x1a0 [ 90.003210] ? 
do_syscall_64+0x60/0x90 Fixes: a2848d08742c ("drm/ttm: never consider pinned BOs for eviction&swap") Tested-by: Mikhail Gavrilov Signed-off-by: Guchun Chen Reviewed-by: Alex Deucher Reviewed-by: Christian König Cc: stable@vger.kernel.org Link: https://patchwork.freedesktop.org/patch/msgid/20230724024229.1118444-1-guchun.chen@amd.com Signed-off-by: Christian König Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/ttm/ttm_bo.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 4d0ef5ab2531..391ed462f7fb 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -606,7 +606,8 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo, if (bo->pin_count) { *locked = false; - *busy = false; + if (busy) + *busy = false; return false; } From b44d28b98f185d2f2348aa3c3636838c316f889e Mon Sep 17 00:00:00 2001 From: Hou Tao Date: Sat, 29 Jul 2023 17:51:06 +0800 Subject: [PATCH 264/513] bpf, cpumap: Make sure kthread is running before map update returns commit 640a604585aa30f93e39b17d4d6ba69fcb1e66c9 upstream. The following warning was reported when running stress-mode enabled xdp_redirect_cpu with some RT threads: ------------[ cut here ]------------ WARNING: CPU: 4 PID: 65 at kernel/bpf/cpumap.c:135 CPU: 4 PID: 65 Comm: kworker/4:1 Not tainted 6.5.0-rc2+ #1 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996) Workqueue: events cpu_map_kthread_stop RIP: 0010:put_cpu_map_entry+0xda/0x220 ...... Call Trace: ? show_regs+0x65/0x70 ? __warn+0xa5/0x240 ...... ? put_cpu_map_entry+0xda/0x220 cpu_map_kthread_stop+0x41/0x60 process_one_work+0x6b0/0xb80 worker_thread+0x96/0x720 kthread+0x1a5/0x1f0 ret_from_fork+0x3a/0x70 ret_from_fork_asm+0x1b/0x30 The root cause is the same as commit 436901649731 ("bpf: cpumap: Fix memory leak in cpu_map_update_elem"). The kthread is stopped prematurely by kthread_stop() in cpu_map_kthread_stop(), and kthread() doesn't call cpu_map_kthread_run() at all but XDP program has already queued some frames or skbs into ptr_ring. So when __cpu_map_ring_cleanup() checks the ptr_ring, it will find it was not emptied and report a warning. An alternative fix is to use __cpu_map_ring_cleanup() to drop these pending frames or skbs when kthread_stop() returns -EINTR, but it may confuse the user, because these frames or skbs have been handled correctly by XDP program. So instead of dropping these frames or skbs, just make sure the per-cpu kthread is running before __cpu_map_entry_alloc() returns. After apply the fix, the error handle for kthread_stop() will be unnecessary because it will always return 0, so just remove it. 
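The handshake described above, reduced to a generic sketch (names are made up; the hunk below does the same thing with rcpu->kthread_running): the thread completes a completion as its first action, and the creator waits for it after wake_up_process(), so a later kthread_stop() can never land before the thread body has run.

#include <linux/completion.h>
#include <linux/err.h>
#include <linux/kthread.h>
#include <linux/sched.h>

struct worker {
        struct completion running;
        struct task_struct *task;
};

static int worker_fn(void *data)
{
        struct worker *w = data;

        complete(&w->running);          /* first thing: report "I am running" */

        while (!kthread_should_stop()) {
                /* drain queued work here, then sleep until woken */
                schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT);
        }
        return 0;
}

static int worker_start(struct worker *w)
{
        init_completion(&w->running);
        w->task = kthread_create(worker_fn, w, "example-worker");
        if (IS_ERR(w->task))
                return PTR_ERR(w->task);

        wake_up_process(w->task);
        /* do not return before the thread body runs, so kthread_stop()
         * cannot fire "too early" and strand already-queued work */
        wait_for_completion(&w->running);
        return 0;
}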
Fixes: 6710e1126934 ("bpf: introduce new bpf cpu map type BPF_MAP_TYPE_CPUMAP") Signed-off-by: Hou Tao Reviewed-by: Pu Lehui Acked-by: Jesper Dangaard Brouer Link: https://lore.kernel.org/r/20230729095107.1722450-2-houtao@huaweicloud.com Signed-off-by: Martin KaFai Lau Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/cpumap.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/kernel/bpf/cpumap.c b/kernel/bpf/cpumap.c index 17f52efba1c8..8d1c4b3ee760 100644 --- a/kernel/bpf/cpumap.c +++ b/kernel/bpf/cpumap.c @@ -26,6 +26,7 @@ #include #include #include +#include #include #include /* netif_receive_skb_list */ @@ -70,6 +71,7 @@ struct bpf_cpu_map_entry { struct rcu_head rcu; struct work_struct kthread_stop_wq; + struct completion kthread_running; }; struct bpf_cpu_map { @@ -163,7 +165,6 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu) static void cpu_map_kthread_stop(struct work_struct *work) { struct bpf_cpu_map_entry *rcpu; - int err; rcpu = container_of(work, struct bpf_cpu_map_entry, kthread_stop_wq); @@ -173,14 +174,7 @@ static void cpu_map_kthread_stop(struct work_struct *work) rcu_barrier(); /* kthread_stop will wake_up_process and wait for it to complete */ - err = kthread_stop(rcpu->kthread); - if (err) { - /* kthread_stop may be called before cpu_map_kthread_run - * is executed, so we need to release the memory related - * to rcpu. - */ - put_cpu_map_entry(rcpu); - } + kthread_stop(rcpu->kthread); } static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu, @@ -308,11 +302,11 @@ static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames, return nframes; } - static int cpu_map_kthread_run(void *data) { struct bpf_cpu_map_entry *rcpu = data; + complete(&rcpu->kthread_running); set_current_state(TASK_INTERRUPTIBLE); /* When kthread gives stop order, then rcpu have been disconnected @@ -475,6 +469,7 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, goto free_ptr_ring; /* Setup kthread */ + init_completion(&rcpu->kthread_running); rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa, "cpumap/%d/map:%d", cpu, map->id); @@ -488,6 +483,12 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value, kthread_bind(rcpu->kthread, cpu); wake_up_process(rcpu->kthread); + /* Make sure kthread has been running, so kthread_stop() will not + * stop the kthread prematurely and all pending frames or skbs + * will be handled by the kthread before kthread_stop() returns. + */ + wait_for_completion(&rcpu->kthread_running); + return rcpu; free_prog: From 0d6f639f1dcd90b254b616faf4459b9b32e19713 Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Thu, 3 Aug 2023 11:35:53 -0700 Subject: [PATCH 265/513] file: reinstate f_pos locking optimization for regular files commit 797964253d358cf8d705614dda394dbe30120223 upstream. In commit 20ea1e7d13c1 ("file: always lock position for FMODE_ATOMIC_POS") we ended up always taking the file pos lock, because pidfd_getfd() could get a reference to the file even when it didn't have an elevated file count due to threading of other sharing cases. But Mateusz Guzik reports that the extra locking is actually measurable, so let's re-introduce the optimization, and only force the locking for directory traversal. Directories need the lock for correctness reasons, while regular files only need it for "POSIX semantics". 
Since pidfd_getfd() is about debuggers etc special things that are _way_ outside of POSIX, we can relax the rules for that case. Reported-by: Mateusz Guzik Cc: Christian Brauner Link: https://lore.kernel.org/linux-fsdevel/20230803095311.ijpvhx3fyrbkasul@f/ Signed-off-by: Linus Torvalds Signed-off-by: Greg Kroah-Hartman --- fs/file.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/fs/file.c b/fs/file.c index 1501bbf6306e..69a51d37b66d 100644 --- a/fs/file.c +++ b/fs/file.c @@ -1062,12 +1062,28 @@ unsigned long __fdget_raw(unsigned int fd) return __fget_light(fd, 0); } +/* + * Try to avoid f_pos locking. We only need it if the + * file is marked for FMODE_ATOMIC_POS, and it can be + * accessed multiple ways. + * + * Always do it for directories, because pidfd_getfd() + * can make a file accessible even if it otherwise would + * not be, and for directories this is a correctness + * issue, not a "POSIX requirement". + */ +static inline bool file_needs_f_pos_lock(struct file *file) +{ + return (file->f_mode & FMODE_ATOMIC_POS) && + (file_count(file) > 1 || S_ISDIR(file_inode(file)->i_mode)); +} + unsigned long __fdget_pos(unsigned int fd) { unsigned long v = __fdget(fd); struct file *file = (struct file *)(v & ~3); - if (file && (file->f_mode & FMODE_ATOMIC_POS)) { + if (file && file_needs_f_pos_lock(file)) { v |= FDPUT_POS_UNLOCK; mutex_lock(&file->f_pos_lock); } From 80ec112c199664a455e8372d5d7c38012409c71f Mon Sep 17 00:00:00 2001 From: Tetsuo Handa Date: Tue, 28 Mar 2023 20:05:16 +0900 Subject: [PATCH 266/513] fs/ntfs3: Use __GFP_NOWARN allocation at ntfs_load_attr_list() commit ea303f72d70ce2f0b0aa94ab127085289768c5a6 upstream. syzbot is reporting too large allocation at ntfs_load_attr_list(), for a crafted filesystem can have huge data_size. Reported-by: syzbot Link: https://syzkaller.appspot.com/bug?extid=89dbb3a789a5b9711793 Signed-off-by: Tetsuo Handa Signed-off-by: Konstantin Komarov Signed-off-by: Greg Kroah-Hartman --- fs/ntfs3/attrlist.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ntfs3/attrlist.c b/fs/ntfs3/attrlist.c index c0c6bcbc8c05..81c22df27c72 100644 --- a/fs/ntfs3/attrlist.c +++ b/fs/ntfs3/attrlist.c @@ -52,7 +52,7 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr) if (!attr->non_res) { lsize = le32_to_cpu(attr->res.data_size); - le = kmalloc(al_aligned(lsize), GFP_NOFS); + le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN); if (!le) { err = -ENOMEM; goto out; @@ -80,7 +80,7 @@ int ntfs_load_attr_list(struct ntfs_inode *ni, struct ATTRIB *attr) if (err < 0) goto out; - le = kmalloc(al_aligned(lsize), GFP_NOFS); + le = kmalloc(al_aligned(lsize), GFP_NOFS | __GFP_NOWARN); if (!le) { err = -ENOMEM; goto out; From afd9a31b5aa4b3747f382d44a7b03b7b5d0b7635 Mon Sep 17 00:00:00 2001 From: Prince Kumar Maurya Date: Tue, 30 May 2023 18:31:41 -0700 Subject: [PATCH 267/513] fs/sysv: Null check to prevent null-ptr-deref bug commit ea2b62f305893992156a798f665847e0663c9f41 upstream. sb_getblk(inode->i_sb, parent) return a null ptr and taking lock on that leads to the null-ptr-deref bug. 
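The pattern the fix applies, as a simplified sketch (illustrative only; the real hunk below additionally frees the block it had just allocated before bailing out): sb_getblk() can return NULL, so it must be checked before lock_buffer() touches the buffer head.

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/string.h>

static struct buffer_head *get_zeroed_block(struct super_block *sb,
                                            sector_t blocknr)
{
        struct buffer_head *bh = sb_getblk(sb, blocknr);

        if (!bh)                        /* may fail, e.g. under memory pressure */
                return NULL;            /* caller unwinds its own allocations */

        lock_buffer(bh);                /* safe only after the NULL check */
        memset(bh->b_data, 0, sb->s_blocksize);
        set_buffer_uptodate(bh);
        unlock_buffer(bh);
        return bh;
}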
Reported-by: syzbot+aad58150cbc64ba41bdc@syzkaller.appspotmail.com Closes: https://syzkaller.appspot.com/bug?extid=aad58150cbc64ba41bdc Signed-off-by: Prince Kumar Maurya Message-Id: <20230531013141.19487-1-princekumarmaurya06@gmail.com> Signed-off-by: Christian Brauner Signed-off-by: Greg Kroah-Hartman --- fs/sysv/itree.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/fs/sysv/itree.c b/fs/sysv/itree.c index 5a59d56a2038..1e9c520411f8 100644 --- a/fs/sysv/itree.c +++ b/fs/sysv/itree.c @@ -145,6 +145,10 @@ static int alloc_branch(struct inode *inode, */ parent = block_to_cpu(SYSV_SB(inode->i_sb), branch[n-1].key); bh = sb_getblk(inode->i_sb, parent); + if (!bh) { + sysv_free_block(inode->i_sb, branch[n].key); + break; + } lock_buffer(bh); memset(bh->b_data, 0, blocksize); branch[n].bh = bh; From fbe5a2fed8156cc19eb3b956602b0a1dd46a302d Mon Sep 17 00:00:00 2001 From: Sungwoo Kim Date: Wed, 31 May 2023 01:39:56 -0400 Subject: [PATCH 268/513] Bluetooth: L2CAP: Fix use-after-free in l2cap_sock_ready_cb commit 1728137b33c00d5a2b5110ed7aafb42e7c32e4a1 upstream. l2cap_sock_release(sk) frees sk. However, sk's children are still alive and point to the already free'd sk's address. To fix this, l2cap_sock_release(sk) also cleans sk's children. ================================================================== BUG: KASAN: use-after-free in l2cap_sock_ready_cb+0xb7/0x100 net/bluetooth/l2cap_sock.c:1650 Read of size 8 at addr ffff888104617aa8 by task kworker/u3:0/276 CPU: 0 PID: 276 Comm: kworker/u3:0 Not tainted 6.2.0-00001-gef397bd4d5fb-dirty #59 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014 Workqueue: hci2 hci_rx_work Call Trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0x72/0x95 lib/dump_stack.c:106 print_address_description mm/kasan/report.c:306 [inline] print_report+0x175/0x478 mm/kasan/report.c:417 kasan_report+0xb1/0x130 mm/kasan/report.c:517 l2cap_sock_ready_cb+0xb7/0x100 net/bluetooth/l2cap_sock.c:1650 l2cap_chan_ready+0x10e/0x1e0 net/bluetooth/l2cap_core.c:1386 l2cap_config_req+0x753/0x9f0 net/bluetooth/l2cap_core.c:4480 l2cap_bredr_sig_cmd net/bluetooth/l2cap_core.c:5739 [inline] l2cap_sig_channel net/bluetooth/l2cap_core.c:6509 [inline] l2cap_recv_frame+0xe2e/0x43c0 net/bluetooth/l2cap_core.c:7788 l2cap_recv_acldata+0x6ed/0x7e0 net/bluetooth/l2cap_core.c:8506 hci_acldata_packet net/bluetooth/hci_core.c:3813 [inline] hci_rx_work+0x66e/0xbc0 net/bluetooth/hci_core.c:4048 process_one_work+0x4ea/0x8e0 kernel/workqueue.c:2289 worker_thread+0x364/0x8e0 kernel/workqueue.c:2436 kthread+0x1b9/0x200 kernel/kthread.c:376 ret_from_fork+0x2c/0x50 arch/x86/entry/entry_64.S:308 Allocated by task 288: kasan_save_stack+0x22/0x50 mm/kasan/common.c:45 kasan_set_track+0x25/0x30 mm/kasan/common.c:52 ____kasan_kmalloc mm/kasan/common.c:374 [inline] __kasan_kmalloc+0x82/0x90 mm/kasan/common.c:383 kasan_kmalloc include/linux/kasan.h:211 [inline] __do_kmalloc_node mm/slab_common.c:968 [inline] __kmalloc+0x5a/0x140 mm/slab_common.c:981 kmalloc include/linux/slab.h:584 [inline] sk_prot_alloc+0x113/0x1f0 net/core/sock.c:2040 sk_alloc+0x36/0x3c0 net/core/sock.c:2093 l2cap_sock_alloc.constprop.0+0x39/0x1c0 net/bluetooth/l2cap_sock.c:1852 l2cap_sock_create+0x10d/0x220 net/bluetooth/l2cap_sock.c:1898 bt_sock_create+0x183/0x290 net/bluetooth/af_bluetooth.c:132 __sock_create+0x226/0x380 net/socket.c:1518 sock_create net/socket.c:1569 [inline] __sys_socket_create net/socket.c:1606 [inline] __sys_socket_create net/socket.c:1591 [inline] __sys_socket+0x112/0x200 
net/socket.c:1639 __do_sys_socket net/socket.c:1652 [inline] __se_sys_socket net/socket.c:1650 [inline] __x64_sys_socket+0x40/0x50 net/socket.c:1650 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x3f/0x90 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x72/0xdc Freed by task 288: kasan_save_stack+0x22/0x50 mm/kasan/common.c:45 kasan_set_track+0x25/0x30 mm/kasan/common.c:52 kasan_save_free_info+0x2e/0x50 mm/kasan/generic.c:523 ____kasan_slab_free mm/kasan/common.c:236 [inline] ____kasan_slab_free mm/kasan/common.c:200 [inline] __kasan_slab_free+0x10a/0x190 mm/kasan/common.c:244 kasan_slab_free include/linux/kasan.h:177 [inline] slab_free_hook mm/slub.c:1781 [inline] slab_free_freelist_hook mm/slub.c:1807 [inline] slab_free mm/slub.c:3787 [inline] __kmem_cache_free+0x88/0x1f0 mm/slub.c:3800 sk_prot_free net/core/sock.c:2076 [inline] __sk_destruct+0x347/0x430 net/core/sock.c:2168 sk_destruct+0x9c/0xb0 net/core/sock.c:2183 __sk_free+0x82/0x220 net/core/sock.c:2194 sk_free+0x7c/0xa0 net/core/sock.c:2205 sock_put include/net/sock.h:1991 [inline] l2cap_sock_kill+0x256/0x2b0 net/bluetooth/l2cap_sock.c:1257 l2cap_sock_release+0x1a7/0x220 net/bluetooth/l2cap_sock.c:1428 __sock_release+0x80/0x150 net/socket.c:650 sock_close+0x19/0x30 net/socket.c:1368 __fput+0x17a/0x5c0 fs/file_table.c:320 task_work_run+0x132/0x1c0 kernel/task_work.c:179 resume_user_mode_work include/linux/resume_user_mode.h:49 [inline] exit_to_user_mode_loop kernel/entry/common.c:171 [inline] exit_to_user_mode_prepare+0x113/0x120 kernel/entry/common.c:203 __syscall_exit_to_user_mode_work kernel/entry/common.c:285 [inline] syscall_exit_to_user_mode+0x21/0x50 kernel/entry/common.c:296 do_syscall_64+0x4c/0x90 arch/x86/entry/common.c:86 entry_SYSCALL_64_after_hwframe+0x72/0xdc The buggy address belongs to the object at ffff888104617800 which belongs to the cache kmalloc-1k of size 1024 The buggy address is located 680 bytes inside of 1024-byte region [ffff888104617800, ffff888104617c00) The buggy address belongs to the physical page: page:00000000dbca6a80 refcount:1 mapcount:0 mapping:0000000000000000 index:0xffff888104614000 pfn:0x104614 head:00000000dbca6a80 order:2 compound_mapcount:0 subpages_mapcount:0 compound_pincount:0 flags: 0x200000000010200(slab|head|node=0|zone=2) raw: 0200000000010200 ffff888100041dc0 ffffea0004212c10 ffffea0004234b10 raw: ffff888104614000 0000000000080002 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff888104617980: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff888104617a00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb >ffff888104617a80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff888104617b00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff888104617b80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ================================================================== Ack: This bug is found by FuzzBT with a modified Syzkaller. Other contributors are Ruoyu Wu and Hui Peng. 
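Reduced to its essence, the release ordering looks like this (a generic sketch with made-up structures, not the Bluetooth code): children that still point at the parent are detached before the parent's memory can go away.

#include <linux/list.h>
#include <linux/slab.h>

struct parent_obj {
        struct list_head children;
};

struct child_obj {
        struct parent_obj *parent;
        struct list_head node;
};

static void parent_release(struct parent_obj *p)
{
        struct child_obj *c, *tmp;

        /* 1. break every child->parent link first */
        list_for_each_entry_safe(c, tmp, &p->children, node) {
                list_del_init(&c->node);
                c->parent = NULL;       /* child can now outlive the parent safely */
        }

        /* 2. only now is it safe to let the parent be freed */
        kfree(p);                       /* the real code drops a socket reference */
}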
Signed-off-by: Sungwoo Kim Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/bluetooth/l2cap_sock.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c index a267c9b6bcef..756523e5402a 100644 --- a/net/bluetooth/l2cap_sock.c +++ b/net/bluetooth/l2cap_sock.c @@ -45,6 +45,7 @@ static const struct proto_ops l2cap_sock_ops; static void l2cap_sock_init(struct sock *sk, struct sock *parent); static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio, int kern); +static void l2cap_sock_cleanup_listen(struct sock *parent); bool l2cap_is_socket(struct socket *sock) { @@ -1414,6 +1415,7 @@ static int l2cap_sock_release(struct socket *sock) if (!sk) return 0; + l2cap_sock_cleanup_listen(sk); bt_sock_unlink(&l2cap_sk_list, sk); err = l2cap_sock_shutdown(sock, SHUT_RDWR); From 27d0f755d649d388fcd12f01436c9a33289e14e3 Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Wed, 12 Jul 2023 10:15:10 -0400 Subject: [PATCH 269/513] net: usbnet: Fix WARNING in usbnet_start_xmit/usb_submit_urb commit 5e1627cb43ddf1b24b92eb26f8d958a3f5676ccb upstream. The syzbot fuzzer identified a problem in the usbnet driver: usb 1-1: BOGUS urb xfer, pipe 3 != type 1 WARNING: CPU: 0 PID: 754 at drivers/usb/core/urb.c:504 usb_submit_urb+0xed6/0x1880 drivers/usb/core/urb.c:504 Modules linked in: CPU: 0 PID: 754 Comm: kworker/0:2 Not tainted 6.4.0-rc7-syzkaller-00014-g692b7dc87ca6 #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/27/2023 Workqueue: mld mld_ifc_work RIP: 0010:usb_submit_urb+0xed6/0x1880 drivers/usb/core/urb.c:504 Code: 7c 24 18 e8 2c b4 5b fb 48 8b 7c 24 18 e8 42 07 f0 fe 41 89 d8 44 89 e1 4c 89 ea 48 89 c6 48 c7 c7 a0 c9 fc 8a e8 5a 6f 23 fb <0f> 0b e9 58 f8 ff ff e8 fe b3 5b fb 48 81 c5 c0 05 00 00 e9 84 f7 RSP: 0018:ffffc9000463f568 EFLAGS: 00010086 RAX: 0000000000000000 RBX: 0000000000000001 RCX: 0000000000000000 RDX: ffff88801eb28000 RSI: ffffffff814c03b7 RDI: 0000000000000001 RBP: ffff8881443b7190 R08: 0000000000000001 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000001 R12: 0000000000000003 R13: ffff88802a77cb18 R14: 0000000000000003 R15: ffff888018262500 FS: 0000000000000000(0000) GS:ffff8880b9800000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000556a99c15a18 CR3: 0000000028c71000 CR4: 0000000000350ef0 Call Trace: usbnet_start_xmit+0xfe5/0x2190 drivers/net/usb/usbnet.c:1453 __netdev_start_xmit include/linux/netdevice.h:4918 [inline] netdev_start_xmit include/linux/netdevice.h:4932 [inline] xmit_one net/core/dev.c:3578 [inline] dev_hard_start_xmit+0x187/0x700 net/core/dev.c:3594 ... This bug is caused by the fact that usbnet trusts the bulk endpoint addresses its probe routine receives in the driver_info structure, and it does not check to see that these endpoints actually exist and have the expected type and directions. The fix is simply to add such a check. 
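Condensed from the fix (a sketch, not the full probe path): the endpoint numbers hard-coded in driver_info are only trusted after checking that the interface really exposes bulk endpoints at those addresses.

#include <linux/usb.h>
#include <linux/usb/usbnet.h>

static int check_static_endpoints(struct usb_interface *intf,
                                  const struct driver_info *info)
{
        u8 ep_addrs[3] = {
                info->in + USB_DIR_IN,          /* expected bulk-in address  */
                info->out + USB_DIR_OUT,        /* expected bulk-out address */
                0                               /* terminator */
        };

        if (!usb_check_bulk_endpoints(intf, ep_addrs))
                return -EINVAL;                 /* bogus descriptor, refuse to bind */
        return 0;
}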
Reported-and-tested-by: syzbot+63ee658b9a100ffadbe2@syzkaller.appspotmail.com Closes: https://lore.kernel.org/linux-usb/000000000000a56e9105d0cec021@google.com/ Signed-off-by: Alan Stern CC: Oliver Neukum Link: https://lore.kernel.org/r/ea152b6d-44df-4f8a-95c6-4db51143dcc1@rowland.harvard.edu Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- drivers/net/usb/usbnet.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c index e4fbb4d86606..566aa01ad281 100644 --- a/drivers/net/usb/usbnet.c +++ b/drivers/net/usb/usbnet.c @@ -1771,6 +1771,10 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) } else if (!info->in || !info->out) status = usbnet_get_endpoints (dev, udev); else { + u8 ep_addrs[3] = { + info->in + USB_DIR_IN, info->out + USB_DIR_OUT, 0 + }; + dev->in = usb_rcvbulkpipe (xdev, info->in); dev->out = usb_sndbulkpipe (xdev, info->out); if (!(info->flags & FLAG_NO_SETINT)) @@ -1780,6 +1784,8 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) else status = 0; + if (status == 0 && !usb_check_bulk_endpoints(udev, ep_addrs)) + status = -EINVAL; } if (status >= 0 && dev->status) status = init_status (dev, udev); From 0ccfe21949bc9f706a86ee7351b74375c0745757 Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Thu, 15 Jun 2023 13:38:48 +0200 Subject: [PATCH 270/513] fs: Protect reconfiguration of sb read-write from racing writes commit c541dce86c537714b6761a79a969c1623dfa222b upstream. The reconfigure / remount code takes a lot of effort to protect filesystem's reconfiguration code from racing writes on remounting read-only. However during remounting read-only filesystem to read-write mode userspace writes can start immediately once we clear SB_RDONLY flag. This is inconvenient for example for ext4 because we need to do some writes to the filesystem (such as preparation of quota files) before we can take userspace writes so we are clearing SB_RDONLY flag before we are fully ready to accept userpace writes and syzbot has found a way to exploit this [1]. Also as far as I'm reading the code the filesystem remount code was protected from racing writes in the legacy mount path by the mount's MNT_READONLY flag so this is relatively new problem. It is actually fairly easy to protect remount read-write from racing writes using sb->s_readonly_remount flag so let's just do that instead of having to workaround these races in the filesystem code. 
[1] https://lore.kernel.org/all/00000000000006a0df05f6667499@google.com/T/ Signed-off-by: Jan Kara Message-Id: <20230615113848.8439-1-jack@suse.cz> Signed-off-by: Christian Brauner Signed-off-by: Greg Kroah-Hartman --- fs/super.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/fs/super.c b/fs/super.c index 297630540f43..048576b19af6 100644 --- a/fs/super.c +++ b/fs/super.c @@ -863,6 +863,7 @@ int reconfigure_super(struct fs_context *fc) struct super_block *sb = fc->root->d_sb; int retval; bool remount_ro = false; + bool remount_rw = false; bool force = fc->sb_flags & SB_FORCE; if (fc->sb_flags_mask & ~MS_RMT_MASK) @@ -880,7 +881,7 @@ int reconfigure_super(struct fs_context *fc) bdev_read_only(sb->s_bdev)) return -EACCES; #endif - + remount_rw = !(fc->sb_flags & SB_RDONLY) && sb_rdonly(sb); remount_ro = (fc->sb_flags & SB_RDONLY) && !sb_rdonly(sb); } @@ -910,6 +911,14 @@ int reconfigure_super(struct fs_context *fc) if (retval) return retval; } + } else if (remount_rw) { + /* + * We set s_readonly_remount here to protect filesystem's + * reconfigure code from writes from userspace until + * reconfigure finishes. + */ + sb->s_readonly_remount = 1; + smp_wmb(); } if (fc->ops->reconfigure) { From 596be6716bc517d3f73591fc532772f5dfcf09fc Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Tue, 13 Jun 2023 12:25:52 +0200 Subject: [PATCH 271/513] ext2: Drop fragment support commit 404615d7f1dcd4cca200e9a7a9df3a1dcae1dd62 upstream. Ext2 has fields in superblock reserved for subblock allocation support. However that never landed. Drop the many years dead code. Reported-by: syzbot+af5e10f73dbff48f70af@syzkaller.appspotmail.com Signed-off-by: Jan Kara Signed-off-by: Greg Kroah-Hartman --- fs/ext2/ext2.h | 12 ------------ fs/ext2/super.c | 23 ++++------------------- 2 files changed, 4 insertions(+), 31 deletions(-) diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h index a610c096f3a9..5207ce805a39 100644 --- a/fs/ext2/ext2.h +++ b/fs/ext2/ext2.h @@ -70,10 +70,7 @@ struct mb_cache; * second extended-fs super-block data in memory */ struct ext2_sb_info { - unsigned long s_frag_size; /* Size of a fragment in bytes */ - unsigned long s_frags_per_block;/* Number of fragments per block */ unsigned long s_inodes_per_block;/* Number of inodes per block */ - unsigned long s_frags_per_group;/* Number of fragments in a group */ unsigned long s_blocks_per_group;/* Number of blocks in a group */ unsigned long s_inodes_per_group;/* Number of inodes in a group */ unsigned long s_itb_per_group; /* Number of inode table blocks per group */ @@ -187,15 +184,6 @@ static inline struct ext2_sb_info *EXT2_SB(struct super_block *sb) #define EXT2_INODE_SIZE(s) (EXT2_SB(s)->s_inode_size) #define EXT2_FIRST_INO(s) (EXT2_SB(s)->s_first_ino) -/* - * Macro-instructions used to manage fragments - */ -#define EXT2_MIN_FRAG_SIZE 1024 -#define EXT2_MAX_FRAG_SIZE 4096 -#define EXT2_MIN_FRAG_LOG_SIZE 10 -#define EXT2_FRAG_SIZE(s) (EXT2_SB(s)->s_frag_size) -#define EXT2_FRAGS_PER_BLOCK(s) (EXT2_SB(s)->s_frags_per_block) - /* * Structure of a blocks group descriptor */ diff --git a/fs/ext2/super.c b/fs/ext2/super.c index 486a43e34795..81798b7cbde2 100644 --- a/fs/ext2/super.c +++ b/fs/ext2/super.c @@ -670,10 +670,9 @@ static int ext2_setup_super (struct super_block * sb, es->s_max_mnt_count = cpu_to_le16(EXT2_DFL_MAX_MNT_COUNT); le16_add_cpu(&es->s_mnt_count, 1); if (test_opt (sb, DEBUG)) - ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, fs=%lu, gc=%lu, " + ext2_msg(sb, KERN_INFO, "%s, %s, bs=%lu, gc=%lu, " "bpg=%lu, ipg=%lu, 
mo=%04lx]", EXT2FS_VERSION, EXT2FS_DATE, sb->s_blocksize, - sbi->s_frag_size, sbi->s_groups_count, EXT2_BLOCKS_PER_GROUP(sb), EXT2_INODES_PER_GROUP(sb), @@ -1012,14 +1011,7 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) } } - sbi->s_frag_size = EXT2_MIN_FRAG_SIZE << - le32_to_cpu(es->s_log_frag_size); - if (sbi->s_frag_size == 0) - goto cantfind_ext2; - sbi->s_frags_per_block = sb->s_blocksize / sbi->s_frag_size; - sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group); - sbi->s_frags_per_group = le32_to_cpu(es->s_frags_per_group); sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group); sbi->s_inodes_per_block = sb->s_blocksize / EXT2_INODE_SIZE(sb); @@ -1045,11 +1037,10 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) goto failed_mount; } - if (sb->s_blocksize != sbi->s_frag_size) { + if (es->s_log_frag_size != es->s_log_block_size) { ext2_msg(sb, KERN_ERR, - "error: fragsize %lu != blocksize %lu" - "(not supported yet)", - sbi->s_frag_size, sb->s_blocksize); + "error: fragsize log %u != blocksize log %u", + le32_to_cpu(es->s_log_frag_size), sb->s_blocksize_bits); goto failed_mount; } @@ -1059,12 +1050,6 @@ static int ext2_fill_super(struct super_block *sb, void *data, int silent) sbi->s_blocks_per_group); goto failed_mount; } - if (sbi->s_frags_per_group > sb->s_blocksize * 8) { - ext2_msg(sb, KERN_ERR, - "error: #fragments per group too big: %lu", - sbi->s_frags_per_group); - goto failed_mount; - } if (sbi->s_inodes_per_group < sbi->s_inodes_per_block || sbi->s_inodes_per_group > sb->s_blocksize * 8) { ext2_msg(sb, KERN_ERR, From f6807b62fb0e3da3ead8a91bdf130dbf79fa8414 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Sun, 25 Jun 2023 00:10:21 +0530 Subject: [PATCH 272/513] mtd: rawnand: omap_elm: Fix incorrect type in assignment [ Upstream commit d8403b9eeee66d5dd81ecb9445800b108c267ce3 ] Once the ECC word endianness is converted to BE32, we force cast it to u32 so we can use elm_write_reg() which in turn uses writel(). 
Fixes below sparse warnings: drivers/mtd/nand/raw/omap_elm.c:180:37: sparse: expected unsigned int [usertype] val drivers/mtd/nand/raw/omap_elm.c:180:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:185:37: sparse: expected unsigned int [usertype] val drivers/mtd/nand/raw/omap_elm.c:185:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:190:37: sparse: expected unsigned int [usertype] val drivers/mtd/nand/raw/omap_elm.c:190:37: sparse: got restricted __be32 [usertype] >> drivers/mtd/nand/raw/omap_elm.c:200:40: sparse: sparse: restricted __be32 degrades to integer drivers/mtd/nand/raw/omap_elm.c:206:39: sparse: sparse: restricted __be32 degrades to integer drivers/mtd/nand/raw/omap_elm.c:210:37: sparse: expected unsigned int [assigned] [usertype] val drivers/mtd/nand/raw/omap_elm.c:210:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:213:37: sparse: expected unsigned int [assigned] [usertype] val drivers/mtd/nand/raw/omap_elm.c:213:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:216:37: sparse: expected unsigned int [assigned] [usertype] val drivers/mtd/nand/raw/omap_elm.c:216:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:219:37: sparse: expected unsigned int [assigned] [usertype] val drivers/mtd/nand/raw/omap_elm.c:219:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:222:37: sparse: expected unsigned int [assigned] [usertype] val drivers/mtd/nand/raw/omap_elm.c:222:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:225:37: sparse: expected unsigned int [assigned] [usertype] val drivers/mtd/nand/raw/omap_elm.c:225:37: sparse: got restricted __be32 [usertype] drivers/mtd/nand/raw/omap_elm.c:228:39: sparse: sparse: restricted __be32 degrades to integer Fixes: bf22433575ef ("mtd: devices: elm: Add support for ELM error correction") Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-kbuild-all/202306212211.WDXokuWh-lkp@intel.com/ Signed-off-by: Roger Quadros Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/20230624184021.7740-1-rogerq@kernel.org Signed-off-by: Sasha Levin --- drivers/mtd/nand/raw/omap_elm.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c index 2b21ce04b3ec..1a48347be3fe 100644 --- a/drivers/mtd/nand/raw/omap_elm.c +++ b/drivers/mtd/nand/raw/omap_elm.c @@ -177,17 +177,17 @@ static void elm_load_syndrome(struct elm_info *info, switch (info->bch_type) { case BCH8_ECC: /* syndrome fragment 0 = ecc[9-12B] */ - val = cpu_to_be32(*(u32 *) &ecc[9]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[9]); elm_write_reg(info, offset, val); /* syndrome fragment 1 = ecc[5-8B] */ offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[5]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[5]); elm_write_reg(info, offset, val); /* syndrome fragment 2 = ecc[1-4B] */ offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[1]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[1]); elm_write_reg(info, offset, val); /* syndrome fragment 3 = ecc[0B] */ @@ -197,35 +197,35 @@ static void elm_load_syndrome(struct elm_info *info, break; case BCH4_ECC: /* syndrome fragment 0 = ecc[20-52b] bits */ - val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) | + val = ((__force u32)cpu_to_be32(*(u32 *)&ecc[3]) >> 4) | ((ecc[2] & 0xf) << 28); elm_write_reg(info, offset, val); /* syndrome fragment 1 = ecc[0-20b] 
bits */ offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12; + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 12; elm_write_reg(info, offset, val); break; case BCH16_ECC: - val = cpu_to_be32(*(u32 *) &ecc[22]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[22]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[18]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[18]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[14]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[14]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[10]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[10]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[6]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[6]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[2]); + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[2]); elm_write_reg(info, offset, val); offset += 4; - val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16; + val = (__force u32)cpu_to_be32(*(u32 *)&ecc[0]) >> 16; elm_write_reg(info, offset, val); break; default: From 1796b492f8cce7f37e50fd40231698700c7ac90c Mon Sep 17 00:00:00 2001 From: Johan Jonker Date: Fri, 14 Jul 2023 17:21:01 +0200 Subject: [PATCH 273/513] mtd: rawnand: rockchip: fix oobfree offset and description [ Upstream commit d0ca3b92b7a6f42841ea9da8492aaf649db79780 ] Rockchip boot blocks are written per 4 x 512 byte sectors per page. Each page with boot blocks must have a page address (PA) pointer in OOB to the next page. The currently advertised free OOB area starts at offset 6, like if 4 PA bytes were located right after the BBM. This is wrong as the PA bytes are located right before the ECC bytes. Fix the layout by allowing access to all bytes between the BBM and the PA bytes instead of reserving 4 bytes right after the BBM. This change breaks existing jffs2 users. Fixes: 058e0e847d54 ("mtd: rawnand: rockchip: NFC driver for RK3308, RK2928 and others") Signed-off-by: Johan Jonker Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/d202f12d-188c-20e8-f2c2-9cc874ad4d22@gmail.com Signed-off-by: Sasha Levin --- drivers/mtd/nand/raw/rockchip-nand-controller.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c index b5405bc7ca3a..45cdc6ca210c 100644 --- a/drivers/mtd/nand/raw/rockchip-nand-controller.c +++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c @@ -562,9 +562,10 @@ static int rk_nfc_write_page_raw(struct nand_chip *chip, const u8 *buf, * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3 * * The rk_nfc_ooblayout_free() function already has reserved - * these 4 bytes with: + * these 4 bytes together with 2 bytes for BBM + * by reducing it's length: * - * oob_region->offset = NFC_SYS_DATA_SIZE + 2; + * oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2; */ if (!i) memcpy(rk_nfc_oob_ptr(chip, i), @@ -935,12 +936,8 @@ static int rk_nfc_ooblayout_free(struct mtd_info *mtd, int section, if (section) return -ERANGE; - /* - * The beginning of the OOB area stores the reserved data for the NFC, - * the size of the reserved data is NFC_SYS_DATA_SIZE bytes. 
- */ oob_region->length = rknand->metadata_size - NFC_SYS_DATA_SIZE - 2; - oob_region->offset = NFC_SYS_DATA_SIZE + 2; + oob_region->offset = 2; return 0; } From 70643e98cbc39bc4120d4f35dbd79233dfbe83a3 Mon Sep 17 00:00:00 2001 From: Johan Jonker Date: Fri, 14 Jul 2023 17:21:21 +0200 Subject: [PATCH 274/513] mtd: rawnand: rockchip: Align hwecc vs. raw page helper layouts [ Upstream commit ea690ad78dd611e3906df5b948a516000b05c1cb ] Currently, read/write_page_hwecc() and read/write_page_raw() are not aligned: there is a mismatch in the OOB bytes which are not read/written at the same offset in both cases (raw vs. hwecc). This is a real problem when relying on the presence of the Page Addresses (PA) when using the NAND chip as a boot device, as the BootROM expects additional data in the OOB area at specific locations. Rockchip boot blocks are written per 4 x 512 byte sectors per page. Each page with boot blocks must have a page address (PA) pointer in OOB to the next page. Pages are written in a pattern depending on the NAND chip ID. Generate boot block page address and pattern for hwecc in user space and copy PA data to/from the already reserved last 4 bytes before ECC in the chip->oob_poi data layout. Align the different helpers. This change breaks existing jffs2 users. Fixes: 058e0e847d54 ("mtd: rawnand: rockchip: NFC driver for RK3308, RK2928 and others") Signed-off-by: Johan Jonker Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/5e782c08-862b-51ae-47ff-3299940928ca@gmail.com Signed-off-by: Sasha Levin --- .../mtd/nand/raw/rockchip-nand-controller.c | 34 ++++++++++++------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/drivers/mtd/nand/raw/rockchip-nand-controller.c b/drivers/mtd/nand/raw/rockchip-nand-controller.c index 45cdc6ca210c..99242bd68437 100644 --- a/drivers/mtd/nand/raw/rockchip-nand-controller.c +++ b/drivers/mtd/nand/raw/rockchip-nand-controller.c @@ -598,7 +598,7 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, int pages_per_blk = mtd->erasesize / mtd->writesize; int ret = 0, i, boot_rom_mode = 0; dma_addr_t dma_data, dma_oob; - u32 reg; + u32 tmp; u8 *oob; nand_prog_page_begin_op(chip, page, 0, NULL, 0); @@ -625,6 +625,13 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, * * 0xFF 0xFF 0xFF 0xFF | BBM OOB1 OOB2 OOB3 | ... * + * The code here just swaps the first 4 bytes with the last + * 4 bytes without losing any data. + * + * The chip->oob_poi data layout: + * + * BBM OOB1 OOB2 OOB3 |......| PA0 PA1 PA2 PA3 + * * Configure the ECC algorithm supported by the boot ROM. 
*/ if ((page < (pages_per_blk * rknand->boot_blks)) && @@ -635,21 +642,17 @@ static int rk_nfc_write_page_hwecc(struct nand_chip *chip, const u8 *buf, } for (i = 0; i < ecc->steps; i++) { - if (!i) { - reg = 0xFFFFFFFF; - } else { + if (!i) + oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE; + else oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; - reg = oob[0] | oob[1] << 8 | oob[2] << 16 | - oob[3] << 24; - } - if (!i && boot_rom_mode) - reg = (page & (pages_per_blk - 1)) * 4; + tmp = oob[0] | oob[1] << 8 | oob[2] << 16 | oob[3] << 24; if (nfc->cfg->type == NFC_V9) - nfc->oob_buf[i] = reg; + nfc->oob_buf[i] = tmp; else - nfc->oob_buf[i * (oob_step / 4)] = reg; + nfc->oob_buf[i * (oob_step / 4)] = tmp; } dma_data = dma_map_single(nfc->dev, (void *)nfc->page_buf, @@ -812,12 +815,17 @@ static int rk_nfc_read_page_hwecc(struct nand_chip *chip, u8 *buf, int oob_on, goto timeout_err; } - for (i = 1; i < ecc->steps; i++) { - oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; + for (i = 0; i < ecc->steps; i++) { + if (!i) + oob = chip->oob_poi + (ecc->steps - 1) * NFC_SYS_DATA_SIZE; + else + oob = chip->oob_poi + (i - 1) * NFC_SYS_DATA_SIZE; + if (nfc->cfg->type == NFC_V9) tmp = nfc->oob_buf[i]; else tmp = nfc->oob_buf[i * (oob_step / 4)]; + *oob++ = (u8)tmp; *oob++ = (u8)(tmp >> 8); *oob++ = (u8)(tmp >> 16); From eb7a5e4d14c8659cb97db6863316280e15f67209 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 19 Jul 2023 23:55:01 +0200 Subject: [PATCH 275/513] mtd: rawnand: fsl_upm: Fix an off-by one test in fun_exec_op() [ Upstream commit c6abce60338aa2080973cd95be0aedad528bb41f ] 'op-cs' is copied in 'fun->mchip_number' which is used to access the 'mchip_offsets' and the 'rnb_gpio' arrays. These arrays have NAND_MAX_CHIPS elements, so the index must be below this limit. Fix the sanity check in order to avoid the NAND_MAX_CHIPS value. This would lead to out-of-bound accesses. Fixes: 54309d657767 ("mtd: rawnand: fsl_upm: Implement exec_op()") Signed-off-by: Christophe JAILLET Reviewed-by: Dan Carpenter Signed-off-by: Miquel Raynal Link: https://lore.kernel.org/linux-mtd/cd01cba1c7eda58bdabaae174c78c067325803d2.1689803636.git.christophe.jaillet@wanadoo.fr Signed-off-by: Sasha Levin --- drivers/mtd/nand/raw/fsl_upm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/nand/raw/fsl_upm.c b/drivers/mtd/nand/raw/fsl_upm.c index b3cc427100a2..636e65328bb3 100644 --- a/drivers/mtd/nand/raw/fsl_upm.c +++ b/drivers/mtd/nand/raw/fsl_upm.c @@ -135,7 +135,7 @@ static int fun_exec_op(struct nand_chip *chip, const struct nand_operation *op, unsigned int i; int ret; - if (op->cs > NAND_MAX_CHIPS) + if (op->cs >= NAND_MAX_CHIPS) return -EINVAL; if (check_only) From 5058c14440401f27cbe6519f6899f2204e60db3e Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Mon, 24 Jul 2023 23:43:20 +0530 Subject: [PATCH 276/513] powerpc/mm/altmap: Fix altmap boundary check [ Upstream commit 6722b25712054c0f903b839b8f5088438dd04df3 ] altmap->free includes the entire free space from which altmap blocks can be allocated. So when checking whether the kernel is doing altmap block free, compute the boundary correctly, otherwise memory hotunplug can fail. 
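As an illustration only (not part of the patch), the corrected boundary
check can be sketched in standalone C. The structure and helper names
below are invented for the example; the key point, as stated above, is
that "free" already describes the entire space altmap blocks are
allocated from, so "alloc" and "align" must not be added on top of it:

  struct altmap_sketch {
          unsigned long base_pfn;  /* first pfn backed by the altmap */
          unsigned long reserve;   /* reserved pfns at the start */
          unsigned long free;      /* entire allocatable altmap space */
  };

  /* Does @pfn fall inside the device-provided altmap region? */
  static int pfn_in_altmap(const struct altmap_sketch *m, unsigned long pfn)
  {
          unsigned long alt_start = m->base_pfn;
          unsigned long alt_end = m->base_pfn + m->reserve + m->free;

          return pfn >= alt_start && pfn < alt_end;
  }

With the old computation alt_end was additionally inflated by "alloc"
and "align", so ranges that were never altmap-backed could be treated as
if they were, which is how the hotunplug failure shows up.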
Fixes: 9ef34630a461 ("powerpc/mm: Fallback to RAM if the altmap is unusable") Signed-off-by: "Aneesh Kumar K.V" Reviewed-by: David Hildenbrand Signed-off-by: Michael Ellerman Link: https://msgid.link/20230724181320.471386-1-aneesh.kumar@linux.ibm.com Signed-off-by: Sasha Levin --- arch/powerpc/mm/init_64.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index b76cd49d521b..db040f34c004 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -313,8 +313,7 @@ void __ref vmemmap_free(unsigned long start, unsigned long end, start = ALIGN_DOWN(start, page_size); if (altmap) { alt_start = altmap->base_pfn; - alt_end = altmap->base_pfn + altmap->reserve + - altmap->free + altmap->alloc + altmap->align; + alt_end = altmap->base_pfn + altmap->reserve + altmap->free; } pr_debug("vmemmap_free %lx...%lx\n", start, end); From 0f1f471b91f41838d06e5a9f9592b6e15ee9c006 Mon Sep 17 00:00:00 2001 From: Alexander Stein Date: Mon, 15 May 2023 09:21:37 +0200 Subject: [PATCH 277/513] drm/imx/ipuv3: Fix front porch adjustment upon hactive aligning [ Upstream commit ee31742bf17636da1304af77b2cb1c29b5dda642 ] When hactive is not aligned to 8 pixels, it is aligned accordingly and hfront porch needs to be reduced the same amount. Unfortunately the front porch is set to the difference rather than reducing it. There are some Samsung TVs which can't cope with a front porch of instead of 70. Fixes: 94dfec48fca7 ("drm/imx: Add 8 pixel alignment fix") Signed-off-by: Alexander Stein Reviewed-by: Philipp Zabel Link: https://lore.kernel.org/r/20230515072137.116211-1-alexander.stein@ew.tq-group.com [p.zabel@pengutronix.de: Fixed subject] Signed-off-by: Philipp Zabel Link: https://patchwork.freedesktop.org/patch/msgid/20230515072137.116211-1-alexander.stein@ew.tq-group.com Signed-off-by: Sasha Levin --- drivers/gpu/drm/imx/ipuv3-crtc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c index f7863d6dea80..ba5b16618c23 100644 --- a/drivers/gpu/drm/imx/ipuv3-crtc.c +++ b/drivers/gpu/drm/imx/ipuv3-crtc.c @@ -311,7 +311,7 @@ static void ipu_crtc_mode_set_nofb(struct drm_crtc *crtc) dev_warn(ipu_crtc->dev, "8-pixel align hactive %d -> %d\n", sig_cfg.mode.hactive, new_hactive); - sig_cfg.mode.hfront_porch = new_hactive - sig_cfg.mode.hactive; + sig_cfg.mode.hfront_porch -= new_hactive - sig_cfg.mode.hactive; sig_cfg.mode.hactive = new_hactive; } From 1cdb50faf7f71e05a0aeed6658d21f0cf150f0e9 Mon Sep 17 00:00:00 2001 From: Michael Jeanson Date: Tue, 14 Jun 2022 11:48:30 -0400 Subject: [PATCH 278/513] selftests/rseq: check if libc rseq support is registered [ Upstream commit d1a997ba4c1bf65497d956aea90de42a6398f73a ] When checking for libc rseq support in the library constructor, don't only depend on the symbols presence, check that the registration was completed. This targets a scenario where the libc has rseq support but it is not wired for the current architecture in 'bits/rseq.h', we want to fallback to our internal registration mechanism. 
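For illustration only (not part of the patch), the same "symbols present
but registration not completed" distinction can be probed from a plain
userspace program. RTLD_DEFAULT is used instead of the selftest's
RTLD_NEXT purely so this builds as a normal executable:

  #define _GNU_SOURCE
  #include <dlfcn.h>
  #include <stddef.h>
  #include <stdio.h>

  int main(void)
  {
          const ptrdiff_t *off = dlsym(RTLD_DEFAULT, "__rseq_offset");
          const unsigned int *size = dlsym(RTLD_DEFAULT, "__rseq_size");

          /*
           * Resolving the symbols is not enough: in the scenario above
           * glibc exposes them but never completes the registration, so
           * __rseq_size is left at 0 and the caller has to fall back to
           * its own rseq registration.
           */
          if (off && size && *size != 0)
                  printf("rseq registration owned by glibc, offset %td\n", *off);
          else
                  printf("no completed glibc rseq registration\n");
          return 0;
  }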
Signed-off-by: Michael Jeanson Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Mathieu Desnoyers Link: https://lore.kernel.org/r/20220614154830.1367382-4-mjeanson@efficios.com Stable-dep-of: 3bcbc20942db ("selftests/rseq: Play nice with binaries statically linked against glibc 2.35+") Signed-off-by: Sasha Levin --- tools/testing/selftests/rseq/rseq.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c index 986b9458efb2..4177f9507bbe 100644 --- a/tools/testing/selftests/rseq/rseq.c +++ b/tools/testing/selftests/rseq/rseq.c @@ -111,7 +111,8 @@ void rseq_init(void) libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset"); libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size"); libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags"); - if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p) { + if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p && + *libc_rseq_size_p != 0) { /* rseq registration owned by glibc */ rseq_offset = *libc_rseq_offset_p; rseq_size = *libc_rseq_size_p; From c91c07ae084950622b5b3204ba1e0499d8a176d3 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 21 Jul 2023 15:33:52 -0700 Subject: [PATCH 279/513] selftests/rseq: Play nice with binaries statically linked against glibc 2.35+ [ Upstream commit 3bcbc20942db5d738221cca31a928efc09827069 ] To allow running rseq and KVM's rseq selftests as statically linked binaries, initialize the various "trampoline" pointers to point directly at the expect glibc symbols, and skip the dlysm() lookups if the rseq size is non-zero, i.e. the binary is statically linked *and* the libc registered its own rseq. Define weak versions of the symbols so as not to break linking against libc versions that don't support rseq in any capacity. The KVM selftests in particular are often statically linked so that they can be run on targets with very limited runtime environments, i.e. test machines. Fixes: 233e667e1ae3 ("selftests/rseq: Uplift rseq selftests for compatibility with glibc-2.35") Cc: Aaron Lewis Cc: kvm@vger.kernel.org Cc: stable@vger.kernel.org Signed-off-by: Sean Christopherson Message-Id: <20230721223352.2333911-1-seanjc@google.com> Signed-off-by: Paolo Bonzini Signed-off-by: Sasha Levin --- tools/testing/selftests/rseq/rseq.c | 28 ++++++++++++++++++++++------ 1 file changed, 22 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c index 4177f9507bbe..b736a5169aad 100644 --- a/tools/testing/selftests/rseq/rseq.c +++ b/tools/testing/selftests/rseq/rseq.c @@ -32,9 +32,17 @@ #include "../kselftest.h" #include "rseq.h" -static const ptrdiff_t *libc_rseq_offset_p; -static const unsigned int *libc_rseq_size_p; -static const unsigned int *libc_rseq_flags_p; +/* + * Define weak versions to play nice with binaries that are statically linked + * against a libc that doesn't support registering its own rseq. + */ +__weak ptrdiff_t __rseq_offset; +__weak unsigned int __rseq_size; +__weak unsigned int __rseq_flags; + +static const ptrdiff_t *libc_rseq_offset_p = &__rseq_offset; +static const unsigned int *libc_rseq_size_p = &__rseq_size; +static const unsigned int *libc_rseq_flags_p = &__rseq_flags; /* Offset from the thread pointer to the rseq area. 
*/ ptrdiff_t rseq_offset; @@ -108,9 +116,17 @@ int rseq_unregister_current_thread(void) static __attribute__((constructor)) void rseq_init(void) { - libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset"); - libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size"); - libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags"); + /* + * If the libc's registered rseq size isn't already valid, it may be + * because the binary is dynamically linked and not necessarily due to + * libc not having registered a restartable sequence. Try to find the + * symbols if that's the case. + */ + if (!*libc_rseq_size_p) { + libc_rseq_offset_p = dlsym(RTLD_NEXT, "__rseq_offset"); + libc_rseq_size_p = dlsym(RTLD_NEXT, "__rseq_size"); + libc_rseq_flags_p = dlsym(RTLD_NEXT, "__rseq_flags"); + } if (libc_rseq_size_p && libc_rseq_offset_p && libc_rseq_flags_p && *libc_rseq_size_p != 0) { /* rseq registration owned by glibc */ From 7996facaf0ee3829386aa388183f6fffab95e1af Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Wed, 20 Apr 2022 10:32:41 +0800 Subject: [PATCH 280/513] soundwire: bus: pm_runtime_request_resume on peripheral attachment [ Upstream commit e557bca49b812908f380c56b5b4b2f273848b676 ] In typical use cases, the peripheral becomes pm_runtime active as a result of the ALSA/ASoC framework starting up a DAI. The parent/child hierarchy guarantees that the manager device will be fully resumed beforehand. There is however a corner case where the manager device may become pm_runtime active, but without ALSA/ASoC requesting any functionality from the peripherals. In this case, the hardware peripheral device will report as ATTACHED and its initialization routine will be executed. If this initialization routine initiates any sort of deferred processing, there is a possibility that the manager could suspend without the peripheral suspend sequence being invoked: from the pm_runtime framework perspective, the peripheral is *already* suspended. To avoid such disconnects between hardware state and pm_runtime state, this patch adds an asynchronous pm_request_resume() upon successful attach/initialization which will result in the proper resume/suspend sequence to be followed on the peripheral side. BugLink: https://github.com/thesofproject/linux/issues/3459 Signed-off-by: Pierre-Louis Bossart Reviewed-by: Ranjani Sridharan Reviewed-by: Rander Wang Signed-off-by: Bard Liao Link: https://lore.kernel.org/r/20220420023241.14335-4-yung-chuan.liao@linux.intel.com Signed-off-by: Vinod Koul Stable-dep-of: c40d6b3249b1 ("soundwire: fix enumeration completion") Signed-off-by: Sasha Levin --- drivers/soundwire/bus.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index b7cdfa65157c..cc4cca0325b9 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -1841,6 +1841,18 @@ int sdw_handle_slave_status(struct sdw_bus *bus, __func__, slave->dev_num); complete(&slave->initialization_complete); + + /* + * If the manager became pm_runtime active, the peripherals will be + * restarted and attach, but their pm_runtime status may remain + * suspended. If the 'update_slave_status' callback initiates + * any sort of deferred processing, this processing would not be + * cancelled on pm_runtime suspend. + * To avoid such zombie states, we queue a request to resume. + * This would be a no-op in case the peripheral was being resumed + * by e.g. the ALSA/ASoC framework. 
+ */ + pm_request_resume(&slave->dev); } } From a36b522767f3a72688893a472e80c9aa03e67eda Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 5 Jul 2023 14:30:11 +0200 Subject: [PATCH 281/513] soundwire: fix enumeration completion [ Upstream commit c40d6b3249b11d60e09d81530588f56233d9aa44 ] The soundwire subsystem uses two completion structures that allow drivers to wait for soundwire device to become enumerated on the bus and initialised by their drivers, respectively. The code implementing the signalling is currently broken as it does not signal all current and future waiters and also uses the wrong reinitialisation function, which can potentially lead to memory corruption if there are still waiters on the queue. Not signalling future waiters specifically breaks sound card probe deferrals as codec drivers can not tell that the soundwire device is already attached when being reprobed. Some codec runtime PM implementations suffer from similar problems as waiting for enumeration during resume can also timeout despite the device already having been enumerated. Fixes: fb9469e54fa7 ("soundwire: bus: fix race condition with enumeration_complete signaling") Fixes: a90def068127 ("soundwire: bus: fix race condition with initialization_complete signaling") Cc: stable@vger.kernel.org # 5.7 Cc: Pierre-Louis Bossart Cc: Rander Wang Signed-off-by: Johan Hovold Reviewed-by: Pierre-Louis Bossart Link: https://lore.kernel.org/r/20230705123018.30903-2-johan+linaro@kernel.org Signed-off-by: Vinod Koul Signed-off-by: Sasha Levin --- drivers/soundwire/bus.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/soundwire/bus.c b/drivers/soundwire/bus.c index cc4cca0325b9..230a3250f315 100644 --- a/drivers/soundwire/bus.c +++ b/drivers/soundwire/bus.c @@ -828,8 +828,8 @@ static void sdw_modify_slave_status(struct sdw_slave *slave, "%s: initializing enumeration and init completion for Slave %d\n", __func__, slave->dev_num); - init_completion(&slave->enumeration_complete); - init_completion(&slave->initialization_complete); + reinit_completion(&slave->enumeration_complete); + reinit_completion(&slave->initialization_complete); } else if ((status == SDW_SLAVE_ATTACHED) && (slave->status == SDW_SLAVE_UNATTACHED)) { @@ -837,7 +837,7 @@ static void sdw_modify_slave_status(struct sdw_slave *slave, "%s: signaling enumeration completion for Slave %d\n", __func__, slave->dev_num); - complete(&slave->enumeration_complete); + complete_all(&slave->enumeration_complete); } slave->status = status; mutex_unlock(&bus->bus_lock); @@ -1840,7 +1840,7 @@ int sdw_handle_slave_status(struct sdw_bus *bus, "%s: signaling initialization completion for Slave %d\n", __func__, slave->dev_num); - complete(&slave->initialization_complete); + complete_all(&slave->initialization_complete); /* * If the manager became pm_runtime active, the peripherals will be From b5d3a4251bd2af87cef636ee491f1bda6988981d Mon Sep 17 00:00:00 2001 From: Chunfeng Yun Date: Mon, 25 Oct 2021 15:01:53 +0800 Subject: [PATCH 282/513] PM / wakeirq: support enabling wake-up irq after runtime_suspend called [ Upstream commit 259714100d98b50bf04d36a21bf50ca8b829fc11 ] When the dedicated wake IRQ is level trigger, and it uses the device's low-power status as the wakeup source, that means if the device is not in low-power state, the wake IRQ will be triggered if enabled; For this case, need enable the wake IRQ after running the device's ->runtime_suspend() which make it enter low-power state. e.g. 
Assume the wake IRQ is a low level trigger type, and the wakeup signal comes from the low-power status of the device. The wakeup signal is low level at running time (0), and becomes high level when the device enters low-power state (runtime_suspend (1) is called), a wakeup event at (2) make the device exit low-power state, then the wakeup signal also becomes low level. ------------------ | ^ ^| ---------------- | | -------------- |<---(0)--->|<--(1)--| (3) (2) (4) if enable the wake IRQ before running runtime_suspend during (0), a wake IRQ will arise, it causes resume immediately; it works if enable wake IRQ ( e.g. at (3) or (4)) after running ->runtime_suspend(). This patch introduces a new status WAKE_IRQ_DEDICATED_REVERSE to optionally support enabling wake IRQ after running ->runtime_suspend(). Suggested-by: Rafael J. Wysocki Signed-off-by: Chunfeng Yun Signed-off-by: Rafael J. Wysocki Stable-dep-of: 8527beb12087 ("PM: sleep: wakeirq: fix wake irq arming") Signed-off-by: Sasha Levin --- drivers/base/power/power.h | 7 ++- drivers/base/power/runtime.c | 6 ++- drivers/base/power/wakeirq.c | 101 +++++++++++++++++++++++++++-------- include/linux/pm_wakeirq.h | 9 +++- 4 files changed, 96 insertions(+), 27 deletions(-) diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index 54292cdd7808..0eb7f02b3ad5 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -25,8 +25,10 @@ extern u64 pm_runtime_active_time(struct device *dev); #define WAKE_IRQ_DEDICATED_ALLOCATED BIT(0) #define WAKE_IRQ_DEDICATED_MANAGED BIT(1) +#define WAKE_IRQ_DEDICATED_REVERSE BIT(2) #define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \ - WAKE_IRQ_DEDICATED_MANAGED) + WAKE_IRQ_DEDICATED_MANAGED | \ + WAKE_IRQ_DEDICATED_REVERSE) struct wake_irq { struct device *dev; @@ -39,7 +41,8 @@ extern void dev_pm_arm_wake_irq(struct wake_irq *wirq); extern void dev_pm_disarm_wake_irq(struct wake_irq *wirq); extern void dev_pm_enable_wake_irq_check(struct device *dev, bool can_change_status); -extern void dev_pm_disable_wake_irq_check(struct device *dev); +extern void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable); +extern void dev_pm_enable_wake_irq_complete(struct device *dev); #ifdef CONFIG_PM_SLEEP diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c index c1142a7a4fe6..5824d41a0b74 100644 --- a/drivers/base/power/runtime.c +++ b/drivers/base/power/runtime.c @@ -673,6 +673,8 @@ static int rpm_suspend(struct device *dev, int rpmflags) if (retval) goto fail; + dev_pm_enable_wake_irq_complete(dev); + no_callback: __update_runtime_status(dev, RPM_SUSPENDED); pm_runtime_deactivate_timer(dev); @@ -718,7 +720,7 @@ static int rpm_suspend(struct device *dev, int rpmflags) return retval; fail: - dev_pm_disable_wake_irq_check(dev); + dev_pm_disable_wake_irq_check(dev, true); __update_runtime_status(dev, RPM_ACTIVE); dev->power.deferred_resume = false; wake_up_all(&dev->power.wait_queue); @@ -901,7 +903,7 @@ static int rpm_resume(struct device *dev, int rpmflags) callback = RPM_GET_CALLBACK(dev, runtime_resume); - dev_pm_disable_wake_irq_check(dev); + dev_pm_disable_wake_irq_check(dev, false); retval = rpm_callback(callback, dev); if (retval) { __update_runtime_status(dev, RPM_SUSPENDED); diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index b91a3a9bf9f6..0004db4a9d3b 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -142,24 +142,7 @@ static irqreturn_t handle_threaded_wake_irq(int irq, void *_wirq) 
return IRQ_HANDLED; } -/** - * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt - * @dev: Device entry - * @irq: Device wake-up interrupt - * - * Unless your hardware has separate wake-up interrupts in addition - * to the device IO interrupts, you don't need this. - * - * Sets up a threaded interrupt handler for a device that has - * a dedicated wake-up interrupt in addition to the device IO - * interrupt. - * - * The interrupt starts disabled, and needs to be managed for - * the device by the bus code or the device driver using - * dev_pm_enable_wake_irq() and dev_pm_disable_wake_irq() - * functions. - */ -int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) +static int __dev_pm_set_dedicated_wake_irq(struct device *dev, int irq, unsigned int flag) { struct wake_irq *wirq; int err; @@ -197,7 +180,7 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) if (err) goto err_free_irq; - wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED; + wirq->status = WAKE_IRQ_DEDICATED_ALLOCATED | flag; return err; @@ -210,8 +193,57 @@ int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) return err; } + + +/** + * dev_pm_set_dedicated_wake_irq - Request a dedicated wake-up interrupt + * @dev: Device entry + * @irq: Device wake-up interrupt + * + * Unless your hardware has separate wake-up interrupts in addition + * to the device IO interrupts, you don't need this. + * + * Sets up a threaded interrupt handler for a device that has + * a dedicated wake-up interrupt in addition to the device IO + * interrupt. + * + * The interrupt starts disabled, and needs to be managed for + * the device by the bus code or the device driver using + * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*() + * functions. + */ +int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) +{ + return __dev_pm_set_dedicated_wake_irq(dev, irq, 0); +} EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq); +/** + * dev_pm_set_dedicated_wake_irq_reverse - Request a dedicated wake-up interrupt + * with reverse enable ordering + * @dev: Device entry + * @irq: Device wake-up interrupt + * + * Unless your hardware has separate wake-up interrupts in addition + * to the device IO interrupts, you don't need this. + * + * Sets up a threaded interrupt handler for a device that has a dedicated + * wake-up interrupt in addition to the device IO interrupt. It sets + * the status of WAKE_IRQ_DEDICATED_REVERSE to tell rpm_suspend() + * to enable dedicated wake-up interrupt after running the runtime suspend + * callback for @dev. + * + * The interrupt starts disabled, and needs to be managed for + * the device by the bus code or the device driver using + * dev_pm_enable_wake_irq*() and dev_pm_disable_wake_irq*() + * functions. + */ +int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq) +{ + return __dev_pm_set_dedicated_wake_irq(dev, irq, WAKE_IRQ_DEDICATED_REVERSE); +} +EXPORT_SYMBOL_GPL(dev_pm_set_dedicated_wake_irq_reverse); + /** * dev_pm_enable_wake_irq - Enable device wake-up interrupt * @dev: Device @@ -282,27 +314,54 @@ void dev_pm_enable_wake_irq_check(struct device *dev, return; enable: - enable_irq(wirq->irq); + if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) + enable_irq(wirq->irq); } /** * dev_pm_disable_wake_irq_check - Checks and disables wake-up interrupt * @dev: Device + * @cond_disable: if set, also check WAKE_IRQ_DEDICATED_REVERSE * * Disables wake-up interrupt conditionally based on status. 
* Should be only called from rpm_suspend() and rpm_resume() path. */ -void dev_pm_disable_wake_irq_check(struct device *dev) +void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable) { struct wake_irq *wirq = dev->power.wakeirq; if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK)) return; + if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) + return; + if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) disable_irq_nosync(wirq->irq); } +/** + * dev_pm_enable_wake_irq_complete - enable wake IRQ not enabled before + * @dev: Device using the wake IRQ + * + * Enable wake IRQ conditionally based on status, mainly used if want to + * enable wake IRQ after running ->runtime_suspend() which depends on + * WAKE_IRQ_DEDICATED_REVERSE. + * + * Should be only called from rpm_suspend() path. + */ +void dev_pm_enable_wake_irq_complete(struct device *dev) +{ + struct wake_irq *wirq = dev->power.wakeirq; + + if (!wirq || !(wirq->status & WAKE_IRQ_DEDICATED_MASK)) + return; + + if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED && + wirq->status & WAKE_IRQ_DEDICATED_REVERSE) + enable_irq(wirq->irq); +} + /** * dev_pm_arm_wake_irq - Arm device wake-up * @wirq: Device wake-up interrupt diff --git a/include/linux/pm_wakeirq.h b/include/linux/pm_wakeirq.h index cd5b62db9084..e63a63aa47a3 100644 --- a/include/linux/pm_wakeirq.h +++ b/include/linux/pm_wakeirq.h @@ -17,8 +17,8 @@ #ifdef CONFIG_PM extern int dev_pm_set_wake_irq(struct device *dev, int irq); -extern int dev_pm_set_dedicated_wake_irq(struct device *dev, - int irq); +extern int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq); +extern int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq); extern void dev_pm_clear_wake_irq(struct device *dev); extern void dev_pm_enable_wake_irq(struct device *dev); extern void dev_pm_disable_wake_irq(struct device *dev); @@ -35,6 +35,11 @@ static inline int dev_pm_set_dedicated_wake_irq(struct device *dev, int irq) return 0; } +static inline int dev_pm_set_dedicated_wake_irq_reverse(struct device *dev, int irq) +{ + return 0; +} + static inline void dev_pm_clear_wake_irq(struct device *dev) { } From aeb4db8ab7f1c9c9196be62fb4a0afa2cd0ddf44 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Thu, 13 Jul 2023 16:57:39 +0200 Subject: [PATCH 283/513] PM: sleep: wakeirq: fix wake irq arming [ Upstream commit 8527beb12087238d4387607597b4020bc393c4b4 ] The decision whether to enable a wake irq during suspend can not be done based on the runtime PM state directly as a driver may use wake irqs without implementing runtime PM. Such drivers specifically leave the state set to the default 'suspended' and the wake irq is thus never enabled at suspend. Add a new wake irq flag to track whether a dedicated wake irq has been enabled at runtime suspend and therefore must not be enabled at system suspend. Note that pm_runtime_enabled() can not be used as runtime PM is always disabled during late suspend. Fixes: 69728051f5bf ("PM / wakeirq: Fix unbalanced IRQ enable for wakeirq") Cc: 4.16+ # 4.16+ Signed-off-by: Johan Hovold Reviewed-by: Tony Lindgren Tested-by: Tony Lindgren Signed-off-by: Rafael J. 
Wysocki Signed-off-by: Sasha Levin --- drivers/base/power/power.h | 1 + drivers/base/power/wakeirq.c | 12 ++++++++---- 2 files changed, 9 insertions(+), 4 deletions(-) diff --git a/drivers/base/power/power.h b/drivers/base/power/power.h index 0eb7f02b3ad5..922ed457db19 100644 --- a/drivers/base/power/power.h +++ b/drivers/base/power/power.h @@ -29,6 +29,7 @@ extern u64 pm_runtime_active_time(struct device *dev); #define WAKE_IRQ_DEDICATED_MASK (WAKE_IRQ_DEDICATED_ALLOCATED | \ WAKE_IRQ_DEDICATED_MANAGED | \ WAKE_IRQ_DEDICATED_REVERSE) +#define WAKE_IRQ_DEDICATED_ENABLED BIT(3) struct wake_irq { struct device *dev; diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c index 0004db4a9d3b..6f2cdd8643af 100644 --- a/drivers/base/power/wakeirq.c +++ b/drivers/base/power/wakeirq.c @@ -314,8 +314,10 @@ void dev_pm_enable_wake_irq_check(struct device *dev, return; enable: - if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) + if (!can_change_status || !(wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) { enable_irq(wirq->irq); + wirq->status |= WAKE_IRQ_DEDICATED_ENABLED; + } } /** @@ -336,8 +338,10 @@ void dev_pm_disable_wake_irq_check(struct device *dev, bool cond_disable) if (cond_disable && (wirq->status & WAKE_IRQ_DEDICATED_REVERSE)) return; - if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) + if (wirq->status & WAKE_IRQ_DEDICATED_MANAGED) { + wirq->status &= ~WAKE_IRQ_DEDICATED_ENABLED; disable_irq_nosync(wirq->irq); + } } /** @@ -376,7 +380,7 @@ void dev_pm_arm_wake_irq(struct wake_irq *wirq) if (device_may_wakeup(wirq->dev)) { if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && - !pm_runtime_status_suspended(wirq->dev)) + !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED)) enable_irq(wirq->irq); enable_irq_wake(wirq->irq); @@ -399,7 +403,7 @@ void dev_pm_disarm_wake_irq(struct wake_irq *wirq) disable_irq_wake(wirq->irq); if (wirq->status & WAKE_IRQ_DEDICATED_ALLOCATED && - !pm_runtime_status_suspended(wirq->dev)) + !(wirq->status & WAKE_IRQ_DEDICATED_ENABLED)) disable_irq_nosync(wirq->irq); } } From 24c4de4069cbce796a1c71166240807d617cd652 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Fri, 11 Aug 2023 15:14:00 +0200 Subject: [PATCH 284/513] Linux 5.15.126 Link: https://lore.kernel.org/r/20230809103633.485906560@linuxfoundation.org Tested-by: SeongJae Park Tested-by: Harshit Mogalapalli Tested-by: Ron Economos Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a90f955e14ab..42993220a57a 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 15 -SUBLEVEL = 125 +SUBLEVEL = 126 EXTRAVERSION = NAME = Trick or Treat From 595679098bdcdbfbba91ebe07a2f7f208df93870 Mon Sep 17 00:00:00 2001 From: Long Li Date: Sat, 29 Jul 2023 11:36:18 +0800 Subject: [PATCH 285/513] ksmbd: validate command request size commit 5aa4fda5aa9c2a5a7bac67b4a12b089ab81fee3c upstream. In commit 2b9b8f3b68ed ("ksmbd: validate command payload size"), except for SMB2_OPLOCK_BREAK_HE command, the request size of other commands is not checked, it's not expected. Fix it by add check for request size of other commands. 
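The new condition is easier to follow without the double negation used
in the hunk below. As an illustration only (not part of the patch), with
invented parameter names standing in for the driver's per-command size
table and lease break constants:

  #include <errno.h>
  #include <stdbool.h>

  /*
   * Accept the request only if its declared StructureSize2 matches the
   * expected size for the command, or if it is one of the two SMB2.1
   * lease break layouts carried by an oplock break command.
   */
  static int check_request_size(bool is_oplock_break, unsigned int declared,
                                unsigned int expected,
                                unsigned int lease_break_v20,
                                unsigned int lease_break_v21)
  {
          if (declared == expected)
                  return 0;

          if (is_oplock_break &&
              (declared == lease_break_v20 || declared == lease_break_v21))
                  return 0;

          /*
           * Previously only a malformed oplock break was rejected at this
           * point; now any command with an unexpected size is.
           */
          return -EINVAL;
  }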
Cc: stable@vger.kernel.org Fixes: 2b9b8f3b68ed ("ksmbd: validate command payload size") Acked-by: Namjae Jeon Signed-off-by: Long Li Signed-off-by: Steve French Signed-off-by: Greg Kroah-Hartman --- fs/ksmbd/smb2misc.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c index c24674fc1904..8ef9503c4ab9 100644 --- a/fs/ksmbd/smb2misc.c +++ b/fs/ksmbd/smb2misc.c @@ -381,13 +381,13 @@ int ksmbd_smb2_check_message(struct ksmbd_work *work) } if (smb2_req_struct_sizes[command] != pdu->StructureSize2) { - if (command == SMB2_OPLOCK_BREAK_HE && - le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_20 && - le16_to_cpu(pdu->StructureSize2) != OP_BREAK_STRUCT_SIZE_21) { + if (!(command == SMB2_OPLOCK_BREAK_HE && + (le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_20 || + le16_to_cpu(pdu->StructureSize2) == OP_BREAK_STRUCT_SIZE_21))) { /* special case for SMB2.1 lease break message */ ksmbd_debug(SMB, - "Illegal request size %d for oplock break\n", - le16_to_cpu(pdu->StructureSize2)); + "Illegal request size %u for command %d\n", + le16_to_cpu(pdu->StructureSize2), command); return 1; } } From aeb974907642be095e38ecb1a400ca583958b2b0 Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Sun, 6 Aug 2023 08:44:17 +0900 Subject: [PATCH 286/513] ksmbd: fix wrong next length validation of ea buffer in smb2_set_ea() commit 79ed288cef201f1f212dfb934bcaac75572fb8f6 upstream. There are multiple smb2_ea_info buffers in FILE_FULL_EA_INFORMATION request from client. ksmbd find next smb2_ea_info using ->NextEntryOffset of current smb2_ea_info. ksmbd need to validate buffer length Before accessing the next ea. ksmbd should check buffer length using buf_len, not next variable. next is the start offset of current ea that got from previous ea. Cc: stable@vger.kernel.org Reported-by: zdi-disclosures@trendmicro.com # ZDI-CAN-21598 Signed-off-by: Namjae Jeon Signed-off-by: Steve French Signed-off-by: Greg Kroah-Hartman --- fs/ksmbd/smb2pdu.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c index 9f9d07caa57e..0fde3d12b346 100644 --- a/fs/ksmbd/smb2pdu.c +++ b/fs/ksmbd/smb2pdu.c @@ -2309,9 +2309,16 @@ static int smb2_set_ea(struct smb2_ea_info *eabuf, unsigned int buf_len, break; buf_len -= next; eabuf = (struct smb2_ea_info *)((char *)eabuf + next); - if (next < (u32)eabuf->EaNameLength + le16_to_cpu(eabuf->EaValueLength)) + if (buf_len < sizeof(struct smb2_ea_info)) { + rc = -EINVAL; break; + } + if (buf_len < sizeof(struct smb2_ea_info) + eabuf->EaNameLength + + le16_to_cpu(eabuf->EaValueLength)) { + rc = -EINVAL; + break; + } } while (next != 0); kfree(attr_name); From 3f00757ab41612d020eb48c787184bf05815897a Mon Sep 17 00:00:00 2001 From: "Jason A. Donenfeld" Date: Mon, 7 Aug 2023 15:21:27 +0200 Subject: [PATCH 287/513] wireguard: allowedips: expand maximum node depth commit 46622219aae2b67813fe31a7b8cb7da5baff5c8a upstream. In the allowedips self-test, nodes are inserted into the tree, but it generated an even amount of nodes, but for checking maximum node depth, there is of course the root node, which makes the total number necessarily odd. With two few nodes added, it never triggered the maximum depth check like it should have. So, add 129 nodes instead of 128 nodes, and do so with a more straightforward scheme, starting with all the bits set, and shifting over one each time. 
Then increase the maximum depth to 129, and choose a better name for that variable to make it clear that it represents depth as opposed to bits. Cc: stable@vger.kernel.org Fixes: e7096c131e51 ("net: WireGuard secure network tunnel") Signed-off-by: Jason A. Donenfeld Link: https://lore.kernel.org/r/20230807132146.2191597-2-Jason@zx2c4.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- drivers/net/wireguard/allowedips.c | 8 ++++---- drivers/net/wireguard/selftest/allowedips.c | 16 ++++++++++------ 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/drivers/net/wireguard/allowedips.c b/drivers/net/wireguard/allowedips.c index 5bf7822c53f1..0ba714ca5185 100644 --- a/drivers/net/wireguard/allowedips.c +++ b/drivers/net/wireguard/allowedips.c @@ -6,7 +6,7 @@ #include "allowedips.h" #include "peer.h" -enum { MAX_ALLOWEDIPS_BITS = 128 }; +enum { MAX_ALLOWEDIPS_DEPTH = 129 }; static struct kmem_cache *node_cache; @@ -42,7 +42,7 @@ static void push_rcu(struct allowedips_node **stack, struct allowedips_node __rcu *p, unsigned int *len) { if (rcu_access_pointer(p)) { - if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_BITS)) + if (WARN_ON(IS_ENABLED(DEBUG) && *len >= MAX_ALLOWEDIPS_DEPTH)) return; stack[(*len)++] = rcu_dereference_raw(p); } @@ -55,7 +55,7 @@ static void node_free_rcu(struct rcu_head *rcu) static void root_free_rcu(struct rcu_head *rcu) { - struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { + struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { container_of(rcu, struct allowedips_node, rcu) }; unsigned int len = 1; @@ -68,7 +68,7 @@ static void root_free_rcu(struct rcu_head *rcu) static void root_remove_peer_lists(struct allowedips_node *root) { - struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_BITS] = { root }; + struct allowedips_node *node, *stack[MAX_ALLOWEDIPS_DEPTH] = { root }; unsigned int len = 1; while (len > 0 && (node = stack[--len])) { diff --git a/drivers/net/wireguard/selftest/allowedips.c b/drivers/net/wireguard/selftest/allowedips.c index 41db10f9be49..2c9eec24eec4 100644 --- a/drivers/net/wireguard/selftest/allowedips.c +++ b/drivers/net/wireguard/selftest/allowedips.c @@ -593,16 +593,20 @@ bool __init wg_allowedips_selftest(void) wg_allowedips_remove_by_peer(&t, a, &mutex); test_negative(4, a, 192, 168, 0, 1); - /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_BITS) in free_node + /* These will hit the WARN_ON(len >= MAX_ALLOWEDIPS_DEPTH) in free_node * if something goes wrong. */ - for (i = 0; i < MAX_ALLOWEDIPS_BITS; ++i) { - part = cpu_to_be64(~(1LLU << (i % 64))); - memset(&ip, 0xff, 16); - memcpy((u8 *)&ip + (i < 64) * 8, &part, 8); + for (i = 0; i < 64; ++i) { + part = cpu_to_be64(~0LLU << i); + memset(&ip, 0xff, 8); + memcpy((u8 *)&ip + 8, &part, 8); + wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); + memcpy(&ip, &part, 8); + memset((u8 *)&ip + 8, 0, 8); wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); } - + memset(&ip, 0, 16); + wg_allowedips_insert_v6(&t, &ip, 128, a, &mutex); wg_allowedips_free(&t, &mutex); wg_allowedips_init(&t); From 6cde60777675193ce9aa966ae0ab933f3bdaa832 Mon Sep 17 00:00:00 2001 From: Sergei Antonov Date: Tue, 27 Jun 2023 15:05:49 +0300 Subject: [PATCH 288/513] mmc: moxart: read scr register without changing byte order commit d44263222134b5635932974c6177a5cba65a07e8 upstream. Conversion from big-endian to native is done in a common function mmc_app_send_scr(). Converting in moxart_transfer_pio() is extra. 
Double conversion on a LE system returns an incorrect SCR value, leads to errors: mmc0: unrecognised SCR structure version 8 Fixes: 1b66e94e6b99 ("mmc: moxart: Add MOXA ART SD/MMC driver") Signed-off-by: Sergei Antonov Cc: Jonas Jensen Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230627120549.2400325-1-saproj@gmail.com Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/moxart-mmc.c | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index 52ed30f2d9f4..94e9a08bc90e 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -338,13 +338,7 @@ static void moxart_transfer_pio(struct moxart_host *host) return; } for (len = 0; len < remain && len < host->fifo_width;) { - /* SCR data must be read in big endian. */ - if (data->mrq->cmd->opcode == SD_APP_SEND_SCR) - *sgp = ioread32be(host->base + - REG_DATA_WINDOW); - else - *sgp = ioread32(host->base + - REG_DATA_WINDOW); + *sgp = ioread32(host->base + REG_DATA_WINDOW); sgp++; len += 4; } From 3ca8f5c733c4691c4e75c25c27af2fb453e2a1a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maciej=20=C5=BBenczykowski?= Date: Mon, 7 Aug 2023 03:25:32 -0700 Subject: [PATCH 289/513] ipv6: adjust ndisc_is_useropt() to also return true for PIO MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 048c796beb6eb4fa3a5a647ee1c81f5c6f0f6a2a upstream. The upcoming (and nearly finalized): https://datatracker.ietf.org/doc/draft-collink-6man-pio-pflag/ will update the IPv6 RA to include a new flag in the PIO field, which will serve as a hint to perform DHCPv6-PD. As we don't want DHCPv6 related logic inside the kernel, this piece of information needs to be exposed to userspace. The simplest option is to simply expose the entire PIO through the already existing mechanism. Even without this new flag, the already existing PIO R (router address) flag (from RFC6275) cannot AFAICT be handled entirely in kernel, and provides useful information that should be exposed to userspace (the router's global address, for use by Mobile IPv6). Also cc'ing stable@ for inclusion in LTS, as while technically this is not quite a bugfix, and instead more of a feature, it is absolutely trivial and the alternative is manually cherrypicking into all Android Common Kernel trees - and I know Greg will ask for it to be sent in via LTS instead... 
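For illustration only (not part of the patch): however userspace ends up
receiving the RA options this function lets through (via the ND user
option notifications or from its own raw ICMPv6 socket), the PIO arrives
in the RFC 4861 TLV layout sketched below; the exact bit position of the
draft's new P flag is not assumed here:

  #include <stdint.h>
  #include <stddef.h>

  struct pio {                     /* Prefix Information Option, type 3 */
          uint8_t  type;           /* 3 */
          uint8_t  len;            /* units of 8 octets, 4 for a PIO */
          uint8_t  prefix_len;
          uint8_t  flags;          /* L, A, R (RFC 6275), plus the new P hint */
          uint32_t valid_lft;      /* network byte order */
          uint32_t preferred_lft;  /* network byte order */
          uint32_t reserved;
          uint8_t  prefix[16];
  };

  static const struct pio *find_pio(const uint8_t *opts, size_t optlen)
  {
          while (optlen >= 8) {
                  size_t len = (size_t)opts[1] * 8;

                  if (len == 0 || len > optlen)
                          return NULL;             /* malformed option list */
                  if (opts[0] == 3 && len == sizeof(struct pio))
                          return (const struct pio *)opts;
                  opts += len;
                  optlen -= len;
          }
          return NULL;
  }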
Cc: Jen Linkova Cc: Lorenzo Colitti Cc: David Ahern Cc: YOSHIFUJI Hideaki / 吉藤英明 Cc: stable@vger.kernel.org Signed-off-by: Maciej Żenczykowski Link: https://lore.kernel.org/r/20230807102533.1147559-1-maze@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/ipv6/ndisc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 8108e9a941d0..3ab903f7e0f8 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -196,7 +196,8 @@ static struct nd_opt_hdr *ndisc_next_option(struct nd_opt_hdr *cur, static inline int ndisc_is_useropt(const struct net_device *dev, struct nd_opt_hdr *opt) { - return opt->nd_opt_type == ND_OPT_RDNSS || + return opt->nd_opt_type == ND_OPT_PREFIX_INFO || + opt->nd_opt_type == ND_OPT_RDNSS || opt->nd_opt_type == ND_OPT_DNSSL || opt->nd_opt_type == ND_OPT_CAPTIVE_PORTAL || opt->nd_opt_type == ND_OPT_PREF64 || From 57772ae9b339578aecae35aae9c29da72258b6f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ilpo=20J=C3=A4rvinen?= Date: Fri, 26 May 2023 13:54:34 +0300 Subject: [PATCH 290/513] dmaengine: pl330: Return DMA_PAUSED when transaction is paused MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 8cda3ececf07d374774f6a13e5a94bc2dc04c26c upstream. pl330_pause() does not set anything to indicate paused condition which causes pl330_tx_status() to return DMA_IN_PROGRESS. This breaks 8250 DMA flush after the fix in commit 57e9af7831dc ("serial: 8250_dma: Fix DMA Rx rearm race"). The function comment for pl330_pause() claims pause is supported but resume is not which is enough for 8250 DMA flush to work as long as DMA status reports DMA_PAUSED when appropriate. Add PAUSED state for descriptor and mark BUSY descriptors with PAUSED in pl330_pause(). Return DMA_PAUSED from pl330_tx_status() when the descriptor is PAUSED. Reported-by: Richard Tresidder Tested-by: Richard Tresidder Fixes: 88987d2c7534 ("dmaengine: pl330: add DMA_PAUSE feature") Cc: stable@vger.kernel.org Link: https://lore.kernel.org/linux-serial/f8a86ecd-64b1-573f-c2fa-59f541083f1a@electromag.com.au/ Signed-off-by: Ilpo Järvinen Link: https://lore.kernel.org/r/20230526105434.14959-1-ilpo.jarvinen@linux.intel.com Signed-off-by: Vinod Koul Signed-off-by: Greg Kroah-Hartman --- drivers/dma/pl330.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index b9bc82d6a162..ec8a1565630b 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -403,6 +403,12 @@ enum desc_status { * of a channel can be BUSY at any time. */ BUSY, + /* + * Pause was called while descriptor was BUSY. Due to hardware + * limitations, only termination is possible for descriptors + * that have been paused. 
+ */ + PAUSED, /* * Sitting on the channel work_list but xfer done * by PL330 core @@ -2041,7 +2047,7 @@ static inline void fill_queue(struct dma_pl330_chan *pch) list_for_each_entry(desc, &pch->work_list, node) { /* If already submitted */ - if (desc->status == BUSY) + if (desc->status == BUSY || desc->status == PAUSED) continue; ret = pl330_submit_req(pch->thread, desc); @@ -2326,6 +2332,7 @@ static int pl330_pause(struct dma_chan *chan) { struct dma_pl330_chan *pch = to_pchan(chan); struct pl330_dmac *pl330 = pch->dmac; + struct dma_pl330_desc *desc; unsigned long flags; pm_runtime_get_sync(pl330->ddma.dev); @@ -2335,6 +2342,10 @@ static int pl330_pause(struct dma_chan *chan) _stop(pch->thread); spin_unlock(&pl330->lock); + list_for_each_entry(desc, &pch->work_list, node) { + if (desc->status == BUSY) + desc->status = PAUSED; + } spin_unlock_irqrestore(&pch->lock, flags); pm_runtime_mark_last_busy(pl330->ddma.dev); pm_runtime_put_autosuspend(pl330->ddma.dev); @@ -2425,7 +2436,7 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, else if (running && desc == running) transferred = pl330_get_current_xferred_count(pch, desc); - else if (desc->status == BUSY) + else if (desc->status == BUSY || desc->status == PAUSED) /* * Busy but not running means either just enqueued, * or finished and not yet marked done @@ -2442,6 +2453,9 @@ pl330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, case DONE: ret = DMA_COMPLETE; break; + case PAUSED: + ret = DMA_PAUSED; + break; case PREP: case BUSY: ret = DMA_IN_PROGRESS; From bcd9eeb3a3090f9d257f4944dfb7138d5ad8e611 Mon Sep 17 00:00:00 2001 From: Andrea Parri Date: Thu, 3 Aug 2023 06:27:38 +0200 Subject: [PATCH 291/513] riscv,mmio: Fix readX()-to-delay() ordering commit 4eb2eb1b4c0eb07793c240744843498564a67b83 upstream. Section 2.1 of the Platform Specification [1] states: Unless otherwise specified by a given I/O device, I/O devices are on ordering channel 0 (i.e., they are point-to-point strongly ordered). which is not sufficient to guarantee that a readX() by a hart completes before a subsequent delay() on the same hart (cf. memory-barriers.txt, "Kernel I/O barrier effects"). Set the I(nput) bit in __io_ar() to restore the ordering, align inline comments. [1] https://github.com/riscv/riscv-platform-specs Signed-off-by: Andrea Parri Link: https://lore.kernel.org/r/20230803042738.5937-1-parri.andrea@gmail.com Fixes: fab957c11efe ("RISC-V: Atomic and Locking Code") Cc: stable@vger.kernel.org Signed-off-by: Palmer Dabbelt Signed-off-by: Greg Kroah-Hartman --- arch/riscv/include/asm/mmio.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h index aff6c33ab0c0..4c58ee7f95ec 100644 --- a/arch/riscv/include/asm/mmio.h +++ b/arch/riscv/include/asm/mmio.h @@ -101,9 +101,9 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) * Relaxed I/O memory access primitives. These follow the Device memory * ordering rules but do not guarantee any ordering relative to Normal memory * accesses. These are defined to order the indicated access (either a read or - * write) with all other I/O memory accesses. Since the platform specification - * defines that all I/O regions are strongly ordered on channel 2, no explicit - * fences are required to enforce this ordering. + * write) with all other I/O memory accesses to the same peripheral. 
Since the + * platform specification defines that all I/O regions are strongly ordered on + * channel 0, no explicit fences are required to enforce this ordering. */ /* FIXME: These are now the same as asm-generic */ #define __io_rbr() do {} while (0) @@ -125,14 +125,14 @@ static inline u64 __raw_readq(const volatile void __iomem *addr) #endif /* - * I/O memory access primitives. Reads are ordered relative to any - * following Normal memory access. Writes are ordered relative to any prior - * Normal memory access. The memory barriers here are necessary as RISC-V + * I/O memory access primitives. Reads are ordered relative to any following + * Normal memory read and delay() loop. Writes are ordered relative to any + * prior Normal memory write. The memory barriers here are necessary as RISC-V * doesn't define any ordering between the memory space and the I/O space. */ #define __io_br() do {} while (0) -#define __io_ar(v) __asm__ __volatile__ ("fence i,r" : : : "memory") -#define __io_bw() __asm__ __volatile__ ("fence w,o" : : : "memory") +#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); }) +#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); }) #define __io_aw() mmiowb_set_pending() #define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; }) From 64e6253f6489610163b3055e85738074b61cbd5d Mon Sep 17 00:00:00 2001 From: Karol Herbst Date: Thu, 22 Jun 2023 17:20:17 +0200 Subject: [PATCH 292/513] drm/nouveau/gr: enable memory loads on helper invocation on all channels commit 1cb9e2ef66d53b020842b18762e30d0eb4384de8 upstream. We have a lurking bug where Fragment Shader Helper Invocations can't load from memory. But this is actually required in OpenGL and is causing random hangs or failures in random shaders. It is unknown how widespread this issue is, but shaders hitting this can end up with infinite loops. We enable those only on all Kepler and newer GPUs where we use our own Firmware. Nvidia's firmware provides a way to set a kernelspace controlled list of mmio registers in the gr space from push buffers via MME macros. v2: drop code for gm200 and newer. 
Cc: Ben Skeggs Cc: David Airlie Cc: nouveau@lists.freedesktop.org Cc: stable@vger.kernel.org # 4.19+ Signed-off-by: Karol Herbst Reviewed-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20230622152017.2512101-1-kherbst@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h | 1 + drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c | 4 +++- drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c | 10 ++++++++++ drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c | 1 + drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c | 1 + drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c | 1 + 6 files changed, 17 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h index 32bbddc0993e..679aff79f4d6 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgf100.h @@ -123,6 +123,7 @@ void gk104_grctx_generate_r418800(struct gf100_gr *); extern const struct gf100_grctx_func gk110_grctx; void gk110_grctx_generate_r419eb0(struct gf100_gr *); +void gk110_grctx_generate_r419f78(struct gf100_gr *); extern const struct gf100_grctx_func gk110b_grctx; extern const struct gf100_grctx_func gk208_grctx; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c index 304e9d268bad..f894f8254824 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk104.c @@ -916,7 +916,9 @@ static void gk104_grctx_generate_r419f78(struct gf100_gr *gr) { struct nvkm_device *device = gr->base.engine.subdev.device; - nvkm_mask(device, 0x419f78, 0x00000001, 0x00000000); + + /* bit 3 set disables loads in fp helper invocations, we need it enabled */ + nvkm_mask(device, 0x419f78, 0x00000009, 0x00000000); } void diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c index 86547cfc38dc..e88740d4e54d 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110.c @@ -820,6 +820,15 @@ gk110_grctx_generate_r419eb0(struct gf100_gr *gr) nvkm_mask(device, 0x419eb0, 0x00001000, 0x00001000); } +void +gk110_grctx_generate_r419f78(struct gf100_gr *gr) +{ + struct nvkm_device *device = gr->base.engine.subdev.device; + + /* bit 3 set disables loads in fp helper invocations, we need it enabled */ + nvkm_mask(device, 0x419f78, 0x00000008, 0x00000000); +} + const struct gf100_grctx_func gk110_grctx = { .main = gf100_grctx_generate_main, @@ -852,4 +861,5 @@ gk110_grctx = { .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r418800 = gk104_grctx_generate_r418800, .r419eb0 = gk110_grctx_generate_r419eb0, + .r419f78 = gk110_grctx_generate_r419f78, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c index ebb947bd1446..086e4d49e112 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk110b.c @@ -101,4 +101,5 @@ gk110b_grctx = { .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r418800 = gk104_grctx_generate_r418800, .r419eb0 = gk110_grctx_generate_r419eb0, + .r419f78 = gk110_grctx_generate_r419f78, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c index 4d40512b5c99..0bf438c3f7cb 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c +++ 
b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgk208.c @@ -566,4 +566,5 @@ gk208_grctx = { .dist_skip_table = gf117_grctx_generate_dist_skip_table, .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r418800 = gk104_grctx_generate_r418800, + .r419f78 = gk110_grctx_generate_r419f78, }; diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c index 0b3964e6b36e..acdf0932a99e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c +++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/ctxgm107.c @@ -991,4 +991,5 @@ gm107_grctx = { .r406500 = gm107_grctx_generate_r406500, .gpc_tpc_nr = gk104_grctx_generate_gpc_tpc_nr, .r419e00 = gm107_grctx_generate_r419e00, + .r419f78 = gk110_grctx_generate_r419f78, }; From 621204fca047450dbb4c2e3b47ab1dbf15e3c017 Mon Sep 17 00:00:00 2001 From: Boris Brezillon Date: Mon, 24 Jul 2023 13:26:10 +0200 Subject: [PATCH 293/513] drm/shmem-helper: Reset vma->vm_ops before calling dma_buf_mmap() commit 07dd476f6116966cb2006e25fdcf48f0715115ff upstream. The dma-buf backend is supposed to provide its own vm_ops, but some implementation just have nothing special to do and leave vm_ops untouched, probably expecting this field to be zero initialized (this is the case with the system_heap implementation for instance). Let's reset vma->vm_ops to NULL to keep things working with these implementations. Fixes: 26d3ac3cb04d ("drm/shmem-helpers: Redirect mmap for imported dma-buf") Cc: Cc: Daniel Vetter Reported-by: Roman Stratiienko Signed-off-by: Boris Brezillon Tested-by: Roman Stratiienko Reviewed-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20230724112610.60974-1-boris.brezillon@collabora.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/drm_gem_shmem_helper.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 0c2968052b66..54f1ab3071f9 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -591,7 +591,13 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct int ret; if (obj->import_attach) { + /* Reset both vm_ops and vm_private_data, so we don't end up with + * vm_ops pointing to our implementation if the dma-buf backend + * doesn't set those fields. + */ vma->vm_private_data = NULL; + vma->vm_ops = NULL; + ret = dma_buf_mmap(obj->dma_buf, vma, 0); /* Drop the reference drm_gem_mmap_obj() acquired.*/ From 3ad4ba2b61124903d348b60026eb05150f4950a2 Mon Sep 17 00:00:00 2001 From: Melissa Wen Date: Mon, 31 Jul 2023 07:35:05 -0100 Subject: [PATCH 294/513] drm/amd/display: check attr flag before set cursor degamma on DCN3+ commit 96b020e2163fb2197266b2f71b1007495206e6bb upstream. Don't set predefined degamma curve to cursor plane if the cursor attribute flag is not set. Applying a degamma curve to the cursor by default breaks userspace expectation. Checking the flag before performing any color transformation prevents too dark cursor gamma in DCN3+ on many Linux desktop environment (KDE Plasma, GNOME, wlroots-based, etc.) as reported at: - https://gitlab.freedesktop.org/drm/amd/-/issues/1513 This is the same approach followed by DCN2 drivers where the issue is not present. 
Fixes: 03f54d7d3448 ("drm/amd/display: Add DCN3 DPP") Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1513 Signed-off-by: Melissa Wen Reviewed-by: Harry Wentland Tested-by: Alex Hung Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c index 23a52d47e61c..0601c17426af 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_dpp.c @@ -355,8 +355,11 @@ void dpp3_set_cursor_attributes( int cur_rom_en = 0; if (color_format == CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA || - color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) - cur_rom_en = 1; + color_format == CURSOR_MODE_COLOR_UN_PRE_MULTIPLIED_ALPHA) { + if (cursor_attributes->attribute_flags.bits.ENABLE_CURSOR_DEGAMMA) { + cur_rom_en = 1; + } + } REG_UPDATE_3(CURSOR0_CONTROL, CUR0_MODE, color_format, From 8d10284243b7fe26e884b1d1a7e387434d6c617a Mon Sep 17 00:00:00 2001 From: Tao Ren Date: Fri, 4 Aug 2023 15:14:03 -0700 Subject: [PATCH 295/513] hwmon: (pmbus/bel-pfe) Enable PMBUS_SKIP_STATUS_CHECK for pfe1100 commit f38963b9cd0645a336cf30c5da2e89e34e34fec3 upstream. Skip status check for both pfe1100 and pfe3000 because the communication error is also observed on pfe1100 devices. Signed-off-by: Tao Ren Fixes: 626bb2f3fb3c hwmon: (pmbus) add driver for BEL PFE1100 and PFE3000 Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230804221403.28931-1-rentao.bupt@gmail.com Signed-off-by: Guenter Roeck Signed-off-by: Greg Kroah-Hartman --- drivers/hwmon/pmbus/bel-pfe.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/hwmon/pmbus/bel-pfe.c b/drivers/hwmon/pmbus/bel-pfe.c index 4100eefb7ac3..61c195f8fd3b 100644 --- a/drivers/hwmon/pmbus/bel-pfe.c +++ b/drivers/hwmon/pmbus/bel-pfe.c @@ -17,12 +17,13 @@ enum chips {pfe1100, pfe3000}; /* - * Disable status check for pfe3000 devices, because some devices report - * communication error (invalid command) for VOUT_MODE command (0x20) - * although correct VOUT_MODE (0x16) is returned: it leads to incorrect - * exponent in linear mode. + * Disable status check because some devices report communication error + * (invalid command) for VOUT_MODE command (0x20) although the correct + * VOUT_MODE (0x16) is returned: it leads to incorrect exponent in linear + * mode. + * This affects both pfe3000 and pfe1100. */ -static struct pmbus_platform_data pfe3000_plat_data = { +static struct pmbus_platform_data pfe_plat_data = { .flags = PMBUS_SKIP_STATUS_CHECK, }; @@ -94,16 +95,15 @@ static int pfe_pmbus_probe(struct i2c_client *client) int model; model = (int)i2c_match_id(pfe_device_id, client)->driver_data; + client->dev.platform_data = &pfe_plat_data; /* * PFE3000-12-069RA devices may not stay in page 0 during device * probe which leads to probe failure (read status word failed). * So let's set the device to page 0 at the beginning. 
*/ - if (model == pfe3000) { - client->dev.platform_data = &pfe3000_plat_data; + if (model == pfe3000) i2c_smbus_write_byte_data(client, PMBUS_PAGE, 0); - } return pmbus_do_probe(client, &pfe_driver_info[model]); } From 36a3b560c78d11c4bf887e7b7e7c8d0aa819048e Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Thu, 27 Jul 2023 17:09:30 +0100 Subject: [PATCH 296/513] radix tree test suite: fix incorrect allocation size for pthreads commit cac7ea57a06016e4914848b707477fb07ee4ae1c upstream. Currently the pthread allocation for each array item is based on the size of a pthread_t pointer and should be the size of the pthread_t structure, so the allocation is under-allocating the correct size. Fix this by using the size of each element in the pthreads array. Static analysis cppcheck reported: tools/testing/radix-tree/regression1.c:180:2: warning: Size of pointer 'threads' used instead of size of its data. [pointerSize] Link: https://lkml.kernel.org/r/20230727160930.632674-1-colin.i.king@gmail.com Fixes: 1366c37ed84b ("radix tree test harness") Signed-off-by: Colin Ian King Cc: Konstantin Khlebnikov Cc: Matthew Wilcox (Oracle) Cc: Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- tools/testing/radix-tree/regression1.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/radix-tree/regression1.c b/tools/testing/radix-tree/regression1.c index a61c7bcbc72d..63f468bf8245 100644 --- a/tools/testing/radix-tree/regression1.c +++ b/tools/testing/radix-tree/regression1.c @@ -177,7 +177,7 @@ void regression1_test(void) nr_threads = 2; pthread_barrier_init(&worker_barrier, NULL, nr_threads); - threads = malloc(nr_threads * sizeof(pthread_t *)); + threads = malloc(nr_threads * sizeof(*threads)); for (i = 0; i < nr_threads; i++) { arg = i; From 3645510cf926e6af2f4d44899370d7e5331c93bd Mon Sep 17 00:00:00 2001 From: Ryusuke Konishi Date: Sat, 29 Jul 2023 04:13:18 +0900 Subject: [PATCH 297/513] nilfs2: fix use-after-free of nilfs_root in dirtying inodes via iput commit f8654743a0e6909dc634cbfad6db6816f10f3399 upstream. During unmount process of nilfs2, nothing holds nilfs_root structure after nilfs2 detaches its writer in nilfs_detach_log_writer(). Previously, nilfs_evict_inode() could cause use-after-free read for nilfs_root if inodes are left in "garbage_list" and released by nilfs_dispose_list at the end of nilfs_detach_log_writer(), and this bug was fixed by commit 9b5a04ac3ad9 ("nilfs2: fix use-after-free bug of nilfs_root in nilfs_evict_inode()"). However, it turned out that there is another possibility of UAF in the call path where mark_inode_dirty_sync() is called from iput(): nilfs_detach_log_writer() nilfs_dispose_list() iput() mark_inode_dirty_sync() __mark_inode_dirty() nilfs_dirty_inode() __nilfs_mark_inode_dirty() nilfs_load_inode_block() --> causes UAF of nilfs_root struct This can happen after commit 0ae45f63d4ef ("vfs: add support for a lazytime mount option"), which changed iput() to call mark_inode_dirty_sync() on its final reference if i_state has I_DIRTY_TIME flag and i_nlink is non-zero. This issue appears after commit 28a65b49eb53 ("nilfs2: do not write dirty data after degenerating to read-only") when using the syzbot reproducer, but the issue has potentially existed before. Fix this issue by adding a "purging flag" to the nilfs structure, setting that flag while disposing the "garbage_list" and checking it in __nilfs_mark_inode_dirty(). 
Unlike commit 9b5a04ac3ad9 ("nilfs2: fix use-after-free bug of nilfs_root in nilfs_evict_inode()"), this patch does not rely on ns_writer to determine whether to skip operations, so as not to break recovery on mount. The nilfs_salvage_orphan_logs routine dirties the buffer of salvaged data before attaching the log writer, so changing __nilfs_mark_inode_dirty() to skip the operation when ns_writer is NULL will cause recovery write to fail. The purpose of using the cleanup-only flag is to allow for narrowing of such conditions. Link: https://lkml.kernel.org/r/20230728191318.33047-1-konishi.ryusuke@gmail.com Signed-off-by: Ryusuke Konishi Reported-by: syzbot+74db8b3087f293d3a13a@syzkaller.appspotmail.com Closes: https://lkml.kernel.org/r/000000000000b4e906060113fd63@google.com Fixes: 0ae45f63d4ef ("vfs: add support for a lazytime mount option") Tested-by: Ryusuke Konishi Cc: # 4.0+ Signed-off-by: Andrew Morton Signed-off-by: Greg Kroah-Hartman --- fs/nilfs2/inode.c | 8 ++++++++ fs/nilfs2/segment.c | 2 ++ fs/nilfs2/the_nilfs.h | 2 ++ 3 files changed, 12 insertions(+) diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index c1ab0bfc3ed5..b908216f306d 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -1105,9 +1105,17 @@ int nilfs_set_file_dirty(struct inode *inode, unsigned int nr_dirty) int __nilfs_mark_inode_dirty(struct inode *inode, int flags) { + struct the_nilfs *nilfs = inode->i_sb->s_fs_info; struct buffer_head *ibh; int err; + /* + * Do not dirty inodes after the log writer has been detached + * and its nilfs_root struct has been freed. + */ + if (unlikely(nilfs_purging(nilfs))) + return 0; + err = nilfs_load_inode_block(inode, &ibh); if (unlikely(err)) { nilfs_warn(inode->i_sb, diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index 5c310eb7dd0c..9e865732d352 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -2845,6 +2845,7 @@ void nilfs_detach_log_writer(struct super_block *sb) nilfs_segctor_destroy(nilfs->ns_writer); nilfs->ns_writer = NULL; } + set_nilfs_purging(nilfs); /* Force to free the list of dirty files */ spin_lock(&nilfs->ns_inode_lock); @@ -2857,4 +2858,5 @@ void nilfs_detach_log_writer(struct super_block *sb) up_write(&nilfs->ns_segctor_sem); nilfs_dispose_list(nilfs, &garbage_list, 1); + clear_nilfs_purging(nilfs); } diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h index 987c8ab02aee..b36ba588ee69 100644 --- a/fs/nilfs2/the_nilfs.h +++ b/fs/nilfs2/the_nilfs.h @@ -29,6 +29,7 @@ enum { THE_NILFS_DISCONTINUED, /* 'next' pointer chain has broken */ THE_NILFS_GC_RUNNING, /* gc process is running */ THE_NILFS_SB_DIRTY, /* super block is dirty */ + THE_NILFS_PURGING, /* disposing dirty files for cleanup */ }; /** @@ -208,6 +209,7 @@ THE_NILFS_FNS(INIT, init) THE_NILFS_FNS(DISCONTINUED, discontinued) THE_NILFS_FNS(GC_RUNNING, gc_running) THE_NILFS_FNS(SB_DIRTY, sb_dirty) +THE_NILFS_FNS(PURGING, purging) /* * Mount option operations From c47d0178ad86ad57de5e4deb64133cff518dcc10 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 4 Aug 2023 23:24:54 +0800 Subject: [PATCH 298/513] bpf: allow precision tracking for programs with subprogs [ Upstream commit be2ef8161572ec1973124ebc50f56dafc2925e07 ] Stop forcing precise=true for SCALAR registers when BPF program has any subprograms. Current restriction means that any BPF program, as soon as it uses subprograms, will end up not getting any of the precision tracking benefits in reduction of number of verified states. 
This patch keeps the fallback mark_all_scalars_precise() behavior if precise marking has to cross function frames. E.g., if subprogram requires R1 (first input arg) to be marked precise, ideally we'd need to backtrack to the parent function and keep marking R1 and its dependencies as precise. But right now we give up and force all the SCALARs in any of the current and parent states to be forced to precise=true. We can lift that restriction in the future.

But this patch fixes two issues identified when trying to enable precision tracking for subprogs.

First, prevent "escaping" from top-most state in a global subprog. While with entry-level BPF program we never end up requesting precision for R1-R5 registers, because R2-R5 are not initialized (and so not readable in correct BPF program), and R1 is PTR_TO_CTX, not SCALAR, and so is implicitly precise. With global subprogs, though, it's different, as global subprog a) can have up to 5 SCALAR input arguments, which might get marked as precise=true and b) it is validated in isolation from its main entry BPF program. b) means that we can end up exhausting parent state chain and still not mark all registers in reg_mask as precise, which would lead to verifier bug warning.

To handle that, we need to consider two cases. First, if the very first state is not immediately "checkpointed" (i.e., stored in state lookup hashtable), it will get correct first_insn_idx and last_insn_idx instruction set during state checkpointing. As such, this case is already handled and __mark_chain_precision() already handles that by just doing nothing when we reach to the very first parent state. st->parent will be NULL and we'll just stop. Perhaps some extra check for reg_mask and stack_mask is due here, but this patch doesn't address that issue.

More problematic second case is when global function's initial state is immediately checkpointed before we manage to process the very first instruction. This is happening because when there is a call to global subprog from the main program the very first subprog's instruction is marked as pruning point, so before we manage to process first instruction we have to check and checkpoint state. This patch adds a special handling for such "empty" state, which is identified by having st->last_insn_idx set to -1. In such case, we check that we are indeed validating global subprog, and with some sanity checking we mark input args as precise if requested.

Note that we also initialize state->first_insn_idx with correct start insn_idx offset. For main program zero is correct value, but for any subprog it's quite confusing to not have first_insn_idx set. This doesn't have any functional impact, but helps with debugging and state printing. We also explicitly initialize state->last_insns_idx instead of relying on is_state_visited() to do this with env->prev_insns_idx, which will be -1 on the very first instruction.

This concludes necessary changes to handle specifically global subprog's precision tracking.

Second identified problem was missed handling of BPF helper functions that call into subprogs (e.g., bpf_loop and few others). From precision tracking and backtracking logic's standpoint those are effectively calls into subprogs and should be called as BPF_PSEUDO_CALL calls.

This patch takes the least intrusive way and just checks against a short list of current BPF helpers that do call subprogs, encapsulated in is_callback_calling_function() function.
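For readers less familiar with callback-calling helpers, here is a minimal sketch (not taken from this patch; map and program names are invented, libbpf conventions assumed) of a program where a helper, rather than a direct BPF call instruction, transfers control to a BPF subprog. This is why backtracking has to treat such helpers like BPF_PSEUDO_CALL:

  #include <linux/bpf.h>
  #include <bpf/bpf_helpers.h>

  struct {
          __uint(type, BPF_MAP_TYPE_ARRAY);
          __uint(max_entries, 8);
          __type(key, __u32);
          __type(value, __u64);
  } counters SEC(".maps");

  /* Run by the helper once per element; returning 0 keeps iterating. */
  static long sum_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *ctx)
  {
          *(__u64 *)ctx += *val;
          return 0;
  }

  SEC("xdp")
  int sum_prog(struct xdp_md *xdp)
  {
          __u64 sum = 0;

          /* The helper, not an explicit BPF call, invokes sum_cb. */
          bpf_for_each_map_elem(&counters, sum_cb, &sum, 0);
          return sum ? XDP_PASS : XDP_DROP;
  }

  char _license[] SEC("license") = "GPL";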
But to prevent accidentally forgetting to add new BPF helpers to this "list", we also do a sanity check in __check_func_call, which has to be called for each such special BPF helper, to validate that BPF helper is indeed recognized as callback-calling one. This should catch any missed checks in the future. Adding some special flags to be added in function proto definitions seemed like an overkill in this case. With the above changes, it's possible to remove forceful setting of reg->precise to true in __mark_reg_unknown, which turns on precision tracking both inside subprogs and entry progs that have subprogs. No warnings or errors were detected across all the selftests, but also when validating with veristat against internal Meta BPF objects and Cilium objects. Further, in some BPF programs there are noticeable reduction in number of states and instructions validated due to more effective precision tracking, especially benefiting syncookie test. $ ./veristat -C -e file,prog,insns,states ~/baseline-results.csv ~/subprog-precise-results.csv | grep -v '+0' File Program Total insns (A) Total insns (B) Total insns (DIFF) Total states (A) Total states (B) Total states (DIFF) ---------------------------------------- -------------------------- --------------- --------------- ------------------ ---------------- ---------------- ------------------- pyperf600_bpf_loop.bpf.linked1.o on_event 3966 3678 -288 (-7.26%) 306 276 -30 (-9.80%) pyperf_global.bpf.linked1.o on_event 7563 7530 -33 (-0.44%) 520 517 -3 (-0.58%) pyperf_subprogs.bpf.linked1.o on_event 36358 36934 +576 (+1.58%) 2499 2531 +32 (+1.28%) setget_sockopt.bpf.linked1.o skops_sockopt 3965 4038 +73 (+1.84%) 343 347 +4 (+1.17%) test_cls_redirect_subprogs.bpf.linked1.o cls_redirect 64965 64901 -64 (-0.10%) 4619 4612 -7 (-0.15%) test_misc_tcp_hdr_options.bpf.linked1.o misc_estab 1491 1307 -184 (-12.34%) 110 100 -10 (-9.09%) test_pkt_access.bpf.linked1.o test_pkt_access 354 349 -5 (-1.41%) 25 24 -1 (-4.00%) test_sock_fields.bpf.linked1.o egress_read_sock_fields 435 375 -60 (-13.79%) 22 20 -2 (-9.09%) test_sysctl_loop2.bpf.linked1.o sysctl_tcp_mem 1508 1501 -7 (-0.46%) 29 28 -1 (-3.45%) test_tc_dtime.bpf.linked1.o egress_fwdns_prio100 468 435 -33 (-7.05%) 45 41 -4 (-8.89%) test_tc_dtime.bpf.linked1.o ingress_fwdns_prio100 398 408 +10 (+2.51%) 42 39 -3 (-7.14%) test_tc_dtime.bpf.linked1.o ingress_fwdns_prio101 1096 842 -254 (-23.18%) 97 73 -24 (-24.74%) test_tcp_hdr_options.bpf.linked1.o estab 2758 2408 -350 (-12.69%) 208 181 -27 (-12.98%) test_urandom_usdt.bpf.linked1.o urand_read_with_sema 466 448 -18 (-3.86%) 31 28 -3 (-9.68%) test_urandom_usdt.bpf.linked1.o urand_read_without_sema 466 448 -18 (-3.86%) 31 28 -3 (-9.68%) test_urandom_usdt.bpf.linked1.o urandlib_read_with_sema 466 448 -18 (-3.86%) 31 28 -3 (-9.68%) test_urandom_usdt.bpf.linked1.o urandlib_read_without_sema 466 448 -18 (-3.86%) 31 28 -3 (-9.68%) test_xdp_noinline.bpf.linked1.o balancer_ingress_v6 4302 4294 -8 (-0.19%) 257 256 -1 (-0.39%) xdp_synproxy_kern.bpf.linked1.o syncookie_tc 583722 405757 -177965 (-30.49%) 35846 25735 -10111 (-28.21%) xdp_synproxy_kern.bpf.linked1.o syncookie_xdp 609123 479055 -130068 (-21.35%) 35452 29145 -6307 (-17.79%) ---------------------------------------- -------------------------- --------------- --------------- ------------------ ---------------- ---------------- ------------------- Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20221104163649.121784-4-andrii@kernel.org Signed-off-by: Alexei Starovoitov Stable-dep-of: ecdf985d7615 
("bpf: track immediate values written to stack by BPF_ST instruction") Signed-off-by: Pu Lehui Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/verifier.c | 59 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 58 insertions(+), 1 deletion(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index e1848a2a7230..d61f670f279d 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -517,6 +517,12 @@ static bool is_ptr_cast_function(enum bpf_func_id func_id) func_id == BPF_FUNC_skc_to_tcp_request_sock; } +static bool is_callback_calling_function(enum bpf_func_id func_id) +{ + return func_id == BPF_FUNC_for_each_map_elem || + func_id == BPF_FUNC_timer_set_callback; +} + static bool is_cmpxchg_insn(const struct bpf_insn *insn) { return BPF_CLASS(insn->code) == BPF_STX && @@ -1446,7 +1452,7 @@ static void __mark_reg_unknown(const struct bpf_verifier_env *env, reg->type = SCALAR_VALUE; reg->var_off = tnum_unknown; reg->frameno = 0; - reg->precise = env->subprog_cnt > 1 || !env->bpf_capable; + reg->precise = !env->bpf_capable; __mark_reg_unbounded(reg); } @@ -2267,6 +2273,11 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, */ if (insn->src_reg == BPF_PSEUDO_KFUNC_CALL && insn->imm == 0) return -ENOTSUPP; + /* BPF helpers that invoke callback subprogs are + * equivalent to BPF_PSEUDO_CALL above + */ + if (insn->src_reg == 0 && is_callback_calling_function(insn->imm)) + return -ENOTSUPP; /* regular helper call sets R0 */ *reg_mask &= ~1; if (*reg_mask & 0x3f) { @@ -2445,12 +2456,42 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r return 0; if (!reg_mask && !stack_mask) return 0; + for (;;) { DECLARE_BITMAP(mask, 64); u32 history = st->jmp_history_cnt; if (env->log.level & BPF_LOG_LEVEL) verbose(env, "last_idx %d first_idx %d\n", last_idx, first_idx); + + if (last_idx < 0) { + /* we are at the entry into subprog, which + * is expected for global funcs, but only if + * requested precise registers are R1-R5 + * (which are global func's input arguments) + */ + if (st->curframe == 0 && + st->frame[0]->subprogno > 0 && + st->frame[0]->callsite == BPF_MAIN_FUNC && + stack_mask == 0 && (reg_mask & ~0x3e) == 0) { + bitmap_from_u64(mask, reg_mask); + for_each_set_bit(i, mask, 32) { + reg = &st->frame[0]->regs[i]; + if (reg->type != SCALAR_VALUE) { + reg_mask &= ~(1u << i); + continue; + } + reg->precise = true; + } + return 0; + } + + verbose(env, "BUG backtracing func entry subprog %d reg_mask %x stack_mask %llx\n", + st->frame[0]->subprogno, reg_mask, stack_mask); + WARN_ONCE(1, "verifier backtracking bug"); + return -EFAULT; + } + for (i = last_idx;;) { if (skip_first) { err = 0; @@ -5806,6 +5847,10 @@ typedef int (*set_callee_state_fn)(struct bpf_verifier_env *env, struct bpf_func_state *callee, int insn_idx); +static int set_callee_state(struct bpf_verifier_env *env, + struct bpf_func_state *caller, + struct bpf_func_state *callee, int insn_idx); + static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn, int *insn_idx, int subprog, set_callee_state_fn set_callee_state_cb) @@ -5856,6 +5901,16 @@ static int __check_func_call(struct bpf_verifier_env *env, struct bpf_insn *insn } } + /* set_callee_state is used for direct subprog calls, but we are + * interested in validating only BPF helpers that can call subprogs as + * callbacks + */ + if (set_callee_state_cb != set_callee_state && !is_callback_calling_function(insn->imm)) { + verbose(env, "verifier bug: helper %s#%d is not marked as 
callback-calling\n", + func_id_name(insn->imm), insn->imm); + return -EFAULT; + } + if (insn->code == (BPF_JMP | BPF_CALL) && insn->src_reg == 0 && insn->imm == BPF_FUNC_timer_set_callback) { @@ -13209,6 +13264,8 @@ static int do_check_common(struct bpf_verifier_env *env, int subprog) BPF_MAIN_FUNC /* callsite */, 0 /* frameno */, subprog); + state->first_insn_idx = env->subprog_info[subprog].start; + state->last_insn_idx = -1; regs = state->frame[state->curframe]->regs; if (subprog || env->prog->type == BPF_PROG_TYPE_EXT) { From 2516deeb872ab9dda3bf4c66cf24b1ee900f25bf Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 4 Aug 2023 23:24:55 +0800 Subject: [PATCH 299/513] bpf: stop setting precise in current state [ Upstream commit f63181b6ae79fd3b034cde641db774268c2c3acf ] Setting reg->precise to true in current state is not necessary from correctness standpoint, but it does pessimise the whole precision (or rather "imprecision", because that's what we want to keep as much as possible) tracking. Why is somewhat subtle and my best attempt to explain this is recorded in an extensive comment for __mark_chain_precise() function. Some more careful thinking and code reading is probably required still to grok this completely, unfortunately. Whiteboarding and a bunch of extra handwaiving in person would be even more helpful, but is deemed impractical in Git commit. Next patch pushes this imprecision property even further, building on top of the insights described in this patch. End results are pretty nice, we get reduction in number of total instructions and states verified due to a better states reuse, as some of the states are now more generic and permissive due to less unnecessary precise=true requirements. SELFTESTS RESULTS ================= $ ./veristat -C -e file,prog,insns,states ~/subprog-precise-results.csv ~/imprecise-early-results.csv | grep -v '+0' File Program Total insns (A) Total insns (B) Total insns (DIFF) Total states (A) Total states (B) Total states (DIFF) --------------------------------------- ---------------------- --------------- --------------- ------------------ ---------------- ---------------- ------------------- bpf_iter_ksym.bpf.linked1.o dump_ksym 347 285 -62 (-17.87%) 20 19 -1 (-5.00%) pyperf600_bpf_loop.bpf.linked1.o on_event 3678 3736 +58 (+1.58%) 276 285 +9 (+3.26%) setget_sockopt.bpf.linked1.o skops_sockopt 4038 3947 -91 (-2.25%) 347 343 -4 (-1.15%) test_l4lb.bpf.linked1.o balancer_ingress 4559 2611 -1948 (-42.73%) 118 105 -13 (-11.02%) test_l4lb_noinline.bpf.linked1.o balancer_ingress 6279 6268 -11 (-0.18%) 237 236 -1 (-0.42%) test_misc_tcp_hdr_options.bpf.linked1.o misc_estab 1307 1303 -4 (-0.31%) 100 99 -1 (-1.00%) test_sk_lookup.bpf.linked1.o ctx_narrow_access 456 447 -9 (-1.97%) 39 38 -1 (-2.56%) test_sysctl_loop1.bpf.linked1.o sysctl_tcp_mem 1389 1384 -5 (-0.36%) 26 25 -1 (-3.85%) test_tc_dtime.bpf.linked1.o egress_fwdns_prio101 518 485 -33 (-6.37%) 51 46 -5 (-9.80%) test_tc_dtime.bpf.linked1.o egress_host 519 468 -51 (-9.83%) 50 44 -6 (-12.00%) test_tc_dtime.bpf.linked1.o ingress_fwdns_prio101 842 1000 +158 (+18.76%) 73 88 +15 (+20.55%) xdp_synproxy_kern.bpf.linked1.o syncookie_tc 405757 373173 -32584 (-8.03%) 25735 22882 -2853 (-11.09%) xdp_synproxy_kern.bpf.linked1.o syncookie_xdp 479055 371590 -107465 (-22.43%) 29145 22207 -6938 (-23.81%) --------------------------------------- ---------------------- --------------- --------------- ------------------ ---------------- ---------------- ------------------- Slight regression in 
test_tc_dtime.bpf.linked1.o/ingress_fwdns_prio101 is left for a follow up, there might be some more precision-related bugs in existing BPF verifier logic. CILIUM RESULTS ============== $ ./veristat -C -e file,prog,insns,states ~/subprog-precise-results-cilium.csv ~/imprecise-early-results-cilium.csv | grep -v '+0' File Program Total insns (A) Total insns (B) Total insns (DIFF) Total states (A) Total states (B) Total states (DIFF) ------------- ------------------------------ --------------- --------------- ------------------ ---------------- ---------------- ------------------- bpf_host.o cil_from_host 762 556 -206 (-27.03%) 43 37 -6 (-13.95%) bpf_host.o tail_handle_nat_fwd_ipv4 23541 23426 -115 (-0.49%) 1538 1537 -1 (-0.07%) bpf_host.o tail_nodeport_nat_egress_ipv4 33592 33566 -26 (-0.08%) 2163 2161 -2 (-0.09%) bpf_lxc.o tail_handle_nat_fwd_ipv4 23541 23426 -115 (-0.49%) 1538 1537 -1 (-0.07%) bpf_overlay.o tail_nodeport_nat_egress_ipv4 33581 33543 -38 (-0.11%) 2160 2157 -3 (-0.14%) bpf_xdp.o tail_handle_nat_fwd_ipv4 21659 20920 -739 (-3.41%) 1440 1376 -64 (-4.44%) bpf_xdp.o tail_handle_nat_fwd_ipv6 17084 17039 -45 (-0.26%) 907 905 -2 (-0.22%) bpf_xdp.o tail_lb_ipv4 73442 73430 -12 (-0.02%) 4370 4369 -1 (-0.02%) bpf_xdp.o tail_lb_ipv6 152114 151895 -219 (-0.14%) 6493 6479 -14 (-0.22%) bpf_xdp.o tail_nodeport_nat_egress_ipv4 17377 17200 -177 (-1.02%) 1125 1111 -14 (-1.24%) bpf_xdp.o tail_nodeport_nat_ingress_ipv6 6405 6397 -8 (-0.12%) 309 308 -1 (-0.32%) bpf_xdp.o tail_rev_nodeport_lb4 7126 6934 -192 (-2.69%) 414 402 -12 (-2.90%) bpf_xdp.o tail_rev_nodeport_lb6 18059 17905 -154 (-0.85%) 1105 1096 -9 (-0.81%) ------------- ------------------------------ --------------- --------------- ------------------ ---------------- ---------------- ------------------- Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20221104163649.121784-5-andrii@kernel.org Signed-off-by: Alexei Starovoitov Stable-dep-of: ecdf985d7615 ("bpf: track immediate values written to stack by BPF_ST instruction") Signed-off-by: Pu Lehui Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/verifier.c | 103 +++++++++++++++++++++++++++++++++++++----- 1 file changed, 91 insertions(+), 12 deletions(-) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index d61f670f279d..402afbee6972 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2382,8 +2382,11 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, /* big hammer: mark all scalars precise in this path. * pop_stack may still get !precise scalars. + * We also skip current state and go straight to first parent state, + * because precision markings in current non-checkpointed state are + * not needed. See why in the comment in __mark_chain_precision below. */ - for (; st; st = st->parent) + for (st = st->parent; st; st = st->parent) { for (i = 0; i <= st->curframe; i++) { func = st->frame[i]; for (j = 0; j < BPF_REG_FP; j++) { @@ -2401,8 +2404,88 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, reg->precise = true; } } + } } +/* + * __mark_chain_precision() backtracks BPF program instruction sequence and + * chain of verifier states making sure that register *regno* (if regno >= 0) + * and/or stack slot *spi* (if spi >= 0) are marked as precisely tracked + * SCALARS, as well as any other registers and slots that contribute to + * a tracked state of given registers/stack slots, depending on specific BPF + * assembly instructions (see backtrack_insns() for exact instruction handling + * logic). 
This backtracking relies on recorded jmp_history and is able to + * traverse entire chain of parent states. This process ends only when all the + * necessary registers/slots and their transitive dependencies are marked as + * precise. + * + * One important and subtle aspect is that precise marks *do not matter* in + * the currently verified state (current state). It is important to understand + * why this is the case. + * + * First, note that current state is the state that is not yet "checkpointed", + * i.e., it is not yet put into env->explored_states, and it has no children + * states as well. It's ephemeral, and can end up either a) being discarded if + * compatible explored state is found at some point or BPF_EXIT instruction is + * reached or b) checkpointed and put into env->explored_states, branching out + * into one or more children states. + * + * In the former case, precise markings in current state are completely + * ignored by state comparison code (see regsafe() for details). Only + * checkpointed ("old") state precise markings are important, and if old + * state's register/slot is precise, regsafe() assumes current state's + * register/slot as precise and checks value ranges exactly and precisely. If + * states turn out to be compatible, current state's necessary precise + * markings and any required parent states' precise markings are enforced + * after the fact with propagate_precision() logic, after the fact. But it's + * important to realize that in this case, even after marking current state + * registers/slots as precise, we immediately discard current state. So what + * actually matters is any of the precise markings propagated into current + * state's parent states, which are always checkpointed (due to b) case above). + * As such, for scenario a) it doesn't matter if current state has precise + * markings set or not. + * + * Now, for the scenario b), checkpointing and forking into child(ren) + * state(s). Note that before current state gets to checkpointing step, any + * processed instruction always assumes precise SCALAR register/slot + * knowledge: if precise value or range is useful to prune jump branch, BPF + * verifier takes this opportunity enthusiastically. Similarly, when + * register's value is used to calculate offset or memory address, exact + * knowledge of SCALAR range is assumed, checked, and enforced. So, similar to + * what we mentioned above about state comparison ignoring precise markings + * during state comparison, BPF verifier ignores and also assumes precise + * markings *at will* during instruction verification process. But as verifier + * assumes precision, it also propagates any precision dependencies across + * parent states, which are not yet finalized, so can be further restricted + * based on new knowledge gained from restrictions enforced by their children + * states. This is so that once those parent states are finalized, i.e., when + * they have no more active children state, state comparison logic in + * is_state_visited() would enforce strict and precise SCALAR ranges, if + * required for correctness. + * + * To build a bit more intuition, note also that once a state is checkpointed, + * the path we took to get to that state is not important. This is crucial + * property for state pruning. When state is checkpointed and finalized at + * some instruction index, it can be correctly and safely used to "short + * circuit" any *compatible* state that reaches exactly the same instruction + * index. 
I.e., if we jumped to that instruction from a completely different + * code path than original finalized state was derived from, it doesn't + * matter, current state can be discarded because from that instruction + * forward having a compatible state will ensure we will safely reach the + * exit. States describe preconditions for further exploration, but completely + * forget the history of how we got here. + * + * This also means that even if we needed precise SCALAR range to get to + * finalized state, but from that point forward *that same* SCALAR register is + * never used in a precise context (i.e., it's precise value is not needed for + * correctness), it's correct and safe to mark such register as "imprecise" + * (i.e., precise marking set to false). This is what we rely on when we do + * not set precise marking in current state. If no child state requires + * precision for any given SCALAR register, it's safe to dictate that it can + * be imprecise. If any child state does require this register to be precise, + * we'll mark it precise later retroactively during precise markings + * propagation from child state to parent states. + */ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno, int spi) { @@ -2420,6 +2503,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r if (!env->bpf_capable) return 0; + /* Do sanity checks against current state of register and/or stack + * slot, but don't set precise flag in current state, as precision + * tracking in the current state is unnecessary. + */ func = st->frame[frame]; if (regno >= 0) { reg = &func->regs[regno]; @@ -2427,11 +2514,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r WARN_ONCE(1, "backtracing misuse"); return -EFAULT; } - if (!reg->precise) - new_marks = true; - else - reg_mask = 0; - reg->precise = true; + new_marks = true; } while (spi >= 0) { @@ -2444,11 +2527,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int r stack_mask = 0; break; } - if (!reg->precise) - new_marks = true; - else - stack_mask = 0; - reg->precise = true; + new_marks = true; break; } @@ -10356,7 +10435,7 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold, if (env->explore_alu_limits) return false; if (rcur->type == SCALAR_VALUE) { - if (!rold->precise && !rcur->precise) + if (!rold->precise) return true; /* new val must satisfy old val knowledge */ return range_within(rold, rcur) && From 683d2969a0820072ddc05dbcc667664f7a34ac90 Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 4 Aug 2023 23:24:56 +0800 Subject: [PATCH 300/513] bpf: aggressively forget precise markings during state checkpointing [ Upstream commit 7a830b53c17bbadcf99f778f28aaaa4e6c41df5f ] Exploit the property of about-to-be-checkpointed state to be able to forget all precise markings up to that point even more aggressively. We now clear all potentially inherited precise markings right before checkpointing and branching off into child state. If any of children states require precise knowledge of any SCALAR register, those will be propagated backwards later on before this state is finalized, preserving correctness. There is a single selftests BPF program change, but tremendous one: 25x reduction in number of verified instructions and states in trace_virtqueue_add_sgs. Cilium results are more modest, but happen across wider range of programs. 
SELFTESTS RESULTS ================= $ ./veristat -C -e file,prog,insns,states ~/imprecise-early-results.csv ~/imprecise-aggressive-results.csv | grep -v '+0' File Program Total insns (A) Total insns (B) Total insns (DIFF) Total states (A) Total states (B) Total states (DIFF) ------------------- ----------------------- --------------- --------------- ------------------ ---------------- ---------------- ------------------- loop6.bpf.linked1.o trace_virtqueue_add_sgs 398057 15114 -382943 (-96.20%) 8717 336 -8381 (-96.15%) ------------------- ----------------------- --------------- --------------- ------------------ ---------------- ---------------- ------------------- CILIUM RESULTS ============== $ ./veristat -C -e file,prog,insns,states ~/imprecise-early-results-cilium.csv ~/imprecise-aggressive-results-cilium.csv | grep -v '+0' File Program Total insns (A) Total insns (B) Total insns (DIFF) Total states (A) Total states (B) Total states (DIFF) ------------- -------------------------------- --------------- --------------- ------------------ ---------------- ---------------- ------------------- bpf_host.o tail_handle_nat_fwd_ipv4 23426 23221 -205 (-0.88%) 1537 1515 -22 (-1.43%) bpf_host.o tail_handle_nat_fwd_ipv6 13009 12904 -105 (-0.81%) 719 708 -11 (-1.53%) bpf_host.o tail_nodeport_nat_ingress_ipv6 5261 5196 -65 (-1.24%) 247 243 -4 (-1.62%) bpf_host.o tail_nodeport_nat_ipv6_egress 3446 3406 -40 (-1.16%) 203 198 -5 (-2.46%) bpf_lxc.o tail_handle_nat_fwd_ipv4 23426 23221 -205 (-0.88%) 1537 1515 -22 (-1.43%) bpf_lxc.o tail_handle_nat_fwd_ipv6 13009 12904 -105 (-0.81%) 719 708 -11 (-1.53%) bpf_lxc.o tail_ipv4_ct_egress 5074 4897 -177 (-3.49%) 255 248 -7 (-2.75%) bpf_lxc.o tail_ipv4_ct_ingress 5100 4923 -177 (-3.47%) 255 248 -7 (-2.75%) bpf_lxc.o tail_ipv4_ct_ingress_policy_only 5100 4923 -177 (-3.47%) 255 248 -7 (-2.75%) bpf_lxc.o tail_ipv6_ct_egress 4558 4536 -22 (-0.48%) 188 187 -1 (-0.53%) bpf_lxc.o tail_ipv6_ct_ingress 4578 4556 -22 (-0.48%) 188 187 -1 (-0.53%) bpf_lxc.o tail_ipv6_ct_ingress_policy_only 4578 4556 -22 (-0.48%) 188 187 -1 (-0.53%) bpf_lxc.o tail_nodeport_nat_ingress_ipv6 5261 5196 -65 (-1.24%) 247 243 -4 (-1.62%) bpf_overlay.o tail_nodeport_nat_ingress_ipv6 5261 5196 -65 (-1.24%) 247 243 -4 (-1.62%) bpf_overlay.o tail_nodeport_nat_ipv6_egress 3482 3442 -40 (-1.15%) 204 201 -3 (-1.47%) bpf_xdp.o tail_nodeport_nat_egress_ipv4 17200 15619 -1581 (-9.19%) 1111 1010 -101 (-9.09%) ------------- -------------------------------- --------------- --------------- ------------------ ---------------- ---------------- ------------------- Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20221104163649.121784-6-andrii@kernel.org Signed-off-by: Alexei Starovoitov Stable-dep-of: ecdf985d7615 ("bpf: track immediate values written to stack by BPF_ST instruction") Signed-off-by: Pu Lehui Signed-off-by: Greg Kroah-Hartman --- kernel/bpf/verifier.c | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c index 402afbee6972..ecf4332ff312 100644 --- a/kernel/bpf/verifier.c +++ b/kernel/bpf/verifier.c @@ -2407,6 +2407,31 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, } } +static void mark_all_scalars_imprecise(struct bpf_verifier_env *env, struct bpf_verifier_state *st) +{ + struct bpf_func_state *func; + struct bpf_reg_state *reg; + int i, j; + + for (i = 0; i <= st->curframe; i++) { + func = st->frame[i]; + for (j = 0; j < BPF_REG_FP; j++) { + reg = &func->regs[j]; + if (reg->type != 
SCALAR_VALUE) + continue; + reg->precise = false; + } + for (j = 0; j < func->allocated_stack / BPF_REG_SIZE; j++) { + if (!is_spilled_reg(&func->stack[j])) + continue; + reg = &func->stack[j].spilled_ptr; + if (reg->type != SCALAR_VALUE) + continue; + reg->precise = false; + } + } +} + /* * __mark_chain_precision() backtracks BPF program instruction sequence and * chain of verifier states making sure that register *regno* (if regno >= 0) @@ -2485,6 +2510,14 @@ static void mark_all_scalars_precise(struct bpf_verifier_env *env, * be imprecise. If any child state does require this register to be precise, * we'll mark it precise later retroactively during precise markings * propagation from child state to parent states. + * + * Skipping precise marking setting in current state is a mild version of + * relying on the above observation. But we can utilize this property even + * more aggressively by proactively forgetting any precise marking in the + * current state (which we inherited from the parent state), right before we + * checkpoint it and branch off into new child state. This is done by + * mark_all_scalars_imprecise() to hopefully get more permissive and generic + * finalized states which help in short circuiting more future states. */ static int __mark_chain_precision(struct bpf_verifier_env *env, int frame, int regno, int spi) @@ -10984,6 +11017,10 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx) env->prev_jmps_processed = env->jmps_processed; env->prev_insn_processed = env->insn_processed; + /* forget precise markings we inherited, see __mark_chain_precision */ + if (env->bpf_capable) + mark_all_scalars_imprecise(env, cur); + /* add new state to the head of linked list */ new = &new_sl->state; err = copy_verifier_state(new, cur); From ee701208f4ccb1f083ce3db3ab4e9e7c3ae57dad Mon Sep 17 00:00:00 2001 From: Andrii Nakryiko Date: Fri, 4 Aug 2023 23:24:57 +0800 Subject: [PATCH 301/513] selftests/bpf: make test_align selftest more robust [ Upstream commit 4f999b767769b76378c3616c624afd6f4bb0d99f ] test_align selftest relies on BPF verifier log emitting register states for specific instructions in expected format. Unfortunately, BPF verifier precision backtracking log interferes with such expectations. And instruction on which precision propagation happens sometimes don't output full expected register states. This does indeed look like something to be improved in BPF verifier, but is beyond the scope of this patch set. So to make test_align a bit more robust, inject few dummy R4 = R5 instructions which capture desired state of R5 and won't have precision tracking logs on them. This fixes tests until we can improve BPF verifier output in the presence of precision tracking. 
Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/r/20221104163649.121784-7-andrii@kernel.org Signed-off-by: Alexei Starovoitov Stable-dep-of: ecdf985d7615 ("bpf: track immediate values written to stack by BPF_ST instruction") Signed-off-by: Pu Lehui Signed-off-by: Greg Kroah-Hartman --- .../testing/selftests/bpf/prog_tests/align.c | 36 ++++++++++++------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/tools/testing/selftests/bpf/prog_tests/align.c b/tools/testing/selftests/bpf/prog_tests/align.c index 5861446d0777..7996ec07e0bd 100644 --- a/tools/testing/selftests/bpf/prog_tests/align.c +++ b/tools/testing/selftests/bpf/prog_tests/align.c @@ -2,7 +2,7 @@ #include #define MAX_INSNS 512 -#define MAX_MATCHES 16 +#define MAX_MATCHES 24 struct bpf_reg_match { unsigned int line; @@ -267,6 +267,7 @@ static struct bpf_align_test tests[] = { */ BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4), @@ -280,6 +281,7 @@ static struct bpf_align_test tests[] = { BPF_MOV64_REG(BPF_REG_5, BPF_REG_2), BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14), BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), + BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4), BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6), BPF_MOV64_REG(BPF_REG_4, BPF_REG_5), @@ -311,44 +313,52 @@ static struct bpf_align_test tests[] = { {15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, {15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Variable offset is added to R5 packet pointer, - * resulting in auxiliary alignment of 4. + * resulting in auxiliary alignment of 4. To avoid BPF + * verifier's precision backtracking logging + * interfering we also have a no-op R4 = R5 + * instruction to validate R5 state. We also check + * that R4 is what it should be in such case. */ - {18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {19, "R4_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {19, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5, resulting in * reg->off of 14. */ - {19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {20, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off * (14) which is 16. Then the variable offset is 4-byte * aligned, so the total offset is 4-byte aligned and * meets the load's requirements. */ - {23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, - {23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {24, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {24, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Constant offset is added to R5 packet pointer, * resulting in reg->off value of 14. */ - {26, "R5_w=pkt(id=0,off=14,r=8"}, + {27, "R5_w=pkt(id=0,off=14,r=8"}, /* Variable offset is added to R5, resulting in a - * variable offset of (4n). + * variable offset of (4n). See comment for insn #19 + * for R4 = R5 trick. 
*/ - {27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {29, "R4_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {29, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* Constant is added to R5 again, setting reg->off to 18. */ - {28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, + {30, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"}, /* And once more we add a variable; resulting var_off * is still (4n), fixed offset is not changed. * Also, we create a new reg->id. */ - {29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {32, "R4_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {32, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc)"}, /* At the time the word size load is performed from R5, * its total fixed offset is NET_IP_ALIGN + reg->off (18) * which is 20. Then the variable offset is (4n), so * the total offset is 4-byte aligned and meets the * load's requirements. */ - {33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, - {33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {35, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, + {35, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc)"}, }, }, { From 127277262110e730dad561e15c1e051020cd6242 Mon Sep 17 00:00:00 2001 From: Yonghong Song Date: Fri, 4 Aug 2023 23:24:58 +0800 Subject: [PATCH 302/513] selftests/bpf: Workaround verification failure for fexit_bpf2bpf/func_replace_return_code [ Upstream commit 63d78b7e8ca2d0eb8c687a355fa19d01b6fcc723 ] With latest llvm17, selftest fexit_bpf2bpf/func_replace_return_code has the following verification failure: 0: R1=ctx(off=0,imm=0) R10=fp0 ; int connect_v4_prog(struct bpf_sock_addr *ctx) 0: (bf) r7 = r1 ; R1=ctx(off=0,imm=0) R7_w=ctx(off=0,imm=0) 1: (b4) w6 = 0 ; R6_w=0 ; memset(&tuple.ipv4.saddr, 0, sizeof(tuple.ipv4.saddr)); ... ; return do_bind(ctx) ? 1 : 0; 179: (bf) r1 = r7 ; R1=ctx(off=0,imm=0) R7=ctx(off=0,imm=0) 180: (85) call pc+147 Func#3 is global and valid. Skipping. 181: R0_w=scalar() 181: (bc) w6 = w0 ; R0_w=scalar() R6_w=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) 182: (05) goto pc-129 ; } 54: (bc) w0 = w6 ; R0_w=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) R6_w=scalar(umax=4294967295,var_off=(0x0; 0xffffffff)) 55: (95) exit At program exit the register R0 has value (0x0; 0xffffffff) should have been in (0x0; 0x1) processed 281 insns (limit 1000000) max_states_per_insn 1 total_states 26 peak_states 26 mark_read 13 -- END PROG LOAD LOG -- libbpf: prog 'connect_v4_prog': failed to load: -22 The corresponding source code: __attribute__ ((noinline)) int do_bind(struct bpf_sock_addr *ctx) { struct sockaddr_in sa = {}; sa.sin_family = AF_INET; sa.sin_port = bpf_htons(0); sa.sin_addr.s_addr = bpf_htonl(SRC_REWRITE_IP4); if (bpf_bind(ctx, (struct sockaddr *)&sa, sizeof(sa)) != 0) return 0; return 1; } ... SEC("cgroup/connect4") int connect_v4_prog(struct bpf_sock_addr *ctx) { ... return do_bind(ctx) ? 1 : 0; } Insn 180 is a call to 'do_bind'. The call's return value is also the return value for the program. Since do_bind() returns 0/1, so it is legitimate for compiler to optimize 'return do_bind(ctx) ? 1 : 0' to 'return do_bind(ctx)'. However, such optimization breaks verifier as the return value of 'do_bind()' is marked as any scalar which violates the requirement of prog return value 0/1. 
There are two ways to fix this problem, (1) changing 'return 1' in do_bind() to e.g. 'return 10' so the compiler has to do 'do_bind(ctx) ? 1 :0', or (2) suggested by Andrii, marking do_bind() with __weak attribute so the compiler cannot make any assumption on do_bind() return value. This patch adopted adding __weak approach which is simpler and more resistant to potential compiler optimizations. Suggested-by: Andrii Nakryiko Signed-off-by: Yonghong Song Signed-off-by: Andrii Nakryiko Link: https://lore.kernel.org/bpf/20230310012410.2920570-1-yhs@fb.com Signed-off-by: Pu Lehui Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/bpf/progs/connect4_prog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/bpf/progs/connect4_prog.c b/tools/testing/selftests/bpf/progs/connect4_prog.c index a943d394fd3a..38ab1ce32e57 100644 --- a/tools/testing/selftests/bpf/progs/connect4_prog.c +++ b/tools/testing/selftests/bpf/progs/connect4_prog.c @@ -33,7 +33,7 @@ int _version SEC("version") = 1; -__attribute__ ((noinline)) +__attribute__ ((noinline)) __weak int do_bind(struct bpf_sock_addr *ctx) { struct sockaddr_in sa = {}; From 697bc234632cfb44090dcd6eec39f30c99fdbd4b Mon Sep 17 00:00:00 2001 From: Ilya Leoshkevich Date: Fri, 4 Aug 2023 23:24:59 +0800 Subject: [PATCH 303/513] selftests/bpf: Fix sk_assign on s390x [ Upstream commit 7ce878ca81bca7811e669db4c394b86780e0dbe4 ] sk_assign is failing on an s390x machine running Debian "bookworm" for 2 reasons: legacy server_map definition and uninitialized addrlen in recvfrom() call. Fix by adding a new-style server_map definition and dropping addrlen (recvfrom() allows NULL values for src_addr and addrlen). Since the test should support tc built without libbpf, build the prog twice: with the old-style definition and with the new-style definition, then select the right one at runtime. This could be done at compile time too, but this would not be cross-compilation friendly. Signed-off-by: Ilya Leoshkevich Link: https://lore.kernel.org/r/20230129190501.1624747-2-iii@linux.ibm.com Signed-off-by: Alexei Starovoitov Signed-off-by: Pu Lehui Signed-off-by: Greg Kroah-Hartman --- .../selftests/bpf/prog_tests/sk_assign.c | 25 ++++++++++++++----- .../selftests/bpf/progs/test_sk_assign.c | 11 ++++++++ .../bpf/progs/test_sk_assign_libbpf.c | 3 +++ 3 files changed, 33 insertions(+), 6 deletions(-) create mode 100644 tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c diff --git a/tools/testing/selftests/bpf/prog_tests/sk_assign.c b/tools/testing/selftests/bpf/prog_tests/sk_assign.c index 3a469099f30d..e09c5239a595 100644 --- a/tools/testing/selftests/bpf/prog_tests/sk_assign.c +++ b/tools/testing/selftests/bpf/prog_tests/sk_assign.c @@ -29,7 +29,23 @@ static int stop, duration; static bool configure_stack(void) { + char tc_version[128]; char tc_cmd[BUFSIZ]; + char *prog; + FILE *tc; + + /* Check whether tc is built with libbpf. 
*/ + tc = popen("tc -V", "r"); + if (CHECK_FAIL(!tc)) + return false; + if (CHECK_FAIL(!fgets(tc_version, sizeof(tc_version), tc))) + return false; + if (strstr(tc_version, ", libbpf ")) + prog = "test_sk_assign_libbpf.o"; + else + prog = "test_sk_assign.o"; + if (CHECK_FAIL(pclose(tc))) + return false; /* Move to a new networking namespace */ if (CHECK_FAIL(unshare(CLONE_NEWNET))) @@ -46,8 +62,8 @@ configure_stack(void) /* Load qdisc, BPF program */ if (CHECK_FAIL(system("tc qdisc add dev lo clsact"))) return false; - sprintf(tc_cmd, "%s %s %s %s", "tc filter add dev lo ingress bpf", - "direct-action object-file ./test_sk_assign.o", + sprintf(tc_cmd, "%s %s %s %s %s", "tc filter add dev lo ingress bpf", + "direct-action object-file", prog, "section classifier/sk_assign_test", (env.verbosity < VERBOSE_VERY) ? " 2>/dev/null" : "verbose"); if (CHECK(system(tc_cmd), "BPF load failed;", @@ -129,15 +145,12 @@ get_port(int fd) static ssize_t rcv_msg(int srv_client, int type) { - struct sockaddr_storage ss; char buf[BUFSIZ]; - socklen_t slen; if (type == SOCK_STREAM) return read(srv_client, &buf, sizeof(buf)); else - return recvfrom(srv_client, &buf, sizeof(buf), 0, - (struct sockaddr *)&ss, &slen); + return recvfrom(srv_client, &buf, sizeof(buf), 0, NULL, NULL); } static int diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign.c b/tools/testing/selftests/bpf/progs/test_sk_assign.c index 1ecd987005d2..77fd42f835fc 100644 --- a/tools/testing/selftests/bpf/progs/test_sk_assign.c +++ b/tools/testing/selftests/bpf/progs/test_sk_assign.c @@ -16,6 +16,16 @@ #include #include +#if defined(IPROUTE2_HAVE_LIBBPF) +/* Use a new-style map definition. */ +struct { + __uint(type, BPF_MAP_TYPE_SOCKMAP); + __type(key, int); + __type(value, __u64); + __uint(pinning, LIBBPF_PIN_BY_NAME); + __uint(max_entries, 1); +} server_map SEC(".maps"); +#else /* Pin map under /sys/fs/bpf/tc/globals/ */ #define PIN_GLOBAL_NS 2 @@ -35,6 +45,7 @@ struct { .max_elem = 1, .pinning = PIN_GLOBAL_NS, }; +#endif int _version SEC("version") = 1; char _license[] SEC("license") = "GPL"; diff --git a/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c new file mode 100644 index 000000000000..dcf46adfda04 --- /dev/null +++ b/tools/testing/selftests/bpf/progs/test_sk_assign_libbpf.c @@ -0,0 +1,3 @@ +// SPDX-License-Identifier: GPL-2.0 +#define IPROUTE2_HAVE_LIBBPF +#include "test_sk_assign.c" From a7cedc2b76124eece96679214ab4639eaeee95a1 Mon Sep 17 00:00:00 2001 From: Aleksa Sarai Date: Sat, 12 Aug 2023 07:19:05 -0600 Subject: [PATCH 304/513] io_uring: correct check for O_TMPFILE Commit 72dbde0f2afbe4af8e8595a89c650ae6b9d9c36f upstream. O_TMPFILE is actually __O_TMPFILE|O_DIRECTORY. This means that the old check for whether RESOLVE_CACHED can be used would incorrectly think that O_DIRECTORY could not be used with RESOLVE_CACHED. 
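A small standalone sketch of why the old mask misfires; the flag values are copied from include/uapi/asm-generic/fcntl.h (most architectures) and renamed here so the demo does not depend on libc headers, which fold the bits differently:

  #include <stdio.h>

  /* Illustrative copies of the kernel's flag values. */
  #define VAL_O_DIRECTORY  00200000                        /* O_DIRECTORY  */
  #define VAL___O_TMPFILE  020000000                       /* __O_TMPFILE  */
  #define VAL_O_TMPFILE    (VAL___O_TMPFILE | VAL_O_DIRECTORY)

  int main(void)
  {
          unsigned int flags = VAL_O_DIRECTORY;   /* a plain directory open */

          /* Old test: fires for any O_DIRECTORY open -> prints 1. */
          printf("%d\n", (flags & VAL_O_TMPFILE) != 0);
          /* Fixed test: fires only when __O_TMPFILE is set -> prints 0. */
          printf("%d\n", (flags & VAL___O_TMPFILE) != 0);
          return 0;
  }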
Cc: stable@vger.kernel.org # v5.12+ Fixes: 3a81fd02045c ("io_uring: enable LOOKUP_CACHED path resolution for filename lookups") Signed-off-by: Aleksa Sarai Link: https://lore.kernel.org/r/20230807-resolve_cached-o_tmpfile-v3-1-e49323e1ef6f@cyphar.com Signed-off-by: Jens Axboe Signed-off-by: Greg Kroah-Hartman --- io_uring/io_uring.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c index 6f1d88bfd690..7c98a820c8dd 100644 --- a/io_uring/io_uring.c +++ b/io_uring/io_uring.c @@ -4375,9 +4375,11 @@ static int io_openat2(struct io_kiocb *req, unsigned int issue_flags) if (issue_flags & IO_URING_F_NONBLOCK) { /* * Don't bother trying for O_TRUNC, O_CREAT, or O_TMPFILE open, - * it'll always -EAGAIN + * it'll always -EAGAIN. Note that we test for __O_TMPFILE + * because O_TMPFILE includes O_DIRECTORY, which isn't a flag + * we need to force async for. */ - if (req->open.how.flags & (O_TRUNC | O_CREAT | O_TMPFILE)) + if (req->open.how.flags & (O_TRUNC | O_CREAT | __O_TMPFILE)) return -EAGAIN; op.lookup_flags |= LOOKUP_CACHED; op.open_flag |= O_NONBLOCK; From 2df8ae1e42b8e2a9cb78e99af22e7bf44d38e9ed Mon Sep 17 00:00:00 2001 From: Yiyuan Guo Date: Fri, 30 Jun 2023 22:37:19 +0800 Subject: [PATCH 305/513] iio: cros_ec: Fix the allocation size for cros_ec_command commit 8a4629055ef55177b5b63dab1ecce676bd8cccdd upstream. The struct cros_ec_command contains several integer fields and a trailing array. An allocation size neglecting the integer fields can lead to buffer overrun. Reviewed-by: Tzung-Bi Shih Signed-off-by: Yiyuan Guo Fixes: 974e6f02e27e ("iio: cros_ec_sensors_core: Add common functions for the ChromeOS EC Sensor Hub.") Link: https://lore.kernel.org/r/20230630143719.1513906-1-yguoaz@gmail.com Cc: Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c index b0c1dc8cc4c5..f529c01ac66b 100644 --- a/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c +++ b/drivers/iio/common/cros_ec_sensors/cros_ec_sensors_core.c @@ -259,7 +259,7 @@ int cros_ec_sensors_core_init(struct platform_device *pdev, platform_set_drvdata(pdev, indio_dev); state->ec = ec->ec_dev; - state->msg = devm_kzalloc(&pdev->dev, + state->msg = devm_kzalloc(&pdev->dev, sizeof(*state->msg) + max((u16)sizeof(struct ec_params_motion_sense), state->ec->max_response), GFP_KERNEL); if (!state->msg) From a8e2ae6296d56478fb98ae7f739846ed121f154f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Alvin=20=C5=A0ipraga?= Date: Mon, 19 Jun 2023 16:12:39 +0200 Subject: [PATCH 306/513] iio: adc: ina2xx: avoid NULL pointer dereference on OF device match MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit a41e19cc0d6b6a445a4133170b90271e4a2553dc upstream. The affected lines were resulting in a NULL pointer dereference on our platform because the device tree contained the following list of compatible strings: power-sensor@40 { compatible = "ti,ina232", "ti,ina231"; ... }; Since the driver doesn't declare a compatible string "ti,ina232", the OF matching succeeds on "ti,ina231". But the I2C device ID info is populated via the first compatible string, cf. modalias population in of_i2c_get_board_info(). 
Since there is no "ina232" entry in the legacy I2C device ID table either, the struct i2c_device_id *id pointer in the probe function is NULL. Fix this by using the already populated type variable instead, which points to the proper driver data. Since the name is also wanted, add a generic one to the ina2xx_config table. Signed-off-by: Alvin Šipraga Fixes: c43a102e67db ("iio: ina2xx: add support for TI INA2xx Power Monitors") Link: https://lore.kernel.org/r/20230619141239.2257392-1-alvin@pqrs.dk Cc: Signed-off-by: Jonathan Cameron Signed-off-by: Greg Kroah-Hartman --- drivers/iio/adc/ina2xx-adc.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c index a4b2ff9e0dd5..9403c2604066 100644 --- a/drivers/iio/adc/ina2xx-adc.c +++ b/drivers/iio/adc/ina2xx-adc.c @@ -124,6 +124,7 @@ static const struct regmap_config ina2xx_regmap_config = { enum ina2xx_ids { ina219, ina226 }; struct ina2xx_config { + const char *name; u16 config_default; int calibration_value; int shunt_voltage_lsb; /* nV */ @@ -155,6 +156,7 @@ struct ina2xx_chip_info { static const struct ina2xx_config ina2xx_config[] = { [ina219] = { + .name = "ina219", .config_default = INA219_CONFIG_DEFAULT, .calibration_value = 4096, .shunt_voltage_lsb = 10000, @@ -164,6 +166,7 @@ static const struct ina2xx_config ina2xx_config[] = { .chip_id = ina219, }, [ina226] = { + .name = "ina226", .config_default = INA226_CONFIG_DEFAULT, .calibration_value = 2048, .shunt_voltage_lsb = 2500, @@ -999,7 +1002,7 @@ static int ina2xx_probe(struct i2c_client *client, /* Patch the current config register with default. */ val = chip->config->config_default; - if (id->driver_data == ina226) { + if (type == ina226) { ina226_set_average(chip, INA226_DEFAULT_AVG, &val); ina226_set_int_time_vbus(chip, INA226_DEFAULT_IT, &val); ina226_set_int_time_vshunt(chip, INA226_DEFAULT_IT, &val); @@ -1018,7 +1021,7 @@ static int ina2xx_probe(struct i2c_client *client, } indio_dev->modes = INDIO_DIRECT_MODE; - if (id->driver_data == ina226) { + if (type == ina226) { indio_dev->channels = ina226_channels; indio_dev->num_channels = ARRAY_SIZE(ina226_channels); indio_dev->info = &ina226_info; @@ -1027,7 +1030,7 @@ static int ina2xx_probe(struct i2c_client *client, indio_dev->num_channels = ARRAY_SIZE(ina219_channels); indio_dev->info = &ina219_info; } - indio_dev->name = id->name; + indio_dev->name = id ? id->name : chip->config->name; ret = devm_iio_kfifo_buffer_setup(&client->dev, indio_dev, INDIO_BUFFER_SOFTWARE, From 03eebad96233397f951d8e9fafd82a1674a77284 Mon Sep 17 00:00:00 2001 From: Qi Zheng Date: Sun, 25 Jun 2023 15:49:37 +0000 Subject: [PATCH 307/513] binder: fix memory leak in binder_init() commit adb9743d6a08778b78d62d16b4230346d3508986 upstream. In binder_init(), the destruction of binder_alloc_shrinker_init() is not performed in the wrong path, which will cause memory leaks. So this commit introduces binder_alloc_shrinker_exit() and calls it in the wrong path to fix that. 
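The fix follows the usual init-time unwinding pattern: every setup step that succeeded before the failure point gets a matching teardown on the error path. A self-contained sketch with made-up names (the real pairing is binder_alloc_shrinker_init()/binder_alloc_shrinker_exit(), as in the diff below):

  #include <linux/module.h>
  #include <linux/init.h>
  #include <linux/errno.h>

  /* Stand-ins for the two setup steps; names are illustrative only. */
  static int  example_shrinker_init(void) { return 0; }
  static void example_shrinker_exit(void) { }
  static int  example_later_init(void)    { return -ENOMEM; /* pretend failure */ }

  static int __init example_init(void)
  {
          int ret;

          ret = example_shrinker_init();
          if (ret)
                  return ret;

          ret = example_later_init();
          if (ret)
                  goto err_shrinker;      /* unwind what already succeeded */

          return 0;

  err_shrinker:
          example_shrinker_exit();        /* without this, the shrinker leaks */
          return ret;
  }
  module_init(example_init);
  MODULE_LICENSE("GPL");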
Signed-off-by: Qi Zheng Acked-by: Carlos Llamas Fixes: f2517eb76f1f ("android: binder: Add global lru shrinker to binder") Cc: stable Link: https://lore.kernel.org/r/20230625154937.64316-1-qi.zheng@linux.dev Signed-off-by: Greg Kroah-Hartman --- drivers/android/binder.c | 1 + drivers/android/binder_alloc.c | 6 ++++++ drivers/android/binder_alloc.h | 1 + 3 files changed, 8 insertions(+) diff --git a/drivers/android/binder.c b/drivers/android/binder.c index a4749b6c3d73..cbbed43baf05 100644 --- a/drivers/android/binder.c +++ b/drivers/android/binder.c @@ -6412,6 +6412,7 @@ static int __init binder_init(void) err_alloc_device_names_failed: debugfs_remove_recursive(binder_debugfs_dir_entry_root); + binder_alloc_shrinker_exit(); return ret; } diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c index db01c5d423e6..54cee2b31c8e 100644 --- a/drivers/android/binder_alloc.c +++ b/drivers/android/binder_alloc.c @@ -1091,6 +1091,12 @@ int binder_alloc_shrinker_init(void) return ret; } +void binder_alloc_shrinker_exit(void) +{ + unregister_shrinker(&binder_shrinker); + list_lru_destroy(&binder_alloc_lru); +} + /** * check_buffer() - verify that buffer/offset is safe to access * @alloc: binder_alloc for this proc diff --git a/drivers/android/binder_alloc.h b/drivers/android/binder_alloc.h index 7dea57a84c79..399f2b269f2c 100644 --- a/drivers/android/binder_alloc.h +++ b/drivers/android/binder_alloc.h @@ -131,6 +131,7 @@ extern struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, int pid); extern void binder_alloc_init(struct binder_alloc *alloc); extern int binder_alloc_shrinker_init(void); +extern void binder_alloc_shrinker_exit(void); extern void binder_alloc_vma_close(struct binder_alloc *alloc); extern struct binder_buffer * binder_alloc_prepare_to_free(struct binder_alloc *alloc, From 945e1b3c361bdcdf5ec21f097cfc22a30bf0f716 Mon Sep 17 00:00:00 2001 From: Ricky WU Date: Tue, 25 Jul 2023 09:10:54 +0000 Subject: [PATCH 308/513] misc: rtsx: judge ASPM Mode to set PETXCFG Reg commit 101bd907b4244a726980ee67f95ed9cafab6ff7a upstream. ASPM Mode is ASPM_MODE_CFG need to judge the value of clkreq_0 to set HIGH or LOW, if the ASPM Mode is ASPM_MODE_REG always set to HIGH during the initialization. 
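Summarised as one decision (an illustrative restatement, not a literal hunk; the real register writes are spread across the per-chip extra_init_hw() callbacks and rtsx_pci_init_hw() below):

    if (pcr->aspm_mode == ASPM_MODE_CFG) {
            /* CFG mode: honour the force_clkreq_0 judgement */
            rtsx_pci_write_register(pcr, PETXCFG, FORCE_CLKREQ_DELINK_MASK,
                            option->force_clkreq_0 ? FORCE_CLKREQ_LOW :
                                                     FORCE_CLKREQ_HIGH);
    } else if (pcr->aspm_mode == ASPM_MODE_REG) {
            /* REG mode: always force the pin HIGH during init */
            rtsx_pci_write_register(pcr, PETXCFG, FORCE_CLKREQ_DELINK_MASK,
                            FORCE_CLKREQ_HIGH);
    }

For the chips whose private force_clkreq handling is removed entirely (rts5228, rts5260, rts5261), only the common ASPM_MODE_REG write added to rtsx_pci_init_hw() still touches the CLKREQ force bits.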
Cc: stable@vger.kernel.org Signed-off-by: Ricky Wu Link: https://lore.kernel.org/r/52906c6836374c8cb068225954c5543a@realtek.com Signed-off-by: Greg Kroah-Hartman --- drivers/misc/cardreader/rts5227.c | 2 +- drivers/misc/cardreader/rts5228.c | 18 ------------------ drivers/misc/cardreader/rts5249.c | 3 +-- drivers/misc/cardreader/rts5260.c | 18 ------------------ drivers/misc/cardreader/rts5261.c | 18 ------------------ drivers/misc/cardreader/rtsx_pcr.c | 5 ++++- 6 files changed, 6 insertions(+), 58 deletions(-) diff --git a/drivers/misc/cardreader/rts5227.c b/drivers/misc/cardreader/rts5227.c index 4bcfbc9afbac..0f106d700625 100644 --- a/drivers/misc/cardreader/rts5227.c +++ b/drivers/misc/cardreader/rts5227.c @@ -171,7 +171,7 @@ static int rts5227_extra_init_hw(struct rtsx_pcr *pcr) else rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, 0x30, 0x00); - if (option->force_clkreq_0) + if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG) rtsx_pci_add_cmd(pcr, WRITE_REG_CMD, PETXCFG, FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); else diff --git a/drivers/misc/cardreader/rts5228.c b/drivers/misc/cardreader/rts5228.c index ffc128278613..282a03520cf5 100644 --- a/drivers/misc/cardreader/rts5228.c +++ b/drivers/misc/cardreader/rts5228.c @@ -427,17 +427,10 @@ static void rts5228_init_from_cfg(struct rtsx_pcr *pcr) option->ltr_enabled = false; } } - - if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN - | PM_L1_1_EN | PM_L1_2_EN)) - option->force_clkreq_0 = false; - else - option->force_clkreq_0 = true; } static int rts5228_extra_init_hw(struct rtsx_pcr *pcr) { - struct rtsx_cr_option *option = &pcr->option; rtsx_pci_write_register(pcr, RTS5228_AUTOLOAD_CFG1, CD_RESUME_EN_MASK, CD_RESUME_EN_MASK); @@ -468,17 +461,6 @@ static int rts5228_extra_init_hw(struct rtsx_pcr *pcr) else rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00); - /* - * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced - * to drive low, and we forcibly request clock. - */ - if (option->force_clkreq_0) - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); - else - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); - rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB); rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00); rtsx_pci_write_register(pcr, RTS5228_REG_PME_FORCE_CTL, diff --git a/drivers/misc/cardreader/rts5249.c b/drivers/misc/cardreader/rts5249.c index 53f3a1f45c4a..6b5e4bdf209d 100644 --- a/drivers/misc/cardreader/rts5249.c +++ b/drivers/misc/cardreader/rts5249.c @@ -302,12 +302,11 @@ static int rts5249_extra_init_hw(struct rtsx_pcr *pcr) } } - /* * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced * to drive low, and we forcibly request clock. 
*/ - if (option->force_clkreq_0) + if (option->force_clkreq_0 && pcr->aspm_mode == ASPM_MODE_CFG) rtsx_pci_write_register(pcr, PETXCFG, FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); else diff --git a/drivers/misc/cardreader/rts5260.c b/drivers/misc/cardreader/rts5260.c index 9b42b20a3e5a..79b18f6f73a8 100644 --- a/drivers/misc/cardreader/rts5260.c +++ b/drivers/misc/cardreader/rts5260.c @@ -517,17 +517,10 @@ static void rts5260_init_from_cfg(struct rtsx_pcr *pcr) option->ltr_enabled = false; } } - - if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN - | PM_L1_1_EN | PM_L1_2_EN)) - option->force_clkreq_0 = false; - else - option->force_clkreq_0 = true; } static int rts5260_extra_init_hw(struct rtsx_pcr *pcr) { - struct rtsx_cr_option *option = &pcr->option; /* Set mcu_cnt to 7 to ensure data can be sampled properly */ rtsx_pci_write_register(pcr, 0xFC03, 0x7F, 0x07); @@ -546,17 +539,6 @@ static int rts5260_extra_init_hw(struct rtsx_pcr *pcr) rts5260_init_hw(pcr); - /* - * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced - * to drive low, and we forcibly request clock. - */ - if (option->force_clkreq_0) - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); - else - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); - rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00); return 0; diff --git a/drivers/misc/cardreader/rts5261.c b/drivers/misc/cardreader/rts5261.c index 1fd4e0e50730..2a97eeb0e509 100644 --- a/drivers/misc/cardreader/rts5261.c +++ b/drivers/misc/cardreader/rts5261.c @@ -468,17 +468,10 @@ static void rts5261_init_from_cfg(struct rtsx_pcr *pcr) option->ltr_enabled = false; } } - - if (rtsx_check_dev_flag(pcr, ASPM_L1_1_EN | ASPM_L1_2_EN - | PM_L1_1_EN | PM_L1_2_EN)) - option->force_clkreq_0 = false; - else - option->force_clkreq_0 = true; } static int rts5261_extra_init_hw(struct rtsx_pcr *pcr) { - struct rtsx_cr_option *option = &pcr->option; u32 val; rtsx_pci_write_register(pcr, RTS5261_AUTOLOAD_CFG1, @@ -524,17 +517,6 @@ static int rts5261_extra_init_hw(struct rtsx_pcr *pcr) else rtsx_pci_write_register(pcr, PETXCFG, 0x30, 0x00); - /* - * If u_force_clkreq_0 is enabled, CLKREQ# PIN will be forced - * to drive low, and we forcibly request clock. - */ - if (option->force_clkreq_0) - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_LOW); - else - rtsx_pci_write_register(pcr, PETXCFG, - FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); - rtsx_pci_write_register(pcr, PWD_SUSPEND_EN, 0xFF, 0xFB); rtsx_pci_write_register(pcr, pcr->reg_pm_ctrl3, 0x10, 0x00); rtsx_pci_write_register(pcr, RTS5261_REG_PME_FORCE_CTL, diff --git a/drivers/misc/cardreader/rtsx_pcr.c b/drivers/misc/cardreader/rtsx_pcr.c index 62fdbbd55e74..c0bf747305e2 100644 --- a/drivers/misc/cardreader/rtsx_pcr.c +++ b/drivers/misc/cardreader/rtsx_pcr.c @@ -1400,8 +1400,11 @@ static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) return err; } - if (pcr->aspm_mode == ASPM_MODE_REG) + if (pcr->aspm_mode == ASPM_MODE_REG) { rtsx_pci_write_register(pcr, ASPM_FORCE_CTL, 0x30, 0x30); + rtsx_pci_write_register(pcr, PETXCFG, + FORCE_CLKREQ_DELINK_MASK, FORCE_CLKREQ_HIGH); + } /* No CD interrupt if probing driver with card inserted. * So we need to initialize pcr->card_exist here. 
From 7a11d1e2625bdb2346f6586773b20b20977278ac Mon Sep 17 00:00:00 2001 From: Alan Stern Date: Wed, 2 Aug 2023 13:49:02 -0400 Subject: [PATCH 309/513] usb-storage: alauda: Fix uninit-value in alauda_check_media() commit a6ff6e7a9dd69364547751db0f626a10a6d628d2 upstream. Syzbot got KMSAN to complain about access to an uninitialized value in the alauda subdriver of usb-storage: BUG: KMSAN: uninit-value in alauda_transport+0x462/0x57f0 drivers/usb/storage/alauda.c:1137 CPU: 0 PID: 12279 Comm: usb-storage Not tainted 5.3.0-rc7+ #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:77 [inline] dump_stack+0x191/0x1f0 lib/dump_stack.c:113 kmsan_report+0x13a/0x2b0 mm/kmsan/kmsan_report.c:108 __msan_warning+0x73/0xe0 mm/kmsan/kmsan_instr.c:250 alauda_check_media+0x344/0x3310 drivers/usb/storage/alauda.c:460 The problem is that alauda_check_media() doesn't verify that its USB transfer succeeded before trying to use the received data. What should happen if the transfer fails isn't entirely clear, but a reasonably conservative approach is to pretend that no media is present. A similar problem exists in a usb_stor_dbg() call in alauda_get_media_status(). In this case, when an error occurs the call is redundant, because usb_stor_ctrl_transfer() already will print a debugging message. Finally, unrelated to the uninitialized memory access, is the fact that alauda_check_media() performs DMA to a buffer on the stack. Fortunately usb-storage provides a general purpose DMA-able buffer for uses like this. We'll use it instead. Reported-and-tested-by: syzbot+e7d46eb426883fb97efd@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/0000000000007d25ff059457342d@google.com/T/ Suggested-by: Christophe JAILLET Signed-off-by: Alan Stern Fixes: e80b0fade09e ("[PATCH] USB Storage: add alauda support") Cc: Link: https://lore.kernel.org/r/693d5d5e-f09b-42d0-8ed9-1f96cd30bcce@rowland.harvard.edu Signed-off-by: Greg Kroah-Hartman --- drivers/usb/storage/alauda.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/drivers/usb/storage/alauda.c b/drivers/usb/storage/alauda.c index 7e4ce0e7e05a..dcc4778d1ae9 100644 --- a/drivers/usb/storage/alauda.c +++ b/drivers/usb/storage/alauda.c @@ -318,7 +318,8 @@ static int alauda_get_media_status(struct us_data *us, unsigned char *data) rc = usb_stor_ctrl_transfer(us, us->recv_ctrl_pipe, command, 0xc0, 0, 1, data, 2); - usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]); + if (rc == USB_STOR_XFER_GOOD) + usb_stor_dbg(us, "Media status %02X %02X\n", data[0], data[1]); return rc; } @@ -454,9 +455,14 @@ static int alauda_init_media(struct us_data *us) static int alauda_check_media(struct us_data *us) { struct alauda_info *info = (struct alauda_info *) us->extra; - unsigned char status[2]; + unsigned char *status = us->iobuf; + int rc; - alauda_get_media_status(us, status); + rc = alauda_get_media_status(us, status); + if (rc != USB_STOR_XFER_GOOD) { + status[0] = 0xF0; /* Pretend there's no media */ + status[1] = 0; + } /* Check for no media or door open */ if ((status[0] & 0x80) || ((status[0] & 0x1F) == 0x10) From 00cc14b52d6fcf35726f704aa2bf1fb36410cbba Mon Sep 17 00:00:00 2001 From: Elson Roy Serrao Date: Tue, 1 Aug 2023 12:26:58 -0700 Subject: [PATCH 310/513] usb: dwc3: Properly handle processing of pending events commit 3ddaa6a274578e23745b7466346fc2650df8f959 upstream. 
If dwc3 is runtime suspended we defer processing the event buffer until resume, by setting the pending_events flag. Set this flag before triggering resume to avoid race with the runtime resume callback. While handling the pending events, in addition to checking the event buffer we also need to process it. Handle this by explicitly calling dwc3_thread_interrupt(). Also balance the runtime pm get() operation that triggered this processing. Cc: stable@vger.kernel.org Fixes: fc8bb91bc83e ("usb: dwc3: implement runtime PM") Signed-off-by: Elson Roy Serrao Acked-by: Thinh Nguyen Reviewed-by: Roger Quadros Link: https://lore.kernel.org/r/20230801192658.19275-1-quic_eserrao@quicinc.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/dwc3/gadget.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 845c6041b385..d76a4837615d 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -4204,9 +4204,14 @@ static irqreturn_t dwc3_check_event_buf(struct dwc3_event_buffer *evt) u32 reg; if (pm_runtime_suspended(dwc->dev)) { + dwc->pending_events = true; + /* + * Trigger runtime resume. The get() function will be balanced + * after processing the pending events in dwc3_process_pending + * events(). + */ pm_runtime_get(dwc->dev); disable_irq_nosync(dwc->irq_gadget); - dwc->pending_events = true; return IRQ_HANDLED; } @@ -4470,6 +4475,8 @@ void dwc3_gadget_process_pending_events(struct dwc3 *dwc) { if (dwc->pending_events) { dwc3_interrupt(dwc->irq_gadget, dwc->ev_buf); + dwc3_thread_interrupt(dwc->irq_gadget, dwc->ev_buf); + pm_runtime_put(dwc->dev); dwc->pending_events = false; enable_irq(dwc->irq_gadget); } From f776b94ccdf033f97937ce18434dc2af56f6269a Mon Sep 17 00:00:00 2001 From: Prashanth K Date: Tue, 1 Aug 2023 14:33:52 +0530 Subject: [PATCH 311/513] usb: common: usb-conn-gpio: Prevent bailing out if initial role is none commit 8e21a620c7e6e00347ade1a6ed4967b359eada5a upstream. Currently if we bootup a device without cable connected, then usb-conn-gpio won't call set_role() because last_role is same as current role. This happens since last_role gets initialised to zero during the probe. To avoid this, add a new flag initial_detection into struct usb_conn_info, which prevents bailing out during initial detection. 
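The first comparison can only match because the driver state is zero-initialised at probe time and zero happens to be USB_ROLE_NONE:

    /* include/linux/usb/role.h */
    enum usb_role {
            USB_ROLE_NONE,          /* 0 -- what a freshly allocated ->last_role holds */
            USB_ROLE_HOST,
            USB_ROLE_DEVICE,
    };

So on a cable-less boot the first detected role is also USB_ROLE_NONE, the old check saw last_role == role and returned, and the role switch was never programmed. The initial_detection flag below simply lets that first pass through.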
Cc: # 5.4 Fixes: 4602f3bff266 ("usb: common: add USB GPIO based connection detection driver") Signed-off-by: Prashanth K Tested-by: AngeloGioacchino Del Regno Reviewed-by: Heikki Krogerus Link: https://lore.kernel.org/r/1690880632-12588-1-git-send-email-quic_prashk@quicinc.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/common/usb-conn-gpio.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c index 0158148cb054..521c95935d4c 100644 --- a/drivers/usb/common/usb-conn-gpio.c +++ b/drivers/usb/common/usb-conn-gpio.c @@ -42,6 +42,7 @@ struct usb_conn_info { struct power_supply_desc desc; struct power_supply *charger; + bool initial_detection; }; /* @@ -86,11 +87,13 @@ static void usb_conn_detect_cable(struct work_struct *work) dev_dbg(info->dev, "role %s -> %s, gpios: id %d, vbus %d\n", usb_role_string(info->last_role), usb_role_string(role), id, vbus); - if (info->last_role == role) { + if (!info->initial_detection && info->last_role == role) { dev_warn(info->dev, "repeated role: %s\n", usb_role_string(role)); return; } + info->initial_detection = false; + if (info->last_role == USB_ROLE_HOST && info->vbus) regulator_disable(info->vbus); @@ -273,6 +276,7 @@ static int usb_conn_probe(struct platform_device *pdev) platform_set_drvdata(pdev, info); /* Perform initial detection */ + info->initial_detection = true; usb_conn_queue_dwork(info, 0); return 0; From c2372b1559d4b5510f44feb885be3533f9919fe0 Mon Sep 17 00:00:00 2001 From: Badhri Jagan Sridharan Date: Wed, 12 Jul 2023 08:57:22 +0000 Subject: [PATCH 312/513] usb: typec: tcpm: Fix response to vsafe0V event commit 4270d2b4845e820b274702bfc2a7140f69e4d19d upstream. Do not transition to SNK_UNATTACHED state when receiving vsafe0v event while in SNK_HARD_RESET_WAIT_VBUS. Ignore VBUS off events as well as in some platforms VBUS off can be signalled more than once. 
[143515.364753] Requesting mux state 1, usb-role 2, orientation 2 [143515.365520] pending state change SNK_HARD_RESET_SINK_OFF -> SNK_HARD_RESET_SINK_ON @ 650 ms [rev3 HARD_RESET] [143515.632281] CC1: 0 -> 0, CC2: 3 -> 0 [state SNK_HARD_RESET_SINK_OFF, polarity 1, disconnected] [143515.637214] VBUS on [143515.664985] VBUS off [143515.664992] state change SNK_HARD_RESET_SINK_OFF -> SNK_HARD_RESET_WAIT_VBUS [rev3 HARD_RESET] [143515.665564] VBUS VSAFE0V [143515.665566] state change SNK_HARD_RESET_WAIT_VBUS -> SNK_UNATTACHED [rev3 HARD_RESET] Fixes: 28b43d3d746b ("usb: typec: tcpm: Introduce vsafe0v for vbus") Cc: Signed-off-by: Badhri Jagan Sridharan Acked-by: Heikki Krogerus Link: https://lore.kernel.org/r/20230712085722.1414743-1-badhri@google.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/typec/tcpm/tcpm.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index c6e5991b3868..f7e99757a50e 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -5246,6 +5246,10 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port) /* Do nothing, vbus drop expected */ break; + case SNK_HARD_RESET_WAIT_VBUS: + /* Do nothing, its OK to receive vbus off events */ + break; + default: if (port->pwr_role == TYPEC_SINK && port->attached) tcpm_set_state(port, SNK_UNATTACHED, tcpm_wait_for_discharge(port)); @@ -5292,6 +5296,9 @@ static void _tcpm_pd_vbus_vsafe0v(struct tcpm_port *port) case SNK_DEBOUNCED: /*Do nothing, still waiting for VSAFE5V for connect */ break; + case SNK_HARD_RESET_WAIT_VBUS: + /* Do nothing, its OK to receive vbus off events */ + break; default: if (port->pwr_role == TYPEC_SINK && port->auto_vbus_discharge_enabled) tcpm_set_state(port, SNK_UNATTACHED, 0); From c41a22b93d7c9286da4acb34d45bd305cd307ebb Mon Sep 17 00:00:00 2001 From: Nick Desaulniers Date: Wed, 9 Aug 2023 09:40:26 -0700 Subject: [PATCH 313/513] x86/srso: Fix build breakage with the LLVM linker commit cbe8ded48b939b9d55d2c5589ab56caa7b530709 upstream. The assertion added to verify the difference in bits set of the addresses of srso_untrain_ret_alias() and srso_safe_ret_alias() would fail to link in LLVM's ld.lld linker with the following error: ld.lld: error: ./arch/x86/kernel/vmlinux.lds:210: at least one side of the expression must be absolute ld.lld: error: ./arch/x86/kernel/vmlinux.lds:211: at least one side of the expression must be absolute Use ABSOLUTE to evaluate the expression referring to at least one of the symbols so that LLD can evaluate the linker script. Also, add linker version info to the comment about XOR being unsupported in either ld.bfd or ld.lld until somewhat recently. 
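The (A | B) - (A & B) form works because the set bits of the XOR and of the AND are disjoint and together make up the OR, so as plain integers the OR equals the XOR plus the AND; subtracting the AND therefore leaves exactly the XOR. A throwaway userspace check of the identity, with arbitrary values rather than the real symbol addresses:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t a = 0xffffffff81a04000ULL;
            uint64_t b = 0xffffffff81a04104ULL;

            /* holds for any pair of values, not just these two */
            assert(((a | b) - (a & b)) == (a ^ b));
            return 0;
    }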
Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Closes: https://lore.kernel.org/llvm/CA+G9fYsdUeNu-gwbs0+T6XHi4hYYk=Y9725-wFhZ7gJMspLDRA@mail.gmail.com/ Reported-by: Nathan Chancellor Reported-by: Daniel Kolesa Reported-by: Naresh Kamboju Suggested-by: Sven Volkinsfeld Signed-off-by: Nick Desaulniers Signed-off-by: Borislav Petkov (AMD) Link: https://github.com/ClangBuiltLinux/linux/issues/1907 Link: https://lore.kernel.org/r/20230809-gds-v1-1-eaac90b0cbcc@google.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/vmlinux.lds.S | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 50aaf0cd8f46..7f3fa72a236a 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -518,11 +518,17 @@ INIT_PER_CPU(irq_stack_backing_store); #ifdef CONFIG_CPU_SRSO /* - * GNU ld cannot do XOR so do: (A | B) - (A & B) in order to compute the XOR + * GNU ld cannot do XOR until 2.41. + * https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=f6f78318fca803c4907fb8d7f6ded8295f1947b1 + * + * LLVM lld cannot do XOR until lld-17. + * https://github.com/llvm/llvm-project/commit/fae96104d4378166cbe5c875ef8ed808a356f3fb + * + * Instead do: (A | B) - (A & B) in order to compute the XOR * of the two function addresses: */ -. = ASSERT(((srso_untrain_ret_alias | srso_safe_ret_alias) - - (srso_untrain_ret_alias & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), +. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) - + (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), "SRSO function pair won't alias"); #endif From 829409510d0036f7707c72cd676a60479aafc3a8 Mon Sep 17 00:00:00 2001 From: Cristian Ciocaltea Date: Fri, 11 Aug 2023 23:37:05 +0300 Subject: [PATCH 314/513] x86/cpu/amd: Enable Zenbleed fix for AMD Custom APU 0405 commit 6dbef74aeb090d6bee7d64ef3fa82ae6fa53f271 upstream. Commit 522b1d69219d ("x86/cpu/amd: Add a Zenbleed fix") provided a fix for the Zen2 VZEROUPPER data corruption bug affecting a range of CPU models, but the AMD Custom APU 0405 found on SteamDeck was not listed, although it is clearly affected by the vulnerability. Add this CPU variant to the Zenbleed erratum list, in order to unconditionally enable the fallback fix until a proper microcode update is available. Fixes: 522b1d69219d ("x86/cpu/amd: Add a Zenbleed fix") Signed-off-by: Cristian Ciocaltea Signed-off-by: Borislav Petkov (AMD) Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230811203705.1699914-1-cristian.ciocaltea@collabora.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/amd.c | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 77f4dfb0662e..0ca7123417ab 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -73,6 +73,7 @@ static const int amd_erratum_1054[] = static const int amd_zenbleed[] = AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x17, 0x30, 0x0, 0x4f, 0xf), AMD_MODEL_RANGE(0x17, 0x60, 0x0, 0x7f, 0xf), + AMD_MODEL_RANGE(0x17, 0x90, 0x0, 0x91, 0xf), AMD_MODEL_RANGE(0x17, 0xa0, 0x0, 0xaf, 0xf)); static const int amd_div0[] = From 9290ef14c96b9f924ea50d8c0b9c070801a226fa Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 3 Aug 2023 18:16:09 +0300 Subject: [PATCH 315/513] x86/mm: Fix VDSO and VVAR placement on 5-level paging machines commit 1b8b1aa90c9c0e825b181b98b8d9e249dc395470 upstream. 
Yingcong has noticed that on the 5-level paging machine, VDSO and VVAR VMAs are placed above the 47-bit border: 8000001a9000-8000001ad000 r--p 00000000 00:00 0 [vvar] 8000001ad000-8000001af000 r-xp 00000000 00:00 0 [vdso] This might confuse users who are not aware of 5-level paging and expect all userspace addresses to be under the 47-bit border. So far problem has only been triggered with ASLR disabled, although it may also occur with ASLR enabled if the layout is randomized in a just right way. The problem happens due to custom placement for the VMAs in the VDSO code: vdso_addr() tries to place them above the stack and checks the result against TASK_SIZE_MAX, which is wrong. TASK_SIZE_MAX is set to the 56-bit border on 5-level paging machines. Use DEFAULT_MAP_WINDOW instead. Fixes: b569bab78d8d ("x86/mm: Prepare to expose larger address space to userspace") Reported-by: Yingcong Wu Signed-off-by: Kirill A. Shutemov Signed-off-by: Dave Hansen Cc: stable@vger.kernel.org Link: https://lore.kernel.org/all/20230803151609.22141-1-kirill.shutemov%40linux.intel.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/entry/vdso/vma.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c index 1000d457c332..a380f7ecdd54 100644 --- a/arch/x86/entry/vdso/vma.c +++ b/arch/x86/entry/vdso/vma.c @@ -322,8 +322,8 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) /* Round the lowest possible end address up to a PMD boundary. */ end = (start + len + PMD_SIZE - 1) & PMD_MASK; - if (end >= TASK_SIZE_MAX) - end = TASK_SIZE_MAX; + if (end >= DEFAULT_MAP_WINDOW) + end = DEFAULT_MAP_WINDOW; end -= len; if (end > start) { From f919cbc904410764431c1e733fe78835811bbcd0 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 9 Aug 2023 15:04:59 +0200 Subject: [PATCH 316/513] x86/speculation: Add cpu_show_gds() prototype commit a57c27c7ad85c420b7de44c6ee56692d51709dda upstream. The newly added function has two definitions but no prototypes: drivers/base/cpu.c:605:16: error: no previous prototype for 'cpu_show_gds' [-Werror=missing-prototypes] Add a declaration next to the other ones for this file to avoid the warning. Fixes: 8974eb588283b ("x86/speculation: Add Gather Data Sampling mitigation") Signed-off-by: Arnd Bergmann Signed-off-by: Dave Hansen Tested-by: Daniel Sneddon Cc: stable@kernel.org Link: https://lore.kernel.org/all/20230809130530.1913368-1-arnd%40kernel.org Signed-off-by: Greg Kroah-Hartman --- include/linux/cpu.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/cpu.h b/include/linux/cpu.h index d4c860de9a6a..caf3b95017bf 100644 --- a/include/linux/cpu.h +++ b/include/linux/cpu.h @@ -72,6 +72,8 @@ extern ssize_t cpu_show_retbleed(struct device *dev, struct device_attribute *attr, char *buf); extern ssize_t cpu_show_spec_rstack_overflow(struct device *dev, struct device_attribute *attr, char *buf); +extern ssize_t cpu_show_gds(struct device *dev, + struct device_attribute *attr, char *buf); extern __printf(4, 5) struct device *cpu_device_create(struct device *parent, void *drvdata, From 4c6767c8bf5e41751bcb3d63ffaee64124a2d506 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 9 Aug 2023 15:05:00 +0200 Subject: [PATCH 317/513] x86: Move gds_ucode_mitigated() declaration to header commit eb3515dc99c7c85f4170b50838136b2a193f8012 upstream. 
The declaration got placed in the .c file of the caller, but that causes a warning for the definition: arch/x86/kernel/cpu/bugs.c:682:6: error: no previous prototype for 'gds_ucode_mitigated' [-Werror=missing-prototypes] Move it to a header where both sides can observe it instead. Fixes: 81ac7e5d74174 ("KVM: Add GDS_NO support to KVM") Signed-off-by: Arnd Bergmann Signed-off-by: Dave Hansen Tested-by: Daniel Sneddon Cc: stable@kernel.org Link: https://lore.kernel.org/all/20230809130530.1913368-2-arnd%40kernel.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/processor.h | 2 ++ arch/x86/kvm/x86.c | 2 -- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h index aec714ea8230..bbbf27cfe701 100644 --- a/arch/x86/include/asm/processor.h +++ b/arch/x86/include/asm/processor.h @@ -859,4 +859,6 @@ enum mds_mitigations { MDS_MITIGATION_VMWERV, }; +extern bool gds_ucode_mitigated(void); + #endif /* _ASM_X86_PROCESSOR_H */ diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 2686c4dcdb1a..a26200c3e82b 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -303,8 +303,6 @@ static struct kmem_cache *x86_fpu_cache; static struct kmem_cache *x86_emulator_cache; -extern bool gds_ucode_mitigated(void); - /* * When called, it means the previous get/set msr reached an invalid msr. * Return true if we want to ignore/silent this failed msr access. From b8b8db5857d4e2fda5ef38b2d9c4586ac54ae7ea Mon Sep 17 00:00:00 2001 From: Karol Herbst Date: Sat, 5 Aug 2023 12:18:13 +0200 Subject: [PATCH 318/513] drm/nouveau/disp: Revert a NULL check inside nouveau_connector_get_modes commit d5712cd22b9cf109fded1b7f178f4c1888c8b84b upstream. The original commit adding that check tried to protect the kenrel against a potential invalid NULL pointer access. However we call nouveau_connector_detect_depth once without a native_mode set on purpose for non LVDS connectors and this broke DP support in a few cases. Cc: Olaf Skibbe Cc: Lyude Paul Closes: https://gitlab.freedesktop.org/drm/nouveau/-/issues/238 Closes: https://gitlab.freedesktop.org/drm/nouveau/-/issues/245 Fixes: 20a2ce87fbaf8 ("drm/nouveau/dp: check for NULL nv_connector->native_mode") Signed-off-by: Karol Herbst Reviewed-by: Lyude Paul Link: https://patchwork.freedesktop.org/patch/msgid/20230805101813.2603989-1-kherbst@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/nouveau/nouveau_connector.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c index 7f8607b97707..fe6c650d23ce 100644 --- a/drivers/gpu/drm/nouveau/nouveau_connector.c +++ b/drivers/gpu/drm/nouveau/nouveau_connector.c @@ -966,7 +966,7 @@ nouveau_connector_get_modes(struct drm_connector *connector) /* Determine display colour depth for everything except LVDS now, * DP requires this before mode_valid() is called. */ - if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS && nv_connector->native_mode) + if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) nouveau_connector_detect_depth(connector); /* Find the native mode if this is a digital panel, if we didn't From b973eb76dff3f84d71ab4634a42f281e8cc4aa60 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 4 Aug 2023 20:22:11 +0100 Subject: [PATCH 319/513] selftests/rseq: Fix build with undefined __weak commit d5ad9aae13dcced333c1a7816ff0a4fbbb052466 upstream. 
Commit 3bcbc20942db ("selftests/rseq: Play nice with binaries statically linked against glibc 2.35+") which is now in Linus' tree introduced uses of __weak but did nothing to ensure that a definition is provided for it resulting in build failures for the rseq tests: rseq.c:41:1: error: unknown type name '__weak' __weak ptrdiff_t __rseq_offset; ^ rseq.c:41:17: error: expected ';' after top level declarator __weak ptrdiff_t __rseq_offset; ^ ; rseq.c:42:1: error: unknown type name '__weak' __weak unsigned int __rseq_size; ^ rseq.c:43:1: error: unknown type name '__weak' __weak unsigned int __rseq_flags; Fix this by using the definition from tools/include compiler.h. Fixes: 3bcbc20942db ("selftests/rseq: Play nice with binaries statically linked against glibc 2.35+") Signed-off-by: Mark Brown Message-Id: <20230804-kselftest-rseq-build-v1-1-015830b66aa9@kernel.org> Signed-off-by: Paolo Bonzini Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/rseq/Makefile | 4 +++- tools/testing/selftests/rseq/rseq.c | 2 ++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile index 215e1067f037..82ceca6aab96 100644 --- a/tools/testing/selftests/rseq/Makefile +++ b/tools/testing/selftests/rseq/Makefile @@ -4,8 +4,10 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),) CLANG_FLAGS += -no-integrated-as endif +top_srcdir = ../../../.. + CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L$(OUTPUT) -Wl,-rpath=./ \ - $(CLANG_FLAGS) + $(CLANG_FLAGS) -I$(top_srcdir)/tools/include LDLIBS += -lpthread -ldl # Own dependencies because we only want to build against 1st prerequisite, but diff --git a/tools/testing/selftests/rseq/rseq.c b/tools/testing/selftests/rseq/rseq.c index b736a5169aad..e20191fb40d4 100644 --- a/tools/testing/selftests/rseq/rseq.c +++ b/tools/testing/selftests/rseq/rseq.c @@ -29,6 +29,8 @@ #include #include +#include + #include "../kselftest.h" #include "rseq.h" From b9dfb80d9fb2cd6fe50a02c3bafc9fd8c4f913fc Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 8 Aug 2023 17:14:52 +0300 Subject: [PATCH 320/513] selftests: forwarding: Add a helper to skip test when using veth pairs commit 66e131861ab7bf754b50813216f5c6885cd32d63 upstream. A handful of tests require physical loopbacks to be used instead of veth pairs. Add a helper that these tests will invoke in order to be skipped when executed with veth pairs. 
Fixes: 64916b57c0b1 ("selftests: forwarding: Add speed and auto-negotiation test") Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Tested-by: Mirsad Todorovac Reviewed-by: Hangbin Liu Acked-by: Nikolay Aleksandrov Link: https://lore.kernel.org/r/20230808141503.4060661-7-idosch@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/net/forwarding/lib.sh | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index b7d946cf14eb..ea776ceba9b6 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -122,6 +122,17 @@ check_ethtool_lanes_support() fi } +skip_on_veth() +{ + local kind=$(ip -j -d link show dev ${NETIFS[p1]} | + jq -r '.[].linkinfo.info_kind') + + if [[ $kind == veth ]]; then + echo "SKIP: Test cannot be run with veth pairs" + exit $ksft_skip + fi +} + if [[ "$(id -u)" -ne 0 ]]; then echo "SKIP: need root privileges" exit $ksft_skip From b8d216e9c607d8700489bffdd7f95b42f5a23389 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 8 Aug 2023 17:14:53 +0300 Subject: [PATCH 321/513] selftests: forwarding: ethtool: Skip when using veth pairs commit 60a36e21915c31c0375d9427be9406aa8ce2ec34 upstream. Auto-negotiation cannot be tested with veth pairs, resulting in failures: # ./ethtool.sh TEST: force of same speed autoneg off [FAIL] error in configuration. swp1 speed Not autoneg off [...] Fix by skipping the test when used with veth pairs. Fixes: 64916b57c0b1 ("selftests: forwarding: Add speed and auto-negotiation test") Reported-by: Mirsad Todorovac Closes: https://lore.kernel.org/netdev/adc5e40d-d040-a65e-eb26-edf47dac5b02@alu.unizg.hr/ Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Tested-by: Mirsad Todorovac Reviewed-by: Hangbin Liu Acked-by: Nikolay Aleksandrov Link: https://lore.kernel.org/r/20230808141503.4060661-8-idosch@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/net/forwarding/ethtool.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/selftests/net/forwarding/ethtool.sh b/tools/testing/selftests/net/forwarding/ethtool.sh index dbb9fcf759e0..aa2eafb7b243 100755 --- a/tools/testing/selftests/net/forwarding/ethtool.sh +++ b/tools/testing/selftests/net/forwarding/ethtool.sh @@ -286,6 +286,8 @@ different_speeds_autoneg_on() ethtool -s $h1 autoneg on } +skip_on_veth + trap cleanup EXIT setup_prepare From 4a449945262019a607ed58246f41a86de3707367 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 8 Aug 2023 17:14:54 +0300 Subject: [PATCH 322/513] selftests: forwarding: ethtool_extended_state: Skip when using veth pairs commit b3d9305e60d121dac20a77b6847c4cf14a4c0001 upstream. Ethtool extended state cannot be tested with veth pairs, resulting in failures: # ./ethtool_extended_state.sh TEST: Autoneg, No partner detected [FAIL] Expected "Autoneg", got "Link detected: no" [...] Fix by skipping the test when used with veth pairs. 
Fixes: 7d10bcce98cd ("selftests: forwarding: Add tests for ethtool extended state") Reported-by: Mirsad Todorovac Closes: https://lore.kernel.org/netdev/adc5e40d-d040-a65e-eb26-edf47dac5b02@alu.unizg.hr/ Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Tested-by: Mirsad Todorovac Reviewed-by: Hangbin Liu Acked-by: Nikolay Aleksandrov Link: https://lore.kernel.org/r/20230808141503.4060661-9-idosch@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- .../testing/selftests/net/forwarding/ethtool_extended_state.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh index 4b42dfd4efd1..baf831da5366 100755 --- a/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh +++ b/tools/testing/selftests/net/forwarding/ethtool_extended_state.sh @@ -95,6 +95,8 @@ no_cable() ip link set dev $swp3 down } +skip_on_veth + setup_prepare tests_run From e410f85ebca9e86350bb0391128f07ff7db9de8c Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 8 Aug 2023 17:14:47 +0300 Subject: [PATCH 323/513] selftests: forwarding: Skip test when no interfaces are specified commit d72c83b1e4b4a36a38269c77a85ff52f95eb0d08 upstream. As explained in [1], the forwarding selftests are meant to be run with either physical loopbacks or veth pairs. The interfaces are expected to be specified in a user-provided forwarding.config file or as command line arguments. By default, this file is not present and the tests fail: # make -C tools/testing/selftests TARGETS=net/forwarding run_tests [...] TAP version 13 1..102 # timeout set to 45 # selftests: net/forwarding: bridge_igmp.sh # Command line is not complete. Try option "help" # Failed to create netif not ok 1 selftests: net/forwarding: bridge_igmp.sh # exit=1 [...] Fix by skipping a test if interfaces are not provided either via the configuration file or command line arguments. # make -C tools/testing/selftests TARGETS=net/forwarding run_tests [...] TAP version 13 1..102 # timeout set to 45 # selftests: net/forwarding: bridge_igmp.sh # SKIP: Cannot create interface. Name not specified ok 1 selftests: net/forwarding: bridge_igmp.sh # SKIP [1] tools/testing/selftests/net/forwarding/README Fixes: 81573b18f26d ("selftests/net/forwarding: add Makefile to install tests") Reported-by: Mirsad Todorovac Closes: https://lore.kernel.org/netdev/856d454e-f83c-20cf-e166-6dc06cbc1543@alu.unizg.hr/ Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Tested-by: Mirsad Todorovac Reviewed-by: Hangbin Liu Acked-by: Nikolay Aleksandrov Link: https://lore.kernel.org/r/20230808141503.4060661-2-idosch@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/net/forwarding/lib.sh | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tools/testing/selftests/net/forwarding/lib.sh b/tools/testing/selftests/net/forwarding/lib.sh index ea776ceba9b6..83e8f9466d62 100644 --- a/tools/testing/selftests/net/forwarding/lib.sh +++ b/tools/testing/selftests/net/forwarding/lib.sh @@ -185,6 +185,11 @@ create_netif_veth() for ((i = 1; i <= NUM_NETIFS; ++i)); do local j=$((i+1)) + if [ -z ${NETIFS[p$i]} ]; then + echo "SKIP: Cannot create interface. Name not specified" + exit $ksft_skip + fi + ip link show dev ${NETIFS[p$i]} &> /dev/null if [[ $? 
-ne 0 ]]; then ip link add ${NETIFS[p$i]} type veth \ From 7b3fa99526f94345c981ae88d891d0e8c7144abb Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 8 Aug 2023 17:14:48 +0300 Subject: [PATCH 324/513] selftests: forwarding: Switch off timeout commit 0529883ad102f6c04e19fb7018f31e1bda575bbe upstream. The default timeout for selftests is 45 seconds, but it is not enough for forwarding selftests which can takes minutes to finish depending on the number of tests cases: # make -C tools/testing/selftests TARGETS=net/forwarding run_tests TAP version 13 1..102 # timeout set to 45 # selftests: net/forwarding: bridge_igmp.sh # TEST: IGMPv2 report 239.10.10.10 [ OK ] # TEST: IGMPv2 leave 239.10.10.10 [ OK ] # TEST: IGMPv3 report 239.10.10.10 is_include [ OK ] # TEST: IGMPv3 report 239.10.10.10 include -> allow [ OK ] # not ok 1 selftests: net/forwarding: bridge_igmp.sh # TIMEOUT 45 seconds Fix by switching off the timeout and setting it to 0. A similar change was done for BPF selftests in commit 6fc5916cc256 ("selftests: bpf: Switch off timeout"). Fixes: 81573b18f26d ("selftests/net/forwarding: add Makefile to install tests") Reported-by: Mirsad Todorovac Closes: https://lore.kernel.org/netdev/8d149f8c-818e-d141-a0ce-a6bae606bc22@alu.unizg.hr/ Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Tested-by: Mirsad Todorovac Reviewed-by: Hangbin Liu Acked-by: Nikolay Aleksandrov Link: https://lore.kernel.org/r/20230808141503.4060661-3-idosch@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/net/forwarding/settings | 1 + 1 file changed, 1 insertion(+) create mode 100644 tools/testing/selftests/net/forwarding/settings diff --git a/tools/testing/selftests/net/forwarding/settings b/tools/testing/selftests/net/forwarding/settings new file mode 100644 index 000000000000..e7b9417537fb --- /dev/null +++ b/tools/testing/selftests/net/forwarding/settings @@ -0,0 +1 @@ +timeout=0 From 85af0b226c0ba0302e9f727c7dfecb44765a7c5e Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 8 Aug 2023 17:14:58 +0300 Subject: [PATCH 325/513] selftests: forwarding: tc_flower: Relax success criterion commit 9ee37e53e7687654b487fc94e82569377272a7a8 upstream. The test checks that filters that match on source or destination MAC were only hit once. A host can send more than one packet with a given source or destination MAC, resulting in failures. Fix by relaxing the success criterion and instead check that the filters were not hit zero times. Using tc_check_at_least_x_packets() is also an option, but it is not available in older kernels. 
Fixes: 07e5c75184a1 ("selftests: forwarding: Introduce tc flower matching tests") Reported-by: Mirsad Todorovac Closes: https://lore.kernel.org/netdev/adc5e40d-d040-a65e-eb26-edf47dac5b02@alu.unizg.hr/ Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Tested-by: Mirsad Todorovac Reviewed-by: Hangbin Liu Acked-by: Nikolay Aleksandrov Link: https://lore.kernel.org/r/20230808141503.4060661-13-idosch@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- tools/testing/selftests/net/forwarding/tc_flower.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/tc_flower.sh b/tools/testing/selftests/net/forwarding/tc_flower.sh index 683711f41aa9..b1daad19b01e 100755 --- a/tools/testing/selftests/net/forwarding/tc_flower.sh +++ b/tools/testing/selftests/net/forwarding/tc_flower.sh @@ -52,8 +52,8 @@ match_dst_mac_test() tc_check_packets "dev $h2 ingress" 101 1 check_fail $? "Matched on a wrong filter" - tc_check_packets "dev $h2 ingress" 102 1 - check_err $? "Did not match on correct filter" + tc_check_packets "dev $h2 ingress" 102 0 + check_fail $? "Did not match on correct filter" tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower @@ -78,8 +78,8 @@ match_src_mac_test() tc_check_packets "dev $h2 ingress" 101 1 check_fail $? "Matched on a wrong filter" - tc_check_packets "dev $h2 ingress" 102 1 - check_err $? "Did not match on correct filter" + tc_check_packets "dev $h2 ingress" 102 0 + check_fail $? "Did not match on correct filter" tc filter del dev $h2 ingress protocol ip pref 1 handle 101 flower tc filter del dev $h2 ingress protocol ip pref 2 handle 102 flower From a09c258cfa77d3ba0a7acc555c73eb6b005c4bd8 Mon Sep 17 00:00:00 2001 From: Andrew Kanner Date: Thu, 3 Aug 2023 21:03:18 +0200 Subject: [PATCH 326/513] net: core: remove unnecessary frame_sz check in bpf_xdp_adjust_tail() commit d14eea09edf427fa36bd446f4a3271f99164202f upstream. Syzkaller reported the following issue: ======================================= Too BIG xdp->frame_sz = 131072 WARNING: CPU: 0 PID: 5020 at net/core/filter.c:4121 ____bpf_xdp_adjust_tail net/core/filter.c:4121 [inline] WARNING: CPU: 0 PID: 5020 at net/core/filter.c:4121 bpf_xdp_adjust_tail+0x466/0xa10 net/core/filter.c:4103 ... Call Trace: bpf_prog_4add87e5301a4105+0x1a/0x1c __bpf_prog_run include/linux/filter.h:600 [inline] bpf_prog_run_xdp include/linux/filter.h:775 [inline] bpf_prog_run_generic_xdp+0x57e/0x11e0 net/core/dev.c:4721 netif_receive_generic_xdp net/core/dev.c:4807 [inline] do_xdp_generic+0x35c/0x770 net/core/dev.c:4866 tun_get_user+0x2340/0x3ca0 drivers/net/tun.c:1919 tun_chr_write_iter+0xe8/0x210 drivers/net/tun.c:2043 call_write_iter include/linux/fs.h:1871 [inline] new_sync_write fs/read_write.c:491 [inline] vfs_write+0x650/0xe40 fs/read_write.c:584 ksys_write+0x12f/0x250 fs/read_write.c:637 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x38/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd xdp->frame_sz > PAGE_SIZE check was introduced in commit c8741e2bfe87 ("xdp: Allow bpf_xdp_adjust_tail() to grow packet size"). But Jesper Dangaard Brouer noted that after introducing the xdp_init_buff() which all XDP driver use - it's safe to remove this check. The original intend was to catch cases where XDP drivers have not been updated to use xdp.frame_sz, but that is not longer a concern (since xdp_init_buff). 
Running the initial syzkaller repro it was discovered that the contiguous physical memory allocation is used for both xdp paths in tun_get_user(), e.g. tun_build_skb() and tun_alloc_skb(). It was also stated by Jesper Dangaard Brouer that XDP can work on higher order pages, as long as this is contiguous physical memory (e.g. a page). Reported-and-tested-by: syzbot+f817490f5bd20541b90a@syzkaller.appspotmail.com Closes: https://lore.kernel.org/all/000000000000774b9205f1d8a80d@google.com/T/ Link: https://syzkaller.appspot.com/bug?extid=f817490f5bd20541b90a Link: https://lore.kernel.org/all/20230725155403.796-1-andrew.kanner@gmail.com/T/ Fixes: 43b5169d8355 ("net, xdp: Introduce xdp_init_buff utility routine") Signed-off-by: Andrew Kanner Acked-by: Jesper Dangaard Brouer Acked-by: Jason Wang Link: https://lore.kernel.org/r/20230803190316.2380231-1-andrew.kanner@gmail.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/core/filter.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/net/core/filter.c b/net/core/filter.c index 18eb8049c795..458756334c4f 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3843,12 +3843,6 @@ BPF_CALL_2(bpf_xdp_adjust_tail, struct xdp_buff *, xdp, int, offset) if (unlikely(data_end > data_hard_end)) return -EINVAL; - /* ALL drivers MUST init xdp->frame_sz, chicken check below */ - if (unlikely(xdp->frame_sz > PAGE_SIZE)) { - WARN_ONCE(1, "Too BIG xdp->frame_sz = %d\n", xdp->frame_sz); - return -EINVAL; - } - if (unlikely(data_end < xdp->data + ETH_HLEN)) return -EINVAL; From 20d53895d5c0789f2409d0fe41c4877f367f7f03 Mon Sep 17 00:00:00 2001 From: Xu Kuohai Date: Fri, 4 Aug 2023 03:37:37 -0400 Subject: [PATCH 327/513] bpf, sockmap: Fix map type error in sock_map_del_link commit 7e96ec0e6605b69bb21bbf6c0ff9051e656ec2b1 upstream. sock_map_del_link() operates on both SOCKMAP and SOCKHASH, although both types have member named "progs", the offset of "progs" member in these two types is different, so "progs" should be accessed with the real map type. 
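Put differently, this was a container_of()-on-the-wrong-type bug: both map flavours embed a struct sk_psock_progs member called "progs", but behind different leading members, so the offsets differ. A simplified sketch (layouts abridged, not the real definitions in net/core/sock_map.c):

    struct bpf_stab {                       /* BPF_MAP_TYPE_SOCKMAP */
            struct bpf_map map;
            /* ... SOCKMAP-specific members ... */
            struct sk_psock_progs progs;    /* at some offset X */
    };

    struct bpf_shtab {                      /* BPF_MAP_TYPE_SOCKHASH */
            struct bpf_map map;
            /* ... different SOCKHASH-specific members ... */
            struct sk_psock_progs progs;    /* at some other offset Y */
    };

    /* Broken: only valid when the map really is a SOCKMAP. */
    struct bpf_stab *stab = container_of(map, struct bpf_stab, map);
    progs = &stab->progs;

    /* Fixed: let the helper return progs for whichever flavour it is. */
    progs = sock_map_progs(map);

sock_map_progs() performs the container_of() that matches the actual map type, which is what the hunk below switches sock_map_del_link() to.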
Fixes: 604326b41a6f ("bpf, sockmap: convert to generic sk_msg interface") Signed-off-by: Xu Kuohai Reviewed-by: John Fastabend Link: https://lore.kernel.org/r/20230804073740.194770-2-xukuohai@huaweicloud.com Signed-off-by: Martin KaFai Lau Signed-off-by: Greg Kroah-Hartman --- net/core/sock_map.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/net/core/sock_map.c b/net/core/sock_map.c index 85d3c62bdfa6..caae43e66353 100644 --- a/net/core/sock_map.c +++ b/net/core/sock_map.c @@ -148,13 +148,13 @@ static void sock_map_del_link(struct sock *sk, list_for_each_entry_safe(link, tmp, &psock->link, list) { if (link->link_raw == link_raw) { struct bpf_map *map = link->map; - struct bpf_stab *stab = container_of(map, struct bpf_stab, - map); - if (psock->saved_data_ready && stab->progs.stream_parser) + struct sk_psock_progs *progs = sock_map_progs(map); + + if (psock->saved_data_ready && progs->stream_parser) strp_stop = true; - if (psock->saved_data_ready && stab->progs.stream_verdict) + if (psock->saved_data_ready && progs->stream_verdict) verdict_stop = true; - if (psock->saved_data_ready && stab->progs.skb_verdict) + if (psock->saved_data_ready && progs->skb_verdict) verdict_stop = true; list_del(&link->list); sk_psock_free_link(link); From 3961761af392c248c05ed1f8833181ac6d1d20fa Mon Sep 17 00:00:00 2001 From: Xu Kuohai Date: Fri, 4 Aug 2023 03:37:38 -0400 Subject: [PATCH 328/513] bpf, sockmap: Fix bug that strp_done cannot be called commit 809e4dc71a0f2b8d2836035d98603694fff11d5d upstream. strp_done is only called when psock->progs.stream_parser is not NULL, but stream_parser was set to NULL by sk_psock_stop_strp(), called by sk_psock_drop() earlier. So, strp_done can never be called. Introduce SK_PSOCK_RX_ENABLED to mark whether there is strp on psock. Change the condition for calling strp_done from judging whether stream_parser is set to judging whether this flag is set. This flag is only set once when strp_init() succeeds, and will never be cleared later. 
Fixes: c0d95d3380ee ("bpf, sockmap: Re-evaluate proto ops when psock is removed from sockmap") Signed-off-by: Xu Kuohai Reviewed-by: John Fastabend Link: https://lore.kernel.org/r/20230804073740.194770-3-xukuohai@huaweicloud.com Signed-off-by: Martin KaFai Lau Signed-off-by: Greg Kroah-Hartman --- include/linux/skmsg.h | 1 + net/core/skmsg.c | 10 ++++++++-- 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h index 6e18ca234f81..4273505d309a 100644 --- a/include/linux/skmsg.h +++ b/include/linux/skmsg.h @@ -63,6 +63,7 @@ struct sk_psock_progs { enum sk_psock_state_bits { SK_PSOCK_TX_ENABLED, + SK_PSOCK_RX_STRP_ENABLED, }; struct sk_psock_link { diff --git a/net/core/skmsg.c b/net/core/skmsg.c index dc9b93d8f0d3..9cd14212dcd0 100644 --- a/net/core/skmsg.c +++ b/net/core/skmsg.c @@ -1124,13 +1124,19 @@ static void sk_psock_strp_data_ready(struct sock *sk) int sk_psock_init_strp(struct sock *sk, struct sk_psock *psock) { + int ret; + static const struct strp_callbacks cb = { .rcv_msg = sk_psock_strp_read, .read_sock_done = sk_psock_strp_read_done, .parse_msg = sk_psock_strp_parse, }; - return strp_init(&psock->strp, sk, &cb); + ret = strp_init(&psock->strp, sk, &cb); + if (!ret) + sk_psock_set_state(psock, SK_PSOCK_RX_STRP_ENABLED); + + return ret; } void sk_psock_start_strp(struct sock *sk, struct sk_psock *psock) @@ -1158,7 +1164,7 @@ void sk_psock_stop_strp(struct sock *sk, struct sk_psock *psock) static void sk_psock_done_strp(struct sk_psock *psock) { /* Parser has been stopped */ - if (psock->progs.stream_parser) + if (sk_psock_test_state(psock, SK_PSOCK_RX_STRP_ENABLED)) strp_done(&psock->strp); } #else From f4614e379bf98727880b21bee74a79bb61d3f456 Mon Sep 17 00:00:00 2001 From: Nathan Chancellor Date: Wed, 2 Aug 2023 10:40:29 -0700 Subject: [PATCH 329/513] mISDN: Update parameter type of dsp_cmx_send() commit 1696ec8654016dad3b1baf6c024303e584400453 upstream. When booting a kernel with CONFIG_MISDN_DSP=y and CONFIG_CFI_CLANG=y, there is a failure when dsp_cmx_send() is called indirectly from call_timer_fn(): [ 0.371412] CFI failure at call_timer_fn+0x2f/0x150 (target: dsp_cmx_send+0x0/0x530; expected type: 0x92ada1e9) The function pointer prototype that call_timer_fn() expects is void (*fn)(struct timer_list *) whereas dsp_cmx_send() has a parameter type of 'void *', which causes the control flow integrity checks to fail because the parameter types do not match. Change dsp_cmx_send()'s parameter type to be 'struct timer_list' to match the expected prototype. The argument is unused anyways, so this has no functional change, aside from avoiding the CFI failure. 
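For reference, the prototype kCFI enforces here is the one timer_setup() has expected ever since the timer_list conversion: the callback takes a struct timer_list * and, when it needs its containing object, recovers it with from_timer() rather than going through a cast. A minimal sketch of that shape with invented names (the mISDN code itself keeps its timer in a global and ignores the argument):

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_state {
            struct timer_list tick;
    };

    static void my_tick_fn(struct timer_list *t)
    {
            struct my_state *st = from_timer(st, t, tick);

            /* ... do the periodic work on st ... */
            mod_timer(&st->tick, jiffies + HZ);
    }

    static void my_state_start(struct my_state *st)
    {
            timer_setup(&st->tick, my_tick_fn, 0);
            mod_timer(&st->tick, jiffies + HZ);
    }

With the callback's parameter type matching what call_timer_fn() expects, the indirect call passes the CFI type check without any change in behaviour.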
Reported-by: kernel test robot Closes: https://lore.kernel.org/oe-lkp/202308020936.58787e6c-oliver.sang@intel.com Signed-off-by: Nathan Chancellor Reviewed-by: Sami Tolvanen Reviewed-by: Kees Cook Fixes: e313ac12eb13 ("mISDN: Convert timers to use timer_setup()") Link: https://lore.kernel.org/r/20230802-fix-dsp_cmx_send-cfi-failure-v1-1-2f2e79b0178d@kernel.org Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- drivers/isdn/mISDN/dsp.h | 2 +- drivers/isdn/mISDN/dsp_cmx.c | 2 +- drivers/isdn/mISDN/dsp_core.c | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/isdn/mISDN/dsp.h b/drivers/isdn/mISDN/dsp.h index fa09d511a8ed..baf31258f5c9 100644 --- a/drivers/isdn/mISDN/dsp.h +++ b/drivers/isdn/mISDN/dsp.h @@ -247,7 +247,7 @@ extern void dsp_cmx_hardware(struct dsp_conf *conf, struct dsp *dsp); extern int dsp_cmx_conf(struct dsp *dsp, u32 conf_id); extern void dsp_cmx_receive(struct dsp *dsp, struct sk_buff *skb); extern void dsp_cmx_hdlc(struct dsp *dsp, struct sk_buff *skb); -extern void dsp_cmx_send(void *arg); +extern void dsp_cmx_send(struct timer_list *arg); extern void dsp_cmx_transmit(struct dsp *dsp, struct sk_buff *skb); extern int dsp_cmx_del_conf_member(struct dsp *dsp); extern int dsp_cmx_del_conf(struct dsp_conf *conf); diff --git a/drivers/isdn/mISDN/dsp_cmx.c b/drivers/isdn/mISDN/dsp_cmx.c index 6d2088fbaf69..1b73af501397 100644 --- a/drivers/isdn/mISDN/dsp_cmx.c +++ b/drivers/isdn/mISDN/dsp_cmx.c @@ -1625,7 +1625,7 @@ static u16 dsp_count; /* last sample count */ static int dsp_count_valid; /* if we have last sample count */ void -dsp_cmx_send(void *arg) +dsp_cmx_send(struct timer_list *arg) { struct dsp_conf *conf; struct dsp_conf_member *member; diff --git a/drivers/isdn/mISDN/dsp_core.c b/drivers/isdn/mISDN/dsp_core.c index 386084530c2f..fae95f166688 100644 --- a/drivers/isdn/mISDN/dsp_core.c +++ b/drivers/isdn/mISDN/dsp_core.c @@ -1195,7 +1195,7 @@ static int __init dsp_init(void) } /* set sample timer */ - timer_setup(&dsp_spl_tl, (void *)dsp_cmx_send, 0); + timer_setup(&dsp_spl_tl, dsp_cmx_send, 0); dsp_spl_tl.expires = jiffies + dsp_tics; dsp_spl_jiffies = dsp_spl_tl.expires; add_timer(&dsp_spl_tl); From 7903311b2ceca141d74be04d3e5fb56c1af5c318 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 3 Aug 2023 14:56:00 +0000 Subject: [PATCH 330/513] net/packet: annotate data-races around tp->status commit 8a9896177784063d01068293caea3f74f6830ff6 upstream. 
Another syzbot report [1] is about tp->status lockless reads from __packet_get_status() [1] BUG: KCSAN: data-race in __packet_rcv_has_room / __packet_set_status write to 0xffff888117d7c080 of 8 bytes by interrupt on cpu 0: __packet_set_status+0x78/0xa0 net/packet/af_packet.c:407 tpacket_rcv+0x18bb/0x1a60 net/packet/af_packet.c:2483 deliver_skb net/core/dev.c:2173 [inline] __netif_receive_skb_core+0x408/0x1e80 net/core/dev.c:5337 __netif_receive_skb_one_core net/core/dev.c:5491 [inline] __netif_receive_skb+0x57/0x1b0 net/core/dev.c:5607 process_backlog+0x21f/0x380 net/core/dev.c:5935 __napi_poll+0x60/0x3b0 net/core/dev.c:6498 napi_poll net/core/dev.c:6565 [inline] net_rx_action+0x32b/0x750 net/core/dev.c:6698 __do_softirq+0xc1/0x265 kernel/softirq.c:571 invoke_softirq kernel/softirq.c:445 [inline] __irq_exit_rcu+0x57/0xa0 kernel/softirq.c:650 sysvec_apic_timer_interrupt+0x6d/0x80 arch/x86/kernel/apic/apic.c:1106 asm_sysvec_apic_timer_interrupt+0x1a/0x20 arch/x86/include/asm/idtentry.h:645 smpboot_thread_fn+0x33c/0x4a0 kernel/smpboot.c:112 kthread+0x1d7/0x210 kernel/kthread.c:379 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:308 read to 0xffff888117d7c080 of 8 bytes by interrupt on cpu 1: __packet_get_status net/packet/af_packet.c:436 [inline] packet_lookup_frame net/packet/af_packet.c:524 [inline] __tpacket_has_room net/packet/af_packet.c:1255 [inline] __packet_rcv_has_room+0x3f9/0x450 net/packet/af_packet.c:1298 tpacket_rcv+0x275/0x1a60 net/packet/af_packet.c:2285 deliver_skb net/core/dev.c:2173 [inline] dev_queue_xmit_nit+0x38a/0x5e0 net/core/dev.c:2243 xmit_one net/core/dev.c:3574 [inline] dev_hard_start_xmit+0xcf/0x3f0 net/core/dev.c:3594 __dev_queue_xmit+0xefb/0x1d10 net/core/dev.c:4244 dev_queue_xmit include/linux/netdevice.h:3088 [inline] can_send+0x4eb/0x5d0 net/can/af_can.c:276 bcm_can_tx+0x314/0x410 net/can/bcm.c:302 bcm_tx_timeout_handler+0xdb/0x260 __run_hrtimer kernel/time/hrtimer.c:1685 [inline] __hrtimer_run_queues+0x217/0x700 kernel/time/hrtimer.c:1749 hrtimer_run_softirq+0xd6/0x120 kernel/time/hrtimer.c:1766 __do_softirq+0xc1/0x265 kernel/softirq.c:571 run_ksoftirqd+0x17/0x20 kernel/softirq.c:939 smpboot_thread_fn+0x30a/0x4a0 kernel/smpboot.c:164 kthread+0x1d7/0x210 kernel/kthread.c:379 ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:308 value changed: 0x0000000000000000 -> 0x0000000020000081 Reported by Kernel Concurrency Sanitizer on: CPU: 1 PID: 19 Comm: ksoftirqd/1 Not tainted 6.4.0-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 05/27/2023 Fixes: 69e3c75f4d54 ("net: TX_RING and packet mmap") Reported-by: syzbot Signed-off-by: Eric Dumazet Reviewed-by: Willem de Bruijn Link: https://lore.kernel.org/r/20230803145600.2937518-1-edumazet@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/packet/af_packet.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index ccf84ce41fd7..62c0fbb9de81 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -368,18 +368,20 @@ static void __packet_set_status(struct packet_sock *po, void *frame, int status) { union tpacket_uhdr h; + /* WRITE_ONCE() are paired with READ_ONCE() in __packet_get_status */ + h.raw = frame; switch (po->tp_version) { case TPACKET_V1: - h.h1->tp_status = status; + WRITE_ONCE(h.h1->tp_status, status); flush_dcache_page(pgv_to_page(&h.h1->tp_status)); break; case TPACKET_V2: - h.h2->tp_status = status; + WRITE_ONCE(h.h2->tp_status, status); 
flush_dcache_page(pgv_to_page(&h.h2->tp_status)); break; case TPACKET_V3: - h.h3->tp_status = status; + WRITE_ONCE(h.h3->tp_status, status); flush_dcache_page(pgv_to_page(&h.h3->tp_status)); break; default: @@ -396,17 +398,19 @@ static int __packet_get_status(const struct packet_sock *po, void *frame) smp_rmb(); + /* READ_ONCE() are paired with WRITE_ONCE() in __packet_set_status */ + h.raw = frame; switch (po->tp_version) { case TPACKET_V1: flush_dcache_page(pgv_to_page(&h.h1->tp_status)); - return h.h1->tp_status; + return READ_ONCE(h.h1->tp_status); case TPACKET_V2: flush_dcache_page(pgv_to_page(&h.h2->tp_status)); - return h.h2->tp_status; + return READ_ONCE(h.h2->tp_status); case TPACKET_V3: flush_dcache_page(pgv_to_page(&h.h3->tp_status)); - return h.h3->tp_status; + return READ_ONCE(h.h3->tp_status); default: WARN(1, "TPACKET version not supported.\n"); BUG(); From e95808121953410db8c59f0abfde70ac0d34222c Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Thu, 3 Aug 2023 17:26:49 +0200 Subject: [PATCH 331/513] tunnels: fix kasan splat when generating ipv4 pmtu error commit 6a7ac3d20593865209dceb554d8b3f094c6bd940 upstream. If we try to emit an icmp error in response to a nonliner skb, we get BUG: KASAN: slab-out-of-bounds in ip_compute_csum+0x134/0x220 Read of size 4 at addr ffff88811c50db00 by task iperf3/1691 CPU: 2 PID: 1691 Comm: iperf3 Not tainted 6.5.0-rc3+ #309 [..] kasan_report+0x105/0x140 ip_compute_csum+0x134/0x220 iptunnel_pmtud_build_icmp+0x554/0x1020 skb_tunnel_check_pmtu+0x513/0xb80 vxlan_xmit_one+0x139e/0x2ef0 vxlan_xmit+0x1867/0x2760 dev_hard_start_xmit+0x1ee/0x4f0 br_dev_queue_push_xmit+0x4d1/0x660 [..] ip_compute_csum() cannot deal with nonlinear skbs, so avoid it. After this change, splat is gone and iperf3 is no longer stuck. Fixes: 4cb47a8644cc ("tunnels: PMTU discovery support for directly bridged IP packets") Signed-off-by: Florian Westphal Link: https://lore.kernel.org/r/20230803152653.29535-2-fw@strlen.de Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/ipv4/ip_tunnel_core.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index cc1caab4a654..d3275d1ed260 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -224,7 +224,7 @@ static int iptunnel_pmtud_build_icmp(struct sk_buff *skb, int mtu) .un.frag.__unused = 0, .un.frag.mtu = htons(mtu), }; - icmph->checksum = ip_compute_csum(icmph, len); + icmph->checksum = csum_fold(skb_checksum(skb, 0, len, 0)); skb_reset_transport_header(skb); niph = skb_push(skb, sizeof(*niph)); From 789fcd94c9cac133dd4d96e193188661aca9f6c3 Mon Sep 17 00:00:00 2001 From: Magnus Karlsson Date: Wed, 9 Aug 2023 16:28:43 +0200 Subject: [PATCH 332/513] xsk: fix refcount underflow in error path commit 85c2c79a07302fe68a1ad5cc449458cc559e314d upstream. Fix a refcount underflow problem reported by syzbot that can happen when a system is running out of memory. If xp_alloc_tx_descs() fails, and it can only fail due to not having enough memory, then the error path is triggered. In this error path, the refcount of the pool is decremented as it has incremented before. However, the reference to the pool in the socket was not nulled. This means that when the socket is closed later, the socket teardown logic will think that there is a pool attached to the socket and try to decrease the refcount again, leading to a refcount underflow. I chose this fix as it involved adding just a single line. 
Another option would have been to move xp_get_pool() and the assignment of xs->pool to after the if-statement and using xs_umem->pool instead of xs->pool in the whole if-statement resulting in somewhat simpler code, but this would have led to much more churn in the code base perhaps making it harder to backport. Fixes: ba3beec2ec1d ("xsk: Fix possible crash when multiple sockets are created") Reported-by: syzbot+8ada0057e69293a05fd4@syzkaller.appspotmail.com Signed-off-by: Magnus Karlsson Link: https://lore.kernel.org/r/20230809142843.13944-1-magnus.karlsson@gmail.com Signed-off-by: Martin KaFai Lau Signed-off-by: Greg Kroah-Hartman --- net/xdp/xsk.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c index e80e3fcbb8e8..e5eb5616be0c 100644 --- a/net/xdp/xsk.c +++ b/net/xdp/xsk.c @@ -1001,6 +1001,7 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len) err = xp_alloc_tx_descs(xs->pool, xs); if (err) { xp_put_pool(xs->pool); + xs->pool = NULL; sockfd_put(sock); goto out_unlock; } From 49a1fee22fae61cae56b22332c1e34d08dba6f55 Mon Sep 17 00:00:00 2001 From: Ziyang Xuan Date: Wed, 2 Aug 2023 19:43:20 +0800 Subject: [PATCH 333/513] bonding: Fix incorrect deletion of ETH_P_8021AD protocol vid from slaves commit 01f4fd27087078c90a0e22860d1dfa2cd0510791 upstream. BUG_ON(!vlan_info) is triggered in unregister_vlan_dev() with following testcase: # ip netns add ns1 # ip netns exec ns1 ip link add bond0 type bond mode 0 # ip netns exec ns1 ip link add bond_slave_1 type veth peer veth2 # ip netns exec ns1 ip link set bond_slave_1 master bond0 # ip netns exec ns1 ip link add link bond_slave_1 name vlan10 type vlan id 10 protocol 802.1ad # ip netns exec ns1 ip link add link bond0 name bond0_vlan10 type vlan id 10 protocol 802.1ad # ip netns exec ns1 ip link set bond_slave_1 nomaster # ip netns del ns1 The logical analysis of the problem is as follows: 1. create ETH_P_8021AD protocol vlan10 for bond_slave_1: register_vlan_dev() vlan_vid_add() vlan_info_alloc() __vlan_vid_add() // add [ETH_P_8021AD, 10] vid to bond_slave_1 2. create ETH_P_8021AD protocol bond0_vlan10 for bond0: register_vlan_dev() vlan_vid_add() __vlan_vid_add() vlan_add_rx_filter_info() if (!vlan_hw_filter_capable(dev, proto)) // condition established because bond0 without NETIF_F_HW_VLAN_STAG_FILTER return 0; if (netif_device_present(dev)) return dev->netdev_ops->ndo_vlan_rx_add_vid(dev, proto, vid); // will be never called // The slaves of bond0 will not refer to the [ETH_P_8021AD, 10] vid. 3. detach bond_slave_1 from bond0: __bond_release_one() vlan_vids_del_by_dev() list_for_each_entry(vid_info, &vlan_info->vid_list, list) vlan_vid_del(dev, vid_info->proto, vid_info->vid); // bond_slave_1 [ETH_P_8021AD, 10] vid will be deleted. // bond_slave_1->vlan_info will be assigned NULL. 4. delete vlan10 during delete ns1: default_device_exit_batch() dev->rtnl_link_ops->dellink() // unregister_vlan_dev() for vlan10 vlan_info = rtnl_dereference(real_dev->vlan_info); // real_dev of vlan10 is bond_slave_1 BUG_ON(!vlan_info); // bond_slave_1->vlan_info is NULL now, bug is triggered!!! Add S-VLAN tag related features support to bond driver. So the bond driver will always propagate the VLAN info to its slaves. 
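As an illustration of the propagation rule described above, and not the upstream diff itself, a driver that wants the VLAN core to pass 802.1ad (S-VLAN) VIDs down to it advertises the STAG offload bits next to the CTAG ones in its setup routine. The sketch below assumes a hypothetical example_setup() hook.

#include <linux/netdevice.h>

/* Hypothetical setup hook: advertise both C-tag and S-tag VLAN offloads
 * so the VLAN core records and propagates 802.1Q and 802.1ad VIDs alike.
 */
static void example_setup(struct net_device *dev)
{
	dev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX |
			    NETIF_F_HW_VLAN_CTAG_FILTER |
			    NETIF_F_HW_VLAN_STAG_RX |
			    NETIF_F_HW_VLAN_STAG_FILTER;
	dev->features |= dev->hw_features;
}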
Fixes: 8ad227ff89a7 ("net: vlan: add 802.1ad support") Suggested-by: Ido Schimmel Signed-off-by: Ziyang Xuan Reviewed-by: Ido Schimmel Link: https://lore.kernel.org/r/20230802114320.4156068-1-william.xuanziyang@huawei.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- drivers/net/bonding/bond_main.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 69cc36c0840f..e64c652b78f0 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -5491,7 +5491,9 @@ void bond_setup(struct net_device *bond_dev) bond_dev->hw_features = BOND_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_FILTER; bond_dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; bond_dev->features |= bond_dev->hw_features; From f239c9e1d98b313435481b4926e8bdd06197e4d8 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 3 Aug 2023 16:30:21 +0000 Subject: [PATCH 334/513] dccp: fix data-race around dp->dccps_mss_cache commit a47e598fbd8617967e49d85c49c22f9fc642704c upstream. dccp_sendmsg() reads dp->dccps_mss_cache before locking the socket. Same thing in do_dccp_getsockopt(). Add READ_ONCE()/WRITE_ONCE() annotations, and change dccp_sendmsg() to check again dccps_mss_cache after socket is locked. Fixes: 7c657876b63c ("[DCCP]: Initial implementation") Reported-by: syzbot Signed-off-by: Eric Dumazet Link: https://lore.kernel.org/r/20230803163021.2958262-1-edumazet@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/dccp/output.c | 2 +- net/dccp/proto.c | 10 ++++++++-- 2 files changed, 9 insertions(+), 3 deletions(-) diff --git a/net/dccp/output.c b/net/dccp/output.c index b8a24734385e..fd2eb148d24d 100644 --- a/net/dccp/output.c +++ b/net/dccp/output.c @@ -187,7 +187,7 @@ unsigned int dccp_sync_mss(struct sock *sk, u32 pmtu) /* And store cached results */ icsk->icsk_pmtu_cookie = pmtu; - dp->dccps_mss_cache = cur_mps; + WRITE_ONCE(dp->dccps_mss_cache, cur_mps); return cur_mps; } diff --git a/net/dccp/proto.c b/net/dccp/proto.c index a23b19663601..5422d64af246 100644 --- a/net/dccp/proto.c +++ b/net/dccp/proto.c @@ -639,7 +639,7 @@ static int do_dccp_getsockopt(struct sock *sk, int level, int optname, return dccp_getsockopt_service(sk, len, (__be32 __user *)optval, optlen); case DCCP_SOCKOPT_GET_CUR_MPS: - val = dp->dccps_mss_cache; + val = READ_ONCE(dp->dccps_mss_cache); break; case DCCP_SOCKOPT_AVAILABLE_CCIDS: return ccid_getsockopt_builtin_ccids(sk, len, optval, optlen); @@ -748,7 +748,7 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) trace_dccp_probe(sk, len); - if (len > dp->dccps_mss_cache) + if (len > READ_ONCE(dp->dccps_mss_cache)) return -EMSGSIZE; lock_sock(sk); @@ -781,6 +781,12 @@ int dccp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len) goto out_discard; } + /* We need to check dccps_mss_cache after socket is locked. */ + if (len > dp->dccps_mss_cache) { + rc = -EMSGSIZE; + goto out_discard; + } + skb_reserve(skb, sk->sk_prot->max_header); rc = memcpy_from_msg(skb_put(skb, len), msg, len); if (rc != 0) From b1f985cf1c52db6c4e0606fca9a1d3fdbaf348f1 Mon Sep 17 00:00:00 2001 From: Andrew Kanner Date: Thu, 3 Aug 2023 20:59:48 +0200 Subject: [PATCH 335/513] drivers: net: prevent tun_build_skb() to exceed the packet size limit commit 59eeb232940515590de513b997539ef495faca9a upstream. 
Using the syzkaller repro with reduced packet size it was discovered that XDP_PACKET_HEADROOM is not checked in tun_can_build_skb(), although pad may be incremented in tun_build_skb(). This may end up with exceeding the PAGE_SIZE limit in tun_build_skb(). Jason Wang proposed to count XDP_PACKET_HEADROOM always (e.g. without rcu_access_pointer(tun->xdp_prog)) in tun_can_build_skb() since there's a window during which XDP program might be attached between tun_can_build_skb() and tun_build_skb(). Fixes: 7df13219d757 ("tun: reserve extra headroom only when XDP is set") Link: https://syzkaller.appspot.com/bug?extid=f817490f5bd20541b90a Signed-off-by: Andrew Kanner Link: https://lore.kernel.org/r/20230803185947.2379988-1-andrew.kanner@gmail.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- drivers/net/tun.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/tun.c b/drivers/net/tun.c index 924bdae314c8..e685c84ebe3a 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -1574,7 +1574,7 @@ static bool tun_can_build_skb(struct tun_struct *tun, struct tun_file *tfile, if (zerocopy) return false; - if (SKB_DATA_ALIGN(len + TUN_RX_PAD) + + if (SKB_DATA_ALIGN(len + TUN_RX_PAD + XDP_PACKET_HEADROOM) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) > PAGE_SIZE) return false; From df21468bfdc885f2495c01f5bb3e3431f0c07306 Mon Sep 17 00:00:00 2001 From: Piotr Gardocki Date: Mon, 7 Aug 2023 13:50:11 -0700 Subject: [PATCH 336/513] iavf: fix potential races for FDIR filters commit 0fb1d8eb234b6979d4981d2d385780dd7d8d9771 upstream. Add fdir_fltr_lock locking in unprotected places. The change in iavf_fdir_is_dup_fltr adds a spinlock around a loop which iterates over all filters and looks for a duplicate. The filter can be removed from list and freed from memory at the same time it's being compared. All other places where filters are deleted are already protected with spinlock. The remaining changes protect adapter->fdir_active_fltr variable so now all its uses are under a spinlock. 
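A minimal sketch of the locking pattern the patch applies, using made-up example_adapter/example_fltr types rather than the iavf structures: the capacity check and the duplicate scan must run under the same lock that protects list mutation, otherwise a concurrent delete can free an entry while it is being compared.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct example_fltr {
	struct list_head list;
	u32 loc;
};

struct example_adapter {
	spinlock_t fltr_lock;		/* protects fltr_list and active_cnt */
	struct list_head fltr_list;
	u32 active_cnt;
};

/* Returns true if a filter with the same location already exists. */
static bool example_is_dup(struct example_adapter *ad, u32 loc)
{
	struct example_fltr *tmp;
	bool ret = false;

	spin_lock_bh(&ad->fltr_lock);
	list_for_each_entry(tmp, &ad->fltr_list, list) {
		if (tmp->loc == loc) {
			ret = true;
			break;
		}
	}
	spin_unlock_bh(&ad->fltr_lock);

	return ret;
}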
Fixes: 527691bf0682 ("iavf: Support IPv4 Flow Director filters") Signed-off-by: Piotr Gardocki Tested-by: Rafal Romanowski Signed-off-by: Tony Nguyen Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230807205011.3129224-1-anthony.l.nguyen@intel.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/intel/iavf/iavf_ethtool.c | 5 ++++- drivers/net/ethernet/intel/iavf/iavf_fdir.c | 11 ++++++++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index 5af3ae68b7a1..e622b6e6ac2b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -1387,14 +1387,15 @@ static int iavf_add_fdir_ethtool(struct iavf_adapter *adapter, struct ethtool_rx if (fsp->flow_type & FLOW_MAC_EXT) return -EINVAL; + spin_lock_bh(&adapter->fdir_fltr_lock); if (adapter->fdir_active_fltr >= IAVF_MAX_FDIR_FILTERS) { + spin_unlock_bh(&adapter->fdir_fltr_lock); dev_err(&adapter->pdev->dev, "Unable to add Flow Director filter because VF reached the limit of max allowed filters (%u)\n", IAVF_MAX_FDIR_FILTERS); return -ENOSPC; } - spin_lock_bh(&adapter->fdir_fltr_lock); if (iavf_find_fdir_fltr_by_loc(adapter, fsp->location)) { dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, it already exists\n"); spin_unlock_bh(&adapter->fdir_fltr_lock); @@ -1767,7 +1768,9 @@ static int iavf_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, case ETHTOOL_GRXCLSRLCNT: if (!FDIR_FLTR_SUPPORT(adapter)) break; + spin_lock_bh(&adapter->fdir_fltr_lock); cmd->rule_cnt = adapter->fdir_active_fltr; + spin_unlock_bh(&adapter->fdir_fltr_lock); cmd->data = IAVF_MAX_FDIR_FILTERS; ret = 0; break; diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c index 6146203efd84..505e82ebafe4 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.c @@ -722,7 +722,9 @@ void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *f bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr) { struct iavf_fdir_fltr *tmp; + bool ret = false; + spin_lock_bh(&adapter->fdir_fltr_lock); list_for_each_entry(tmp, &adapter->fdir_list_head, list) { if (tmp->flow_type != fltr->flow_type) continue; @@ -732,11 +734,14 @@ bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr * !memcmp(&tmp->ip_data, &fltr->ip_data, sizeof(fltr->ip_data)) && !memcmp(&tmp->ext_data, &fltr->ext_data, - sizeof(fltr->ext_data))) - return true; + sizeof(fltr->ext_data))) { + ret = true; + break; + } } + spin_unlock_bh(&adapter->fdir_fltr_lock); - return false; + return ret; } /** From 918c1e6843b7e81d0e5cf7994f41f28dc34c98b0 Mon Sep 17 00:00:00 2001 From: Douglas Miller Date: Wed, 2 Aug 2023 13:32:41 -0400 Subject: [PATCH 337/513] IB/hfi1: Fix possible panic during hotplug remove commit 4fdfaef71fced490835145631a795497646f4555 upstream. During hotplug remove it is possible that the update counters work might be pending, and may run after memory has been freed. Cancel the update counters work before freeing memory. 
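The teardown rule the fix follows, shown as a hedged sketch with a made-up example_dd structure rather than the hfi1 types: any timer or work item that may still dereference a buffer has to be cancelled synchronously before that buffer is freed.

#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/workqueue.h>

struct example_dd {
	struct timer_list stats_timer;
	struct work_struct update_work;
	void *cntrs;			/* buffer used by update_work */
};

static void example_free_cntrs(struct example_dd *dd)
{
	/* Stop the asynchronous users first ... */
	del_timer_sync(&dd->stats_timer);
	cancel_work_sync(&dd->update_work);

	/* ... and only then release the memory they dereference. */
	kfree(dd->cntrs);
	dd->cntrs = NULL;
}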
Fixes: 7724105686e7 ("IB/hfi1: add driver files") Signed-off-by: Douglas Miller Signed-off-by: Dennis Dalessandro Link: https://lore.kernel.org/r/169099756100.3927190.15284930454106475280.stgit@awfm-02.cornelisnetworks.com Signed-off-by: Leon Romanovsky Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/hw/hfi1/chip.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/infiniband/hw/hfi1/chip.c b/drivers/infiniband/hw/hfi1/chip.c index 689921dc3d4a..b69dd618146e 100644 --- a/drivers/infiniband/hw/hfi1/chip.c +++ b/drivers/infiniband/hw/hfi1/chip.c @@ -12306,6 +12306,7 @@ static void free_cntrs(struct hfi1_devdata *dd) if (dd->synth_stats_timer.function) del_timer_sync(&dd->synth_stats_timer); + cancel_work_sync(&dd->update_cntr_work); ppd = (struct hfi1_pportdata *)(dd + 1); for (i = 0; i < dd->num_pports; i++, ppd++) { kfree(ppd->cntrs); From 26a27dd76054d57ccb68f506673f3258e249b02d Mon Sep 17 00:00:00 2001 From: Daniel Stone Date: Tue, 8 Aug 2023 11:44:05 +0100 Subject: [PATCH 338/513] drm/rockchip: Don't spam logs in atomic check commit 43dae319b50fac075ad864f84501c703ef20eb2b upstream. Userspace should not be able to trigger DRM_ERROR messages to spam the logs; especially not through atomic commit parameters which are completely legitimate for userspace to attempt. Signed-off-by: Daniel Stone Fixes: 7707f7227f09 ("drm/rockchip: Add support for afbc") Signed-off-by: Heiko Stuebner Link: https://patchwork.freedesktop.org/patch/msgid/20230808104405.522493-1-daniels@collabora.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c index 26a3ed142b6b..cfe13b203b89 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c @@ -820,12 +820,12 @@ static int vop_plane_atomic_check(struct drm_plane *plane, * need align with 2 pixel. 
*/ if (fb->format->is_yuv && ((new_plane_state->src.x1 >> 16) % 2)) { - DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n"); + DRM_DEBUG_KMS("Invalid Source: Yuv format not support odd xpos\n"); return -EINVAL; } if (fb->format->is_yuv && new_plane_state->rotation & DRM_MODE_REFLECT_Y) { - DRM_ERROR("Invalid Source: Yuv format does not support this rotation\n"); + DRM_DEBUG_KMS("Invalid Source: Yuv format does not support this rotation\n"); return -EINVAL; } @@ -833,7 +833,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane, struct vop *vop = to_vop(crtc); if (!vop->data->afbc) { - DRM_ERROR("vop does not support AFBC\n"); + DRM_DEBUG_KMS("vop does not support AFBC\n"); return -EINVAL; } @@ -842,15 +842,16 @@ static int vop_plane_atomic_check(struct drm_plane *plane, return ret; if (new_plane_state->src.x1 || new_plane_state->src.y1) { - DRM_ERROR("AFBC does not support offset display, xpos=%d, ypos=%d, offset=%d\n", - new_plane_state->src.x1, - new_plane_state->src.y1, fb->offsets[0]); + DRM_DEBUG_KMS("AFBC does not support offset display, " \ + "xpos=%d, ypos=%d, offset=%d\n", + new_plane_state->src.x1, new_plane_state->src.y1, + fb->offsets[0]); return -EINVAL; } if (new_plane_state->rotation && new_plane_state->rotation != DRM_MODE_ROTATE_0) { - DRM_ERROR("No rotation support in AFBC, rotation=%d\n", - new_plane_state->rotation); + DRM_DEBUG_KMS("No rotation support in AFBC, rotation=%d\n", + new_plane_state->rotation); return -EINVAL; } } From f78a4238a87332b8e8359854610559dc54add951 Mon Sep 17 00:00:00 2001 From: Felix Fietkau Date: Thu, 22 Jun 2023 18:59:19 +0200 Subject: [PATCH 339/513] wifi: cfg80211: fix sband iftype data lookup for AP_VLAN commit 5fb9a9fb71a33be61d7d8e8ba4597bfb18d604d0 upstream. AP_VLAN interfaces are virtual, so doesn't really exist as a type for capabilities. When passed in as a type, AP is the one that's really intended. Fixes: c4cbaf7973a7 ("cfg80211: Add support for HE") Signed-off-by: Felix Fietkau Link: https://lore.kernel.org/r/20230622165919.46841-1-nbd@nbd.name Signed-off-by: Johannes Berg Signed-off-by: Greg Kroah-Hartman --- include/net/cfg80211.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h index 27336fc70467..963a810ed70d 100644 --- a/include/net/cfg80211.h +++ b/include/net/cfg80211.h @@ -510,6 +510,9 @@ ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband, if (WARN_ON(iftype >= NL80211_IFTYPE_MAX)) return NULL; + if (iftype == NL80211_IFTYPE_AP_VLAN) + iftype = NL80211_IFTYPE_AP; + for (i = 0; i < sband->n_iftype_data; i++) { const struct ieee80211_sband_iftype_data *data = &sband->iftype_data[i]; From 7d496cd83a9db017766a88707b591e0ca9e923c9 Mon Sep 17 00:00:00 2001 From: Michael Guralnik Date: Wed, 19 Jul 2023 12:02:41 +0300 Subject: [PATCH 340/513] RDMA/umem: Set iova in ODP flow commit 186b169cf1e4be85aa212a893ea783a543400979 upstream. Fixing the ODP registration flow to set the iova correctly. The calculation in ib_umem_num_dma_blocks() function assumes the iova of the umem is set correctly. When iova is not set, the calculation in ib_umem_num_dma_blocks() is equivalent to length/page_size, which is true only when memory is aligned. For unaligned memory, iova must be set for the ALIGN() in the ib_umem_num_dma_blocks() to take effect and return a correct value. mlx5_ib uses ib_umem_num_dma_blocks() to decide the mkey size to use for the MR. 
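To illustrate why the iova matters, here is a hedged sketch of the block-count arithmetic; example_num_dma_blocks() is illustrative and not the kernel helper. With iova left at 0 the calculation degenerates to length / page_size, which undercounts an unaligned range.

#include <linux/kernel.h>
#include <linux/types.h>

/* Number of page_size-sized blocks needed to cover [iova, iova + length). */
static size_t example_num_dma_blocks(u64 iova, u64 length, u64 page_size)
{
	return (ALIGN(iova + length, page_size) -
		ALIGN_DOWN(iova, page_size)) / page_size;
}

/*
 * With 4 KiB blocks and a 4 KiB range starting at offset 0x800:
 *   example_num_dma_blocks(0x800, 0x1000, 0x1000) == 2   correct
 *   example_num_dma_blocks(0x0,   0x1000, 0x1000) == 1   undercount when
 *                                                        iova was never set
 */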
Without this fix, when registering unaligned ODP MR, a wrong size mkey might be chosen and this might cause the UMR to fail. UMR would fail over insufficient size to update the mkey translation: infiniband mlx5_0: dump_cqe:273:(pid 0): dump error cqe 00000000: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00000010: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00000020: 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00000030: 00 00 00 00 0f 00 78 06 25 00 00 58 00 da ac d2 infiniband mlx5_0: mlx5_ib_post_send_wait:806:(pid 20311): reg umr failed (6) infiniband mlx5_0: pagefault_real_mr:661:(pid 20311): Failed to update mkey page tables Fixes: f0093fb1a7cb ("RDMA/mlx5: Move mlx5_ib_cont_pages() to the creation of the mlx5_ib_mr") Fixes: a665aca89a41 ("RDMA/umem: Split ib_umem_num_pages() into ib_umem_num_dma_blocks()") Signed-off-by: Artemy Kovalyov Signed-off-by: Michael Guralnik Link: https://lore.kernel.org/r/3d4be7ca2155bf239dd8c00a2d25974a92c26ab8.1689757344.git.leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Greg Kroah-Hartman --- drivers/infiniband/core/umem.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 86d479772fbc..957634eceba8 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -85,6 +85,8 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, dma_addr_t mask; int i; + umem->iova = va = virt; + if (umem->is_odp) { unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift); @@ -100,7 +102,6 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, */ pgsz_bitmap &= GENMASK(BITS_PER_LONG - 1, PAGE_SHIFT); - umem->iova = va = virt; /* The best result is the smallest page size that results in the minimum * number of required pages. Compute the largest page size that could * work based on VA address bits that don't change. From 1ae9703c2e3207168e145f0f5519d5a572f7eb54 Mon Sep 17 00:00:00 2001 From: Li Yang Date: Wed, 2 Aug 2023 14:13:47 -0500 Subject: [PATCH 341/513] net: phy: at803x: remove set/get wol callbacks for AR8032 commit d7791cec2304aea22eb2ada944e4d467302f5bfe upstream. Since the AR8032 part does not support wol, remove related callbacks from it. Fixes: 5800091a2061 ("net: phy: at803x: add support for AR8032 PHY") Signed-off-by: Li Yang Cc: David Bauer Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/phy/at803x.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 5ae39d236b30..ba61007bfc49 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -1375,8 +1375,6 @@ static struct phy_driver at803x_driver[] = { .flags = PHY_POLL_CABLE_TEST, .config_init = at803x_config_init, .link_change_notify = at803x_link_change_notify, - .set_wol = at803x_set_wol, - .get_wol = at803x_get_wol, .suspend = at803x_suspend, .resume = at803x_resume, /* PHY_BASIC_FEATURES */ From 094310eb2b93c4db52c59921aae5e0c9b63e75c9 Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Mon, 7 Aug 2023 19:34:50 +0800 Subject: [PATCH 342/513] net: hns3: refactor hclge_mac_link_status_wait for interface reuse commit 08469dacfad25428b66549716811807203744f4f upstream. Some nic configurations could only be performed after link is down. So this patch refactor this API for reuse. 
Signed-off-by: Jie Wang Signed-off-by: Jijie Shao Reviewed-by: Leon Romanovsky Link: https://lore.kernel.org/r/20230807113452.474224-3-shaojijie@huawei.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- .../ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index bfdc021f4a19..cbec320cf9ee 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -70,6 +70,8 @@ static void hclge_sync_mac_table(struct hclge_dev *hdev); static void hclge_restore_hw_table(struct hclge_dev *hdev); static void hclge_sync_promisc_mode(struct hclge_dev *hdev); static void hclge_sync_fd_table(struct hclge_dev *hdev); +static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, + int wait_cnt); static struct hnae3_ae_algo ae_algo; @@ -7745,10 +7747,9 @@ static void hclge_phy_link_status_wait(struct hclge_dev *hdev, } while (++i < HCLGE_PHY_LINK_STATUS_NUM); } -static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) +static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret, + int wait_cnt) { -#define HCLGE_MAC_LINK_STATUS_NUM 100 - int link_status; int i = 0; int ret; @@ -7761,13 +7762,15 @@ static int hclge_mac_link_status_wait(struct hclge_dev *hdev, int link_ret) return 0; msleep(HCLGE_LINK_STATUS_MS); - } while (++i < HCLGE_MAC_LINK_STATUS_NUM); + } while (++i < wait_cnt); return -EBUSY; } static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, bool is_phy) { +#define HCLGE_MAC_LINK_STATUS_NUM 100 + int link_ret; link_ret = en ? HCLGE_LINK_STATUS_UP : HCLGE_LINK_STATUS_DOWN; @@ -7775,7 +7778,8 @@ static int hclge_mac_phy_link_status_wait(struct hclge_dev *hdev, bool en, if (is_phy) hclge_phy_link_status_wait(hdev, link_ret); - return hclge_mac_link_status_wait(hdev, link_ret); + return hclge_mac_link_status_wait(hdev, link_ret, + HCLGE_MAC_LINK_STATUS_NUM); } static int hclge_set_app_loopback(struct hclge_dev *hdev, bool en) From 91307347d632f179ad3ecc577d99ca2141788067 Mon Sep 17 00:00:00 2001 From: Jie Wang Date: Mon, 7 Aug 2023 19:34:51 +0800 Subject: [PATCH 343/513] net: hns3: add wait until mac link down commit 6265e242f7b95f2c1195b42ec912b84ad161470e upstream. In some configure flow of hns3 driver, for example, change mtu, it will disable MAC through firmware before configuration. But firmware disables MAC asynchronously. The rx traffic may be not stopped in this case. So fixes it by waiting until mac link is down. 
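A hedged sketch of the bounded polling pattern being reused here; the get_link callback stands in for the device-specific firmware query and is not an hns3 function. The loop re-reads the MAC link state, sleeps between attempts and gives up with -EBUSY after a caller-supplied number of tries.

#include <linux/delay.h>
#include <linux/errno.h>

#define EXAMPLE_LINK_STATUS_MS	10

static int example_link_status_wait(int (*get_link)(void *hw, int *up),
				    void *hw, int want_up, int wait_cnt)
{
	int link_up;
	int i = 0;

	do {
		if (!get_link(hw, &link_up) && link_up == want_up)
			return 0;

		msleep(EXAMPLE_LINK_STATUS_MS);
	} while (++i < wait_cnt);

	return -EBUSY;
}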
Fixes: a9775bb64aa7 ("net: hns3: fix set and get link ksettings issue") Signed-off-by: Jie Wang Signed-off-by: Jijie Shao Reviewed-by: Leon Romanovsky Link: https://lore.kernel.org/r/20230807113452.474224-4-shaojijie@huawei.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- .../net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c index cbec320cf9ee..847ebb31d470 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c @@ -7658,6 +7658,8 @@ static void hclge_enable_fd(struct hnae3_handle *handle, bool enable) static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) { +#define HCLGE_LINK_STATUS_WAIT_CNT 3 + struct hclge_desc desc; struct hclge_config_mac_mode_cmd *req = (struct hclge_config_mac_mode_cmd *)desc.data; @@ -7682,9 +7684,15 @@ static void hclge_cfg_mac_mode(struct hclge_dev *hdev, bool enable) req->txrx_pad_fcs_loop_en = cpu_to_le32(loop_en); ret = hclge_cmd_send(&hdev->hw, &desc, 1); - if (ret) + if (ret) { dev_err(&hdev->pdev->dev, "mac enable fail, ret =%d.\n", ret); + return; + } + + if (!enable) + hclge_mac_link_status_wait(hdev, HCLGE_LINK_STATUS_DOWN, + HCLGE_LINK_STATUS_WAIT_CNT); } static int hclge_config_switch_param(struct hclge_dev *hdev, int vfid, From 4457300cfd843176ce7318e3693235bdba4503f7 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 8 Aug 2023 10:52:31 +0300 Subject: [PATCH 344/513] nexthop: Fix infinite nexthop dump when using maximum nexthop ID commit 913f60cacda73ccac8eead94983e5884c03e04cd upstream. A netlink dump callback can return a positive number to signal that more information needs to be dumped or zero to signal that the dump is complete. In the second case, the core netlink code will append the NLMSG_DONE message to the skb in order to indicate to user space that the dump is complete. The nexthop dump callback always returns a positive number if nexthops were filled in the provided skb, even if the dump is complete. This means that a dump will span at least two recvmsg() calls as long as nexthops are present. In the last recvmsg() call the dump callback will not fill in any nexthops because the previous call indicated that the dump should restart from the last dumped nexthop ID plus one. # ip nexthop add id 1 blackhole # strace -e sendto,recvmsg -s 5 ip nexthop sendto(3, [[{nlmsg_len=24, nlmsg_type=RTM_GETNEXTHOP, nlmsg_flags=NLM_F_REQUEST|NLM_F_DUMP, nlmsg_seq=1691394315, nlmsg_pid=0}, {nh_family=AF_UNSPEC, nh_scope=RT_SCOPE_UNIVERSE, nh_protocol=RTPROT_UNSPEC, nh_flags=0}], {nlmsg_len=0, nlmsg_type=0 /* NLMSG_??? 
*/, nlmsg_flags=0, nlmsg_seq=0, nlmsg_pid=0}], 152, 0, NULL, 0) = 152 recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=NULL, iov_len=0}], msg_iovlen=1, msg_controllen=0, msg_flags=MSG_TRUNC}, MSG_PEEK|MSG_TRUNC) = 36 recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=[{nlmsg_len=36, nlmsg_type=RTM_NEWNEXTHOP, nlmsg_flags=NLM_F_MULTI, nlmsg_seq=1691394315, nlmsg_pid=343}, {nh_family=AF_INET, nh_scope=RT_SCOPE_UNIVERSE, nh_protocol=RTPROT_UNSPEC, nh_flags=0}, [[{nla_len=8, nla_type=NHA_ID}, 1], {nla_len=4, nla_type=NHA_BLACKHOLE}]], iov_len=32768}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 36 id 1 blackhole recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=NULL, iov_len=0}], msg_iovlen=1, msg_controllen=0, msg_flags=MSG_TRUNC}, MSG_PEEK|MSG_TRUNC) = 20 recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=[{nlmsg_len=20, nlmsg_type=NLMSG_DONE, nlmsg_flags=NLM_F_MULTI, nlmsg_seq=1691394315, nlmsg_pid=343}, 0], iov_len=32768}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 20 +++ exited with 0 +++ This behavior is both inefficient and buggy. If the last nexthop to be dumped had the maximum ID of 0xffffffff, then the dump will restart from 0 (0xffffffff + 1) and never end: # ip nexthop add id $((2**32-1)) blackhole # ip nexthop id 4294967295 blackhole id 4294967295 blackhole [...] Fix by adjusting the dump callback to return zero when the dump is complete. After the fix only one recvmsg() call is made and the NLMSG_DONE message is appended to the RTM_NEWNEXTHOP response: # ip nexthop add id $((2**32-1)) blackhole # strace -e sendto,recvmsg -s 5 ip nexthop sendto(3, [[{nlmsg_len=24, nlmsg_type=RTM_GETNEXTHOP, nlmsg_flags=NLM_F_REQUEST|NLM_F_DUMP, nlmsg_seq=1691394080, nlmsg_pid=0}, {nh_family=AF_UNSPEC, nh_scope=RT_SCOPE_UNIVERSE, nh_protocol=RTPROT_UNSPEC, nh_flags=0}], {nlmsg_len=0, nlmsg_type=0 /* NLMSG_??? */, nlmsg_flags=0, nlmsg_seq=0, nlmsg_pid=0}], 152, 0, NULL, 0) = 152 recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=NULL, iov_len=0}], msg_iovlen=1, msg_controllen=0, msg_flags=MSG_TRUNC}, MSG_PEEK|MSG_TRUNC) = 56 recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=[[{nlmsg_len=36, nlmsg_type=RTM_NEWNEXTHOP, nlmsg_flags=NLM_F_MULTI, nlmsg_seq=1691394080, nlmsg_pid=342}, {nh_family=AF_INET, nh_scope=RT_SCOPE_UNIVERSE, nh_protocol=RTPROT_UNSPEC, nh_flags=0}, [[{nla_len=8, nla_type=NHA_ID}, 4294967295], {nla_len=4, nla_type=NHA_BLACKHOLE}]], [{nlmsg_len=20, nlmsg_type=NLMSG_DONE, nlmsg_flags=NLM_F_MULTI, nlmsg_seq=1691394080, nlmsg_pid=342}, 0]], iov_len=32768}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 56 id 4294967295 blackhole +++ exited with 0 +++ Note that if the NLMSG_DONE message cannot be appended because of size limitations, then another recvmsg() will be needed, but the core netlink code will not invoke the dump callback and simply reply with a NLMSG_DONE message since it knows that the callback previously returned zero. Add a test that fails before the fix: # ./fib_nexthops.sh -t basic [...] TEST: Maximum nexthop ID dump [FAIL] [...] And passes after it: # ./fib_nexthops.sh -t basic [...] TEST: Maximum nexthop ID dump [ OK ] [...] 
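The contract the fix relies on, as a hedged sketch; example_fill_nexthops() is a hypothetical helper that returns a negative error when the skb runs out of room and 0 once every remaining object fit. A dump callback should return a positive byte count only while more data remains; returning 0 lets the core append NLMSG_DONE to the same response.

#include <linux/compiler.h>
#include <linux/netlink.h>
#include <linux/skbuff.h>

int example_fill_nexthops(struct sk_buff *skb, struct netlink_callback *cb);

static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err = example_fill_nexthops(skb, cb);

	if (err < 0) {
		/* Buffer full: report progress so the dump is resumed. */
		if (likely(skb->len))
			err = skb->len;
		return err;
	}

	/* Everything fit: 0 tells the core to append NLMSG_DONE. */
	return 0;
}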
Fixes: ab84be7e54fc ("net: Initial nexthop code") Reported-by: Petr Machata Closes: https://lore.kernel.org/netdev/87sf91enuf.fsf@nvidia.com/ Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Reviewed-by: David Ahern Link: https://lore.kernel.org/r/20230808075233.3337922-2-idosch@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/ipv4/nexthop.c | 6 +----- tools/testing/selftests/net/fib_nexthops.sh | 5 +++++ 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 6cc7d347ec0a..2cfcf5b3ae43 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -3222,13 +3222,9 @@ static int rtm_dump_nexthop(struct sk_buff *skb, struct netlink_callback *cb) &rtm_dump_nexthop_cb, &filter); if (err < 0) { if (likely(skb->len)) - goto out; - goto out_err; + err = skb->len; } -out: - err = skb->len; -out_err: cb->seq = net->nexthop.seq; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); return err; diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index 0c066ba579d4..34b230914deb 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -1917,6 +1917,11 @@ basic() run_cmd "$IP link set dev lo up" + # Dump should not loop endlessly when maximum nexthop ID is configured. + run_cmd "$IP nexthop add id $((2**32-1)) blackhole" + run_cmd "timeout 5 $IP nexthop" + log_test $? 0 "Maximum nexthop ID dump" + # # groups # From 608a4327c257c06f0d9d152ae2168024934700c5 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 8 Aug 2023 10:52:32 +0300 Subject: [PATCH 345/513] nexthop: Make nexthop bucket dump more efficient commit f10d3d9df49d9e6ee244fda6ca264f901a9c5d85 upstream. rtm_dump_nexthop_bucket_nh() is used to dump nexthop buckets belonging to a specific resilient nexthop group. The function returns a positive return code (the skb length) upon both success and failure. The above behavior is problematic. When a complete nexthop bucket dump is requested, the function that walks the different nexthops treats the non-zero return code as an error. This causes buckets belonging to different resilient nexthop groups to be dumped using different buffers even if they can all fit in the same buffer: # ip link add name dummy1 up type dummy # ip nexthop add id 1 dev dummy1 # ip nexthop add id 10 group 1 type resilient buckets 1 # ip nexthop add id 20 group 1 type resilient buckets 1 # strace -e recvmsg -s 0 ip nexthop bucket [...] recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[...], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 64 id 10 index 0 idle_time 10.27 nhid 1 [...] recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[...], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 64 id 20 index 0 idle_time 6.44 nhid 1 [...] Fix by only returning a non-zero return code when an error occurred and restarting the dump from the bucket index we failed to fill in. This allows buckets belonging to different resilient nexthop groups to be dumped using the same buffer: # ip link add name dummy1 up type dummy # ip nexthop add id 1 dev dummy1 # ip nexthop add id 10 group 1 type resilient buckets 1 # ip nexthop add id 20 group 1 type resilient buckets 1 # strace -e recvmsg -s 0 ip nexthop bucket [...] 
recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[...], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 128 id 10 index 0 idle_time 30.21 nhid 1 id 20 index 0 idle_time 26.7 nhid 1 [...] While this change is more of a performance improvement change than an actual bug fix, it is a prerequisite for a subsequent patch that does fix a bug. Fixes: 8a1bbabb034d ("nexthop: Add netlink handlers for bucket dump") Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Reviewed-by: David Ahern Link: https://lore.kernel.org/r/20230808075233.3337922-3-idosch@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/ipv4/nexthop.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index 2cfcf5b3ae43..b755986dfd3c 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -3364,25 +3364,19 @@ static int rtm_dump_nexthop_bucket_nh(struct sk_buff *skb, dd->filter.res_bucket_nh_id != nhge->nh->id) continue; + dd->ctx->bucket_index = bucket_index; err = nh_fill_res_bucket(skb, nh, bucket, bucket_index, RTM_NEWNEXTHOPBUCKET, portid, cb->nlh->nlmsg_seq, NLM_F_MULTI, cb->extack); - if (err < 0) { - if (likely(skb->len)) - goto out; - goto out_err; - } + if (err) + return err; } dd->ctx->done_nh_idx = dd->ctx->nh.idx + 1; - bucket_index = 0; + dd->ctx->bucket_index = 0; -out: - err = skb->len; -out_err: - dd->ctx->bucket_index = bucket_index; - return err; + return 0; } static int rtm_dump_nexthop_bucket_cb(struct sk_buff *skb, From 7e1dc94b2d5089916840bf5b8a61063106713eda Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 8 Aug 2023 10:52:33 +0300 Subject: [PATCH 346/513] nexthop: Fix infinite nexthop bucket dump when using maximum nexthop ID commit 8743aeff5bc4dcb5b87b43765f48d5ac3ad7dd9f upstream. A netlink dump callback can return a positive number to signal that more information needs to be dumped or zero to signal that the dump is complete. In the second case, the core netlink code will append the NLMSG_DONE message to the skb in order to indicate to user space that the dump is complete. The nexthop bucket dump callback always returns a positive number if nexthop buckets were filled in the provided skb, even if the dump is complete. This means that a dump will span at least two recvmsg() calls as long as nexthop buckets are present. In the last recvmsg() call the dump callback will not fill in any nexthop buckets because the previous call indicated that the dump should restart from the last dumped nexthop ID plus one. # ip link add name dummy1 up type dummy # ip nexthop add id 1 dev dummy1 # ip nexthop add id 10 group 1 type resilient buckets 2 # strace -e sendto,recvmsg -s 5 ip nexthop bucket sendto(3, [[{nlmsg_len=24, nlmsg_type=RTM_GETNEXTHOPBUCKET, nlmsg_flags=NLM_F_REQUEST|NLM_F_DUMP, nlmsg_seq=1691396980, nlmsg_pid=0}, {family=AF_UNSPEC, data="\x00\x00\x00\x00\x00"...}], {nlmsg_len=0, nlmsg_type=0 /* NLMSG_??? 
*/, nlmsg_flags=0, nlmsg_seq=0, nlmsg_pid=0}], 152, 0, NULL, 0) = 152 recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=NULL, iov_len=0}], msg_iovlen=1, msg_controllen=0, msg_flags=MSG_TRUNC}, MSG_PEEK|MSG_TRUNC) = 128 recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=[[{nlmsg_len=64, nlmsg_type=RTM_NEWNEXTHOPBUCKET, nlmsg_flags=NLM_F_MULTI, nlmsg_seq=1691396980, nlmsg_pid=347}, {family=AF_UNSPEC, data="\x00\x00\x00\x00\x00"...}], [{nlmsg_len=64, nlmsg_type=RTM_NEWNEXTHOPBUCKET, nlmsg_flags=NLM_F_MULTI, nlmsg_seq=1691396980, nlmsg_pid=347}, {family=AF_UNSPEC, data="\x00\x00\x00\x00\x00"...}]], iov_len=32768}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 128 id 10 index 0 idle_time 6.66 nhid 1 id 10 index 1 idle_time 6.66 nhid 1 recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=NULL, iov_len=0}], msg_iovlen=1, msg_controllen=0, msg_flags=MSG_TRUNC}, MSG_PEEK|MSG_TRUNC) = 20 recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=[{nlmsg_len=20, nlmsg_type=NLMSG_DONE, nlmsg_flags=NLM_F_MULTI, nlmsg_seq=1691396980, nlmsg_pid=347}, 0], iov_len=32768}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 20 +++ exited with 0 +++ This behavior is both inefficient and buggy. If the last nexthop to be dumped had the maximum ID of 0xffffffff, then the dump will restart from 0 (0xffffffff + 1) and never end: # ip link add name dummy1 up type dummy # ip nexthop add id 1 dev dummy1 # ip nexthop add id $((2**32-1)) group 1 type resilient buckets 2 # ip nexthop bucket id 4294967295 index 0 idle_time 5.55 nhid 1 id 4294967295 index 1 idle_time 5.55 nhid 1 id 4294967295 index 0 idle_time 5.55 nhid 1 id 4294967295 index 1 idle_time 5.55 nhid 1 [...] Fix by adjusting the dump callback to return zero when the dump is complete. After the fix only one recvmsg() call is made and the NLMSG_DONE message is appended to the RTM_NEWNEXTHOPBUCKET responses: # ip link add name dummy1 up type dummy # ip nexthop add id 1 dev dummy1 # ip nexthop add id $((2**32-1)) group 1 type resilient buckets 2 # strace -e sendto,recvmsg -s 5 ip nexthop bucket sendto(3, [[{nlmsg_len=24, nlmsg_type=RTM_GETNEXTHOPBUCKET, nlmsg_flags=NLM_F_REQUEST|NLM_F_DUMP, nlmsg_seq=1691396737, nlmsg_pid=0}, {family=AF_UNSPEC, data="\x00\x00\x00\x00\x00"...}], {nlmsg_len=0, nlmsg_type=0 /* NLMSG_??? 
*/, nlmsg_flags=0, nlmsg_seq=0, nlmsg_pid=0}], 152, 0, NULL, 0) = 152 recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=NULL, iov_len=0}], msg_iovlen=1, msg_controllen=0, msg_flags=MSG_TRUNC}, MSG_PEEK|MSG_TRUNC) = 148 recvmsg(3, {msg_name={sa_family=AF_NETLINK, nl_pid=0, nl_groups=00000000}, msg_namelen=12, msg_iov=[{iov_base=[[{nlmsg_len=64, nlmsg_type=RTM_NEWNEXTHOPBUCKET, nlmsg_flags=NLM_F_MULTI, nlmsg_seq=1691396737, nlmsg_pid=350}, {family=AF_UNSPEC, data="\x00\x00\x00\x00\x00"...}], [{nlmsg_len=64, nlmsg_type=RTM_NEWNEXTHOPBUCKET, nlmsg_flags=NLM_F_MULTI, nlmsg_seq=1691396737, nlmsg_pid=350}, {family=AF_UNSPEC, data="\x00\x00\x00\x00\x00"...}], [{nlmsg_len=20, nlmsg_type=NLMSG_DONE, nlmsg_flags=NLM_F_MULTI, nlmsg_seq=1691396737, nlmsg_pid=350}, 0]], iov_len=32768}], msg_iovlen=1, msg_controllen=0, msg_flags=0}, 0) = 148 id 4294967295 index 0 idle_time 6.61 nhid 1 id 4294967295 index 1 idle_time 6.61 nhid 1 +++ exited with 0 +++ Note that if the NLMSG_DONE message cannot be appended because of size limitations, then another recvmsg() will be needed, but the core netlink code will not invoke the dump callback and simply reply with a NLMSG_DONE message since it knows that the callback previously returned zero. Add a test that fails before the fix: # ./fib_nexthops.sh -t basic_res [...] TEST: Maximum nexthop ID dump [FAIL] [...] And passes after it: # ./fib_nexthops.sh -t basic_res [...] TEST: Maximum nexthop ID dump [ OK ] [...] Fixes: 8a1bbabb034d ("nexthop: Add netlink handlers for bucket dump") Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Reviewed-by: David Ahern Link: https://lore.kernel.org/r/20230808075233.3337922-4-idosch@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- net/ipv4/nexthop.c | 6 +----- tools/testing/selftests/net/fib_nexthops.sh | 5 +++++ 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/net/ipv4/nexthop.c b/net/ipv4/nexthop.c index b755986dfd3c..c140a36bd1e6 100644 --- a/net/ipv4/nexthop.c +++ b/net/ipv4/nexthop.c @@ -3425,13 +3425,9 @@ static int rtm_dump_nexthop_bucket(struct sk_buff *skb, if (err < 0) { if (likely(skb->len)) - goto out; - goto out_err; + err = skb->len; } -out: - err = skb->len; -out_err: cb->seq = net->nexthop.seq; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); return err; diff --git a/tools/testing/selftests/net/fib_nexthops.sh b/tools/testing/selftests/net/fib_nexthops.sh index 34b230914deb..a194dbcb405a 100755 --- a/tools/testing/selftests/net/fib_nexthops.sh +++ b/tools/testing/selftests/net/fib_nexthops.sh @@ -2142,6 +2142,11 @@ basic_res() run_cmd "$IP nexthop bucket list fdb" log_test $? 255 "Dump all nexthop buckets with invalid 'fdb' keyword" + # Dump should not loop endlessly when maximum nexthop ID is configured. + run_cmd "$IP nexthop add id $((2**32-1)) group 1/2 type resilient buckets 4" + run_cmd "timeout 5 $IP nexthop bucket" + log_test $? 0 "Maximum nexthop ID dump" + # # resilient nexthop buckets get requests # From 086a80eb62131940125eddd43b2fb1ed7d9ab806 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Wed, 12 Jul 2023 18:26:45 +0530 Subject: [PATCH 347/513] dmaengine: mcf-edma: Fix a potential un-allocated memory access commit 0a46781c89dece85386885a407244ca26e5c1c44 upstream. When 'mcf_edma' is allocated, some space is allocated for a flexible array at the end of the struct. 'chans' item are allocated, that is to say 'pdata->dma_channels'. Then, this number of item is stored in 'mcf_edma->n_chans'. 
A few lines later, if 'mcf_edma->n_chans' is 0, then a default value of 64 is set. This ends to no space allocated by devm_kzalloc() because chans was 0, but 64 items are read and/or written in some not allocated memory. Change the logic to define a default value before allocating the memory. Fixes: e7a3ff92eaf1 ("dmaengine: fsl-edma: add ColdFire mcf5441x edma support") Signed-off-by: Christophe JAILLET Link: https://lore.kernel.org/r/f55d914407c900828f6fad3ea5fa791a5f17b9a4.1685172449.git.christophe.jaillet@wanadoo.fr Signed-off-by: Vinod Koul Signed-off-by: Greg Kroah-Hartman --- drivers/dma/mcf-edma.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/dma/mcf-edma.c b/drivers/dma/mcf-edma.c index e12b754e6398..60d3c5f09ad6 100644 --- a/drivers/dma/mcf-edma.c +++ b/drivers/dma/mcf-edma.c @@ -191,7 +191,13 @@ static int mcf_edma_probe(struct platform_device *pdev) return -EINVAL; } - chans = pdata->dma_channels; + if (!pdata->dma_channels) { + dev_info(&pdev->dev, "setting default channel number to 64"); + chans = 64; + } else { + chans = pdata->dma_channels; + } + len = sizeof(*mcf_edma) + sizeof(*mcf_chan) * chans; mcf_edma = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); if (!mcf_edma) @@ -203,11 +209,6 @@ static int mcf_edma_probe(struct platform_device *pdev) mcf_edma->drvdata = &mcf_data; mcf_edma->big_endian = 1; - if (!mcf_edma->n_chans) { - dev_info(&pdev->dev, "setting default channel number to 64"); - mcf_edma->n_chans = 64; - } - mutex_init(&mcf_edma->fsl_edma_mutex); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); From f638fc2f737766936196f535b20659c47200838f Mon Sep 17 00:00:00 2001 From: Daniel Jurgens Date: Tue, 11 Jul 2023 00:28:10 +0300 Subject: [PATCH 348/513] net/mlx5: Allow 0 for total host VFs commit 2dc2b3922d3c0f52d3a792d15dcacfbc4cc76b8f upstream. When querying eswitch functions 0 is a valid number of host VFs. After introducing ARM SRIOV falling through to getting the max value from PCI results in using the total VFs allowed on the ARM for the host. Fixes: 86eec50beaf3 ("net/mlx5: Support querying max VFs from device"); Signed-off-by: Daniel Jurgens Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/sriov.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c index e8185b69ac6c..373d3d4bf3a6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c @@ -256,8 +256,7 @@ static u16 mlx5_get_max_vfs(struct mlx5_core_dev *dev) host_total_vfs = MLX5_GET(query_esw_functions_out, out, host_params_context.host_total_vfs); kvfree(out); - if (host_total_vfs) - return host_total_vfs; + return host_total_vfs; } done: From 27e8db8380eb82220725ab9725eccb835ab1655f Mon Sep 17 00:00:00 2001 From: Moshe Shemesh Date: Wed, 19 Jul 2023 11:33:44 +0300 Subject: [PATCH 349/513] net/mlx5: Skip clock update work when device is in error state commit d006207625657322ba8251b6e7e829f9659755dc upstream. When device is in error state, marked by the flag MLX5_DEVICE_STATE_INTERNAL_ERROR, the HW and PCI may not be accessible and so clock update work should be skipped. Furthermore, such access through PCI in error state, after calling mlx5_pci_disable_device() can result in failing to recover from pci errors. 
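A hedged sketch of the guard being added; example_dev and its state field are stand-ins, not the mlx5 structures. The periodic work handler checks the device state first, skips any hardware access while the device is in an internal-error state, and still reschedules itself so the work resumes after recovery.

#include <linux/kernel.h>
#include <linux/workqueue.h>

enum example_state { EXAMPLE_STATE_UP, EXAMPLE_STATE_INTERNAL_ERROR };

struct example_dev {
	enum example_state state;
	struct delayed_work overflow_work;
	unsigned long overflow_period;	/* in jiffies */
};

static void example_overflow_work(struct work_struct *work)
{
	struct example_dev *dev = container_of(to_delayed_work(work),
					       struct example_dev,
					       overflow_work);

	if (dev->state == EXAMPLE_STATE_INTERNAL_ERROR)
		goto out;

	/* ... touch registers and update the clock page here ... */

out:
	schedule_delayed_work(&dev->overflow_work, dev->overflow_period);
}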
Fixes: ef9814deafd0 ("net/mlx5e: Add HW timestamping (TS) support") Reported-and-tested-by: Ganesh G R Closes: https://lore.kernel.org/netdev/9bdb9b9d-140a-7a28-f0de-2e64e873c068@nvidia.com Signed-off-by: Moshe Shemesh Reviewed-by: Aya Levin Signed-off-by: Saeed Mahameed Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c index 8490c0cf80a8..6fece284de0f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c @@ -189,10 +189,15 @@ static void mlx5_timestamp_overflow(struct work_struct *work) clock = container_of(timer, struct mlx5_clock, timer); mdev = container_of(clock, struct mlx5_core_dev, clock); + if (mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) + goto out; + write_seqlock_irqsave(&clock->lock, flags); timecounter_read(&timer->tc); mlx5_update_clock_info_page(mdev); write_sequnlock_irqrestore(&clock->lock, flags); + +out: schedule_delayed_work(&timer->overflow_work, timer->overflow_period); } From cee62753cf2e26e476a9e2774a912f970f1d25ff Mon Sep 17 00:00:00 2001 From: Nick Child Date: Wed, 9 Aug 2023 17:10:34 -0500 Subject: [PATCH 350/513] ibmvnic: Enforce stronger sanity checks on login response commit db17ba719bceb52f0ae4ebca0e4c17d9a3bebf05 upstream. Ensure that all offsets in a login response buffer are within the size of the allocated response buffer. Any offsets or lengths that surpass the allocation are likely the result of an incomplete response buffer. In these cases, a full reset is necessary. When attempting to login, the ibmvnic device will allocate a response buffer and pass a reference to the VIOS. The VIOS will then send the ibmvnic device a LOGIN_RSP CRQ to signal that the buffer has been filled with data. If the ibmvnic device does not get a response in 20 seconds, the old buffer is freed and a new login request is sent. With 2 outstanding requests, any LOGIN_RSP CRQ's could be for the older login request. If this is the case then the login response buffer (which is for the newer login request) could be incomplete and contain invalid data. Therefore, we must enforce strict sanity checks on the response buffer values. Testing has shown that the `off_rxadd_buff_size` value is filled in last by the VIOS and will be the smoking gun for these circumstances. Until VIOS can implement a mechanism for tracking outstanding response buffers and a method for mapping a LOGIN_RSP CRQ to a particular login response buffer, the best ibmvnic can do in this situation is perform a full reset. 
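A hedged sketch of the style of check being introduced; the example_rsp layout is illustrative and is not the ibmvnic login response format. Every offset the device writes into the response must land strictly inside both the length the device reports and the size that was actually allocated, otherwise the buffer is treated as incomplete and rejected.

#include <asm/byteorder.h>
#include <linux/errno.h>
#include <linux/types.h>

struct example_rsp {
	__be32 len;		/* total length the device claims */
	__be32 off_tx;		/* offsets into this same buffer */
	__be32 off_rx;
	__be32 off_buff_size;
};

static int example_check_rsp(const struct example_rsp *rsp, u32 alloc_len)
{
	u32 rsp_len = be32_to_cpu(rsp->len);

	if (rsp_len > alloc_len ||
	    rsp_len <= be32_to_cpu(rsp->off_tx) ||
	    rsp_len <= be32_to_cpu(rsp->off_rx) ||
	    rsp_len <= be32_to_cpu(rsp->off_buff_size))
		return -EIO;	/* incomplete or corrupt response */

	return 0;
}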
Fixes: dff515a3e71d ("ibmvnic: Harden device login requests") Signed-off-by: Nick Child Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230809221038.51296-1-nnac123@linux.ibm.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/ibm/ibmvnic.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 450b4fd9aa7f..18f636cbbefd 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4788,6 +4788,7 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, int num_tx_pools; int num_rx_pools; u64 *size_array; + u32 rsp_len; int i; /* CHECK: Test/set of login_pending does not need to be atomic @@ -4839,6 +4840,23 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, ibmvnic_reset(adapter, VNIC_RESET_FATAL); return -EIO; } + + rsp_len = be32_to_cpu(login_rsp->len); + if (be32_to_cpu(login->login_rsp_len) < rsp_len || + rsp_len <= be32_to_cpu(login_rsp->off_txsubm_subcrqs) || + rsp_len <= be32_to_cpu(login_rsp->off_rxadd_subcrqs) || + rsp_len <= be32_to_cpu(login_rsp->off_rxadd_buff_size) || + rsp_len <= be32_to_cpu(login_rsp->off_supp_tx_desc)) { + /* This can happen if a login request times out and there are + * 2 outstanding login requests sent, the LOGIN_RSP crq + * could have been for the older login request. So we are + * parsing the newer response buffer which may be incomplete + */ + dev_err(dev, "FATAL: Login rsp offsets/lengths invalid\n"); + ibmvnic_reset(adapter, VNIC_RESET_FATAL); + return -EIO; + } + size_array = (u64 *)((u8 *)(adapter->login_rsp_buf) + be32_to_cpu(adapter->login_rsp_buf->off_rxadd_buff_size)); /* variable buffer sizes are not supported, so just read the From 34fcc823823af4750377851554171b0a0afaf42f Mon Sep 17 00:00:00 2001 From: Nick Child Date: Wed, 9 Aug 2023 17:10:35 -0500 Subject: [PATCH 351/513] ibmvnic: Unmap DMA login rsp buffer on send login fail commit 411c565b4bc63e9584a8493882bd566e35a90588 upstream. If the LOGIN CRQ fails to send then we must DMA unmap the response buffer. Previously, if the CRQ failed then the memory was freed without DMA unmapping. Fixes: c98d9cc4170d ("ibmvnic: send_login should check for crq errors") Signed-off-by: Nick Child Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230809221038.51296-2-nnac123@linux.ibm.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/ibm/ibmvnic.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index 18f636cbbefd..ddc701ebdec8 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -4220,11 +4220,14 @@ static int send_login(struct ibmvnic_adapter *adapter) if (rc) { adapter->login_pending = false; netdev_err(adapter->netdev, "Failed to send login, rc=%d\n", rc); - goto buf_rsp_map_failed; + goto buf_send_failed; } return 0; +buf_send_failed: + dma_unmap_single(dev, rsp_buffer_token, rsp_buffer_size, + DMA_FROM_DEVICE); buf_rsp_map_failed: kfree(login_rsp_buffer); adapter->login_rsp_buf = NULL; From 555e126dd30b886f0f8934da8f01a79653d944d3 Mon Sep 17 00:00:00 2001 From: Nick Child Date: Wed, 9 Aug 2023 17:10:36 -0500 Subject: [PATCH 352/513] ibmvnic: Handle DMA unmapping of login buffs in release functions commit d78a671eb8996af19d6311ecdee9790d2fa479f0 upstream. 
Rather than leaving the DMA unmapping of the login buffers to the login response handler, move this work into the login release functions. Previously, these functions were only used for freeing the allocated buffers. This could lead to issues if there are more than one outstanding login buffer requests, which is possible if a login request times out. If a login request times out, then there is another call to send login. The send login function makes a call to the login buffer release function. In the past, this freed the buffers but did not DMA unmap. Therefore, the VIOS could still write to the old login (now freed) buffer. It is for this reason that it is a good idea to leave the DMA unmap call to the login buffers release function. Since the login buffer release functions now handle DMA unmapping, remove the duplicate DMA unmapping in handle_login_rsp(). Fixes: dff515a3e71d ("ibmvnic: Harden device login requests") Signed-off-by: Nick Child Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230809221038.51296-3-nnac123@linux.ibm.com Signed-off-by: Jakub Kicinski Signed-off-by: Greg Kroah-Hartman --- drivers/net/ethernet/ibm/ibmvnic.c | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index ddc701ebdec8..890e27b986e2 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -1007,12 +1007,22 @@ static int ibmvnic_login(struct net_device *netdev) static void release_login_buffer(struct ibmvnic_adapter *adapter) { + if (!adapter->login_buf) + return; + + dma_unmap_single(&adapter->vdev->dev, adapter->login_buf_token, + adapter->login_buf_sz, DMA_TO_DEVICE); kfree(adapter->login_buf); adapter->login_buf = NULL; } static void release_login_rsp_buffer(struct ibmvnic_adapter *adapter) { + if (!adapter->login_rsp_buf) + return; + + dma_unmap_single(&adapter->vdev->dev, adapter->login_rsp_buf_token, + adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); kfree(adapter->login_rsp_buf); adapter->login_rsp_buf = NULL; } @@ -4803,11 +4813,6 @@ static int handle_login_rsp(union ibmvnic_crq *login_rsp_crq, } adapter->login_pending = false; - dma_unmap_single(dev, adapter->login_buf_token, adapter->login_buf_sz, - DMA_TO_DEVICE); - dma_unmap_single(dev, adapter->login_rsp_buf_token, - adapter->login_rsp_buf_sz, DMA_FROM_DEVICE); - /* If the number of queues requested can't be allocated by the * server, the login response will return with code 1. We will need * to resend the login buffer with fewer queues requested. From c40d4b60c58d7f37069bfc5767c0bcd2f9987cb0 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Mon, 24 Jul 2023 06:26:53 -0700 Subject: [PATCH 353/513] btrfs: don't stop integrity writeback too early commit effa24f689ce0948f68c754991a445a8d697d3a8 upstream. extent_write_cache_pages stops writing pages as soon as nr_to_write hits zero. That is the right thing for opportunistic writeback, but incorrect for data integrity writeback, which needs to ensure that no dirty pages are left in the range. Thus only stop the writeback for WB_SYNC_NONE if nr_to_write hits 0. This is a port of write_cache_pages changes in commit 05fe478dd04e ("mm: write_cache_pages integrity fix"). Note that I've only trigger the problem with other changes to the btrfs writeback code, but this condition seems worthwhile fixing anyway. 
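A hedged sketch of the loop-termination rule the change ports over, with an example helper name rather than the btrfs code: opportunistic (WB_SYNC_NONE) writeback may stop once its nr_to_write budget is exhausted, while data-integrity writeback must keep going until no dirty pages remain in the range.

#include <linux/types.h>
#include <linux/writeback.h>

/* True when the current writeback pass is allowed to stop early. */
static bool example_budget_exhausted(const struct writeback_control *wbc)
{
	return wbc->sync_mode == WB_SYNC_NONE && wbc->nr_to_write <= 0;
}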
CC: stable@vger.kernel.org # 4.14+ Reviewed-by: Josef Bacik Signed-off-by: Christoph Hellwig Reviewed-by: David Sterba [ updated comment ] Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/extent_io.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 7bd704779a99..f9f6dfbc86bc 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -4844,11 +4844,12 @@ int btree_write_cache_pages(struct address_space *mapping, } /* - * the filesystem may choose to bump up nr_to_write. + * The filesystem may choose to bump up nr_to_write. * We have to make sure to honor the new nr_to_write - * at any time + * at any time. */ - nr_to_write_done = wbc->nr_to_write <= 0; + nr_to_write_done = (wbc->sync_mode == WB_SYNC_NONE && + wbc->nr_to_write <= 0); } pagevec_release(&pvec); cond_resched(); From 69dd147de419b04d1d8d2ca67ef424cddd5b8fd5 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Thu, 3 Aug 2023 17:20:42 +0800 Subject: [PATCH 354/513] btrfs: exit gracefully if reloc roots don't match commit 05d7ce504545f7874529701664c90814ca645c5d upstream. [BUG] Syzbot reported a crash that an ASSERT() got triggered inside prepare_to_merge(). [CAUSE] The root cause of the triggered ASSERT() is we can have a race between quota tree creation and relocation. This leads us to create a duplicated quota tree in the btrfs_read_fs_root() path, and since it's treated as fs tree, it would have ROOT_SHAREABLE flag, causing us to create a reloc tree for it. The bug itself is fixed by a dedicated patch for it, but this already taught us the ASSERT() is not something straightforward for developers. [ENHANCEMENT] Instead of using an ASSERT(), let's handle it gracefully and output extra info about the mismatch reloc roots to help debug. Also with the above ASSERT() removed, we can trigger ASSERT(0)s inside merge_reloc_roots() later. Also replace those ASSERT(0)s with WARN_ON()s. 
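As an illustration of the graceful-exit pattern described above (a simplified sketch with assumed arguments and message text, not the actual prepare_to_merge() code): report the inconsistency, abort the transaction and return -EUCLEAN instead of asserting.

	static int example_check_reloc_root(struct btrfs_trans_handle *trans,
					    struct btrfs_root *root,
					    struct btrfs_root *reloc_root)
	{
		if (likely(root->reloc_root == reloc_root))
			return 0;

		/* corrupted or racing on-disk state: complain and bail out */
		btrfs_err(root->fs_info,
			  "reloc tree mismatch for root %llu",
			  root->root_key.objectid);
		btrfs_abort_transaction(trans, -EUCLEAN);
		return -EUCLEAN;
	}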
CC: stable@vger.kernel.org # 5.15+ Reported-by: syzbot+ae97a827ae1c3336bbb4@syzkaller.appspotmail.com Reviewed-by: Filipe Manana Signed-off-by: Qu Wenruo Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/relocation.c | 45 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 37 insertions(+), 8 deletions(-) diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index becf3396d533..dd8d47958a81 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -1905,7 +1905,39 @@ int prepare_to_merge(struct reloc_control *rc, int err) err = PTR_ERR(root); break; } - ASSERT(root->reloc_root == reloc_root); + + if (unlikely(root->reloc_root != reloc_root)) { + if (root->reloc_root) { + btrfs_err(fs_info, +"reloc tree mismatch, root %lld has reloc root key (%lld %u %llu) gen %llu, expect reloc root key (%lld %u %llu) gen %llu", + root->root_key.objectid, + root->reloc_root->root_key.objectid, + root->reloc_root->root_key.type, + root->reloc_root->root_key.offset, + btrfs_root_generation( + &root->reloc_root->root_item), + reloc_root->root_key.objectid, + reloc_root->root_key.type, + reloc_root->root_key.offset, + btrfs_root_generation( + &reloc_root->root_item)); + } else { + btrfs_err(fs_info, +"reloc tree mismatch, root %lld has no reloc root, expect reloc root key (%lld %u %llu) gen %llu", + root->root_key.objectid, + reloc_root->root_key.objectid, + reloc_root->root_key.type, + reloc_root->root_key.offset, + btrfs_root_generation( + &reloc_root->root_item)); + } + list_add(&reloc_root->root_list, &reloc_roots); + btrfs_put_root(root); + btrfs_abort_transaction(trans, -EUCLEAN); + if (!err) + err = -EUCLEAN; + break; + } /* * set reference count to 1, so btrfs_recover_relocation @@ -1978,7 +2010,7 @@ void merge_reloc_roots(struct reloc_control *rc) root = btrfs_get_fs_root(fs_info, reloc_root->root_key.offset, false); if (btrfs_root_refs(&reloc_root->root_item) > 0) { - if (IS_ERR(root)) { + if (WARN_ON(IS_ERR(root))) { /* * For recovery we read the fs roots on mount, * and if we didn't find the root then we marked @@ -1987,17 +2019,14 @@ void merge_reloc_roots(struct reloc_control *rc) * memory. However there's no reason we can't * handle the error properly here just in case. */ - ASSERT(0); ret = PTR_ERR(root); goto out; } - if (root->reloc_root != reloc_root) { + if (WARN_ON(root->reloc_root != reloc_root)) { /* - * This is actually impossible without something - * going really wrong (like weird race condition - * or cosmic rays). + * This can happen if on-disk metadata has some + * corruption, e.g. bad reloc tree key offset. */ - ASSERT(0); ret = -EINVAL; goto out; } From 314135b7bae9618a317874ae195272682cf2d5d4 Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Thu, 3 Aug 2023 17:20:43 +0800 Subject: [PATCH 355/513] btrfs: reject invalid reloc tree root keys with stack dump commit 6ebcd021c92b8e4b904552e4d87283032100796d upstream. [BUG] Syzbot reported a crash that an ASSERT() got triggered inside prepare_to_merge(). That ASSERT() makes sure the reloc tree is properly pointed back by its subvolume tree. [CAUSE] After more debugging output, it turns out we had an invalid reloc tree: BTRFS error (device loop1): reloc tree mismatch, root 8 has no reloc root, expect reloc root key (-8, 132, 8) gen 17 Note the above root key is (TREE_RELOC_OBJECTID, ROOT_ITEM, QUOTA_TREE_OBJECTID), meaning it's a reloc tree for quota tree. 
But reloc trees can only exist for subvolumes, as for non-subvolume trees, we just COW the involved tree block, no need to create a reloc tree since those tree blocks won't be shared with other trees. Only subvolumes tree can share tree blocks with other trees (thus they have BTRFS_ROOT_SHAREABLE flag). Thus this new debug output proves my previous assumption that corrupted on-disk data can trigger that ASSERT(). [FIX] Besides the dedicated fix and the graceful exit, also let tree-checker to check such root keys, to make sure reloc trees can only exist for subvolumes. CC: stable@vger.kernel.org # 5.15+ Reported-by: syzbot+ae97a827ae1c3336bbb4@syzkaller.appspotmail.com Reviewed-by: Filipe Manana Signed-off-by: Qu Wenruo Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/disk-io.c | 3 ++- fs/btrfs/tree-checker.c | 14 ++++++++++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 3c0b3b4ec5ad..4e35c6fb7be7 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -1437,7 +1437,8 @@ static int btrfs_init_fs_root(struct btrfs_root *root, dev_t anon_dev) goto fail; if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID && - !btrfs_is_data_reloc_root(root)) { + !btrfs_is_data_reloc_root(root) && + is_fstree(root->root_key.objectid)) { set_bit(BTRFS_ROOT_SHAREABLE, &root->state); btrfs_check_and_init_root_item(&root->root_item); } diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index a84d2d489510..bd71c7369794 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -442,6 +442,20 @@ static int check_root_key(struct extent_buffer *leaf, struct btrfs_key *key, btrfs_item_key_to_cpu(leaf, &item_key, slot); is_root_item = (item_key.type == BTRFS_ROOT_ITEM_KEY); + /* + * Bad rootid for reloc trees. + * + * Reloc trees are only for subvolume trees, other trees only need + * to be COWed to be relocated. + */ + if (unlikely(is_root_item && key->objectid == BTRFS_TREE_RELOC_OBJECTID && + !is_fstree(key->offset))) { + generic_err(leaf, slot, + "invalid reloc tree for root %lld, root id is not a subvolume tree", + key->offset); + return -EUCLEAN; + } + /* No such tree id */ if (unlikely(key->objectid == 0)) { if (is_root_item) From ae6e21f8bb2a29db1c9be7a001b038e6f2b2afb0 Mon Sep 17 00:00:00 2001 From: Josef Bacik Date: Wed, 2 Aug 2023 09:20:24 -0400 Subject: [PATCH 356/513] btrfs: set cache_block_group_error if we find an error commit 92fb94b69c6accf1e49fff699640fa0ce03dc910 upstream. We set cache_block_group_error if btrfs_cache_block_group() returns an error, this is because we could end up not finding space to allocate and mistakenly return -ENOSPC, and which could then abort the transaction with the incorrect errno, and in the case of ENOSPC result in a WARN_ON() that will trip up tests like generic/475. However there's the case where multiple threads can be racing, one thread gets the proper error, and the other thread doesn't actually call btrfs_cache_block_group(), it instead sees ->cached == BTRFS_CACHE_ERROR. Again the result is the same, we fail to allocate our space and return -ENOSPC. Instead we need to set cache_block_group_error to -EIO in this case to make sure that if we do not make our allocation we get the appropriate error returned back to the caller. 
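To illustrate the error-propagation idea (a simplified sketch with invented names, not the actual find_free_extent() code): remember the first real caching error seen while scanning block groups, so that failing to find space is not misreported as -ENOSPC.

	static int example_find_free_extent(struct example_block_group *groups,
					    int nr_groups, u64 *found_start)
	{
		int cache_block_group_error = 0;
		int i;

		for (i = 0; i < nr_groups; i++) {
			if (groups[i].cached == EXAMPLE_CACHE_ERROR) {
				/* another thread hit the caching failure first */
				if (!cache_block_group_error)
					cache_block_group_error = -EIO;
				continue;
			}
			if (example_try_allocate(&groups[i], found_start) == 0)
				return 0;
		}

		/* report the real failure instead of a misleading -ENOSPC */
		return cache_block_group_error ? cache_block_group_error : -ENOSPC;
	}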
CC: stable@vger.kernel.org # 4.14+ Signed-off-by: Josef Bacik Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/extent-tree.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 750c1ff9947d..597cc2607481 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -4334,8 +4334,11 @@ static noinline int find_free_extent(struct btrfs_root *root, ret = 0; } - if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) + if (unlikely(block_group->cached == BTRFS_CACHE_ERROR)) { + if (!cache_block_group_error) + cache_block_group_error = -EIO; goto loop; + } bg_ret = NULL; ret = do_allocation(block_group, &ffe_ctl, &bg_ret); From d68f8ef6ef7047c7544095598add59f6ff156ed9 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 11 Jul 2023 17:40:40 +0800 Subject: [PATCH 357/513] nvme-tcp: fix potential unbalanced freeze & unfreeze commit 99dc264014d5aed66ee37ddf136a38b5a2b1b529 upstream. Move start_freeze into nvme_tcp_configure_io_queues(), and there is at least two benefits: 1) fix unbalanced freeze and unfreeze, since re-connection work may fail or be broken by removal 2) IO during error recovery can be failfast quickly because nvme fabrics unquiesces queues after teardown. One side-effect is that !mpath request may timeout during connecting because of queue topo change, but that looks not one big deal: 1) same problem exists with current code base 2) compared with !mpath, mpath use case is dominant Fixes: 2875b0aecabe ("nvme-tcp: fix controller reset hang during traffic") Cc: stable@vger.kernel.org Signed-off-by: Ming Lei Tested-by: Yi Zhang Reviewed-by: Sagi Grimberg Signed-off-by: Keith Busch Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/host/tcp.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index fb47d0603e05..4ca7ef941600 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -1888,6 +1888,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) goto out_cleanup_connect_q; if (!new) { + nvme_start_freeze(ctrl); nvme_start_queues(ctrl); if (!nvme_wait_freeze_timeout(ctrl, NVME_IO_TIMEOUT)) { /* @@ -1896,6 +1897,7 @@ static int nvme_tcp_configure_io_queues(struct nvme_ctrl *ctrl, bool new) * to be safe. */ ret = -ENODEV; + nvme_unfreeze(ctrl); goto out_wait_freeze_timed_out; } blk_mq_update_nr_hw_queues(ctrl->tagset, @@ -2014,7 +2016,6 @@ static void nvme_tcp_teardown_io_queues(struct nvme_ctrl *ctrl, if (ctrl->queue_count <= 1) return; blk_mq_quiesce_queue(ctrl->admin_q); - nvme_start_freeze(ctrl); nvme_stop_queues(ctrl); nvme_sync_io_queues(ctrl); nvme_tcp_stop_io_queues(ctrl); From 9bdbbcf9d148aebd686aabe13e80e1e4e8610c60 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Tue, 11 Jul 2023 17:40:41 +0800 Subject: [PATCH 358/513] nvme-rdma: fix potential unbalanced freeze & unfreeze commit 29b434d1e49252b3ad56ad3197e47fafff5356a1 upstream. Move start_freeze into nvme_rdma_configure_io_queues(), and there is at least two benefits: 1) fix unbalanced freeze and unfreeze, since re-connection work may fail or be broken by removal 2) IO during error recovery can be failfast quickly because nvme fabrics unquiesces queues after teardown. 
One side-effect is that !mpath request may timeout during connecting because of queue topo change, but that looks not one big deal: 1) same problem exists with current code base 2) compared with !mpath, mpath use case is dominant Fixes: 9f98772ba307 ("nvme-rdma: fix controller reset hang during traffic") Cc: stable@vger.kernel.org Signed-off-by: Ming Lei Tested-by: Yi Zhang Reviewed-by: Sagi Grimberg Signed-off-by: Keith Busch Signed-off-by: Greg Kroah-Hartman --- drivers/nvme/host/rdma.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 2db9c166a1b7..b76e1d4adcc7 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -989,6 +989,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) goto out_cleanup_connect_q; if (!new) { + nvme_start_freeze(&ctrl->ctrl); nvme_start_queues(&ctrl->ctrl); if (!nvme_wait_freeze_timeout(&ctrl->ctrl, NVME_IO_TIMEOUT)) { /* @@ -997,6 +998,7 @@ static int nvme_rdma_configure_io_queues(struct nvme_rdma_ctrl *ctrl, bool new) * to be safe. */ ret = -ENODEV; + nvme_unfreeze(&ctrl->ctrl); goto out_wait_freeze_timed_out; } blk_mq_update_nr_hw_queues(ctrl->ctrl.tagset, @@ -1038,7 +1040,6 @@ static void nvme_rdma_teardown_io_queues(struct nvme_rdma_ctrl *ctrl, bool remove) { if (ctrl->ctrl.queue_count > 1) { - nvme_start_freeze(&ctrl->ctrl); nvme_stop_queues(&ctrl->ctrl); nvme_sync_io_queues(&ctrl->ctrl); nvme_rdma_stop_io_queues(ctrl); From b757ef99df3988c61e382daef48fd83ce447f2b7 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Sun, 13 Aug 2023 00:06:55 +0200 Subject: [PATCH 359/513] netfilter: nf_tables: report use refcount overflow commit 1689f25924ada8fe14a4a82c38925d04994c7142 upstream. Overflow use refcount checks are not complete. Add helper function to deal with object reference counter tracking. Report -EMFILE in case UINT_MAX is reached. nft_use_dec() splats in case that reference counter underflows, which should not ever happen. Add nft_use_inc_restore() and nft_use_dec_restore() which are used to restore reference counter from error and abort paths. Use u32 in nft_flowtable and nft_object since helper functions cannot work on bitfields. Remove the few early incomplete checks now that the helper functions are in place and used to check for refcount overflow. Fixes: 96518518cc41 ("netfilter: add nftables") Signed-off-by: Pablo Neira Ayuso Signed-off-by: Greg Kroah-Hartman --- include/net/netfilter/nf_tables.h | 31 +++++- net/netfilter/nf_tables_api.c | 164 ++++++++++++++++++------------ net/netfilter/nft_flow_offload.c | 6 +- net/netfilter/nft_immediate.c | 8 +- net/netfilter/nft_objref.c | 8 +- 5 files changed, 141 insertions(+), 76 deletions(-) diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index d486bddda15d..1458b3eae8ad 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1144,6 +1144,29 @@ int __nft_release_basechain(struct nft_ctx *ctx); unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv); +static inline bool nft_use_inc(u32 *use) +{ + if (*use == UINT_MAX) + return false; + + (*use)++; + + return true; +} + +static inline void nft_use_dec(u32 *use) +{ + WARN_ON_ONCE((*use)-- == 0); +} + +/* For error and abort path: restore use counter to previous state. 
*/ +static inline void nft_use_inc_restore(u32 *use) +{ + WARN_ON_ONCE(!nft_use_inc(use)); +} + +#define nft_use_dec_restore nft_use_dec + /** * struct nft_table - nf_tables table * @@ -1227,8 +1250,8 @@ struct nft_object { struct list_head list; struct rhlist_head rhlhead; struct nft_object_hash_key key; - u32 genmask:2, - use:30; + u32 genmask:2; + u32 use; u64 handle; u16 udlen; u8 *udata; @@ -1330,8 +1353,8 @@ struct nft_flowtable { char *name; int hooknum; int ops_len; - u32 genmask:2, - use:30; + u32 genmask:2; + u32 use; u64 handle; /* runtime data below here */ struct list_head hook_list ____cacheline_aligned; diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index ce9f962380b7..1e84314fe334 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -255,8 +255,10 @@ int nf_tables_bind_chain(const struct nft_ctx *ctx, struct nft_chain *chain) if (chain->bound) return -EBUSY; + if (!nft_use_inc(&chain->use)) + return -EMFILE; + chain->bound = true; - chain->use++; nft_chain_trans_bind(ctx, chain); return 0; @@ -439,7 +441,7 @@ static int nft_delchain(struct nft_ctx *ctx) if (IS_ERR(trans)) return PTR_ERR(trans); - ctx->table->use--; + nft_use_dec(&ctx->table->use); nft_deactivate_next(ctx->net, ctx->chain); return 0; @@ -478,7 +480,7 @@ nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule) /* You cannot delete the same rule twice */ if (nft_is_active_next(ctx->net, rule)) { nft_deactivate_next(ctx->net, rule); - ctx->chain->use--; + nft_use_dec(&ctx->chain->use); return 0; } return -ENOENT; @@ -645,7 +647,7 @@ static int nft_delset(const struct nft_ctx *ctx, struct nft_set *set) nft_map_deactivate(ctx, set); nft_deactivate_next(ctx->net, set); - ctx->table->use--; + nft_use_dec(&ctx->table->use); return err; } @@ -677,7 +679,7 @@ static int nft_delobj(struct nft_ctx *ctx, struct nft_object *obj) return err; nft_deactivate_next(ctx->net, obj); - ctx->table->use--; + nft_use_dec(&ctx->table->use); return err; } @@ -712,7 +714,7 @@ static int nft_delflowtable(struct nft_ctx *ctx, return err; nft_deactivate_next(ctx->net, flowtable); - ctx->table->use--; + nft_use_dec(&ctx->table->use); return err; } @@ -2263,9 +2265,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, struct nft_rule **rules; int err; - if (table->use == UINT_MAX) - return -EOVERFLOW; - if (nla[NFTA_CHAIN_HOOK]) { struct nft_stats __percpu *stats = NULL; struct nft_chain_hook hook; @@ -2362,6 +2361,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, if (err < 0) goto err_destroy_chain; + if (!nft_use_inc(&table->use)) { + err = -EMFILE; + goto err_use; + } + trans = nft_trans_chain_add(ctx, NFT_MSG_NEWCHAIN); if (IS_ERR(trans)) { err = PTR_ERR(trans); @@ -2378,10 +2382,11 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, goto err_unregister_hook; } - table->use++; - return 0; + err_unregister_hook: + nft_use_dec_restore(&table->use); +err_use: nf_tables_unregister_hook(net, table, chain); err_destroy_chain: nf_tables_chain_destroy(ctx); @@ -3566,9 +3571,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info, return -EINVAL; handle = nf_tables_alloc_handle(table); - if (chain->use == UINT_MAX) - return -EOVERFLOW; - if (nla[NFTA_RULE_POSITION]) { pos_handle = be64_to_cpu(nla_get_be64(nla[NFTA_RULE_POSITION])); old_rule = __nft_rule_lookup(chain, pos_handle); @@ -3662,6 +3664,11 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct 
nfnl_info *info, } } + if (!nft_use_inc(&chain->use)) { + err = -EMFILE; + goto err_release_rule; + } + if (info->nlh->nlmsg_flags & NLM_F_REPLACE) { err = nft_delrule(&ctx, old_rule); if (err < 0) @@ -3693,7 +3700,6 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info, } } kvfree(expr_info); - chain->use++; if (flow) nft_trans_flow_rule(trans) = flow; @@ -3704,6 +3710,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info, return 0; err_destroy_flow_rule: + nft_use_dec_restore(&chain->use); if (flow) nft_flow_rule_destroy(flow); err_release_rule: @@ -4721,9 +4728,15 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, alloc_size = sizeof(*set) + size + udlen; if (alloc_size < size || alloc_size > INT_MAX) return -ENOMEM; + + if (!nft_use_inc(&table->use)) + return -EMFILE; + set = kvzalloc(alloc_size, GFP_KERNEL); - if (!set) - return -ENOMEM; + if (!set) { + err = -ENOMEM; + goto err_alloc; + } name = nla_strdup(nla[NFTA_SET_NAME], GFP_KERNEL); if (!name) { @@ -4781,7 +4794,7 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, goto err_set_expr_alloc; list_add_tail_rcu(&set->list, &table->sets); - table->use++; + return 0; err_set_expr_alloc: @@ -4793,6 +4806,9 @@ static int nf_tables_newset(struct sk_buff *skb, const struct nfnl_info *info, kfree(set->name); err_set_name: kvfree(set); +err_alloc: + nft_use_dec_restore(&table->use); + return err; } @@ -4927,9 +4943,6 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, struct nft_set_binding *i; struct nft_set_iter iter; - if (set->use == UINT_MAX) - return -EOVERFLOW; - if (!list_empty(&set->bindings) && nft_set_is_anonymous(set)) return -EBUSY; @@ -4957,10 +4970,12 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set, return iter.err; } bind: + if (!nft_use_inc(&set->use)) + return -EMFILE; + binding->chain = ctx->chain; list_add_tail_rcu(&binding->list, &set->bindings); nft_set_trans_bind(ctx, set); - set->use++; return 0; } @@ -5034,7 +5049,7 @@ void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set) nft_clear(ctx->net, set); } - set->use++; + nft_use_inc_restore(&set->use); } EXPORT_SYMBOL_GPL(nf_tables_activate_set); @@ -5050,7 +5065,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, else list_del_rcu(&binding->list); - set->use--; + nft_use_dec(&set->use); break; case NFT_TRANS_PREPARE: if (nft_set_is_anonymous(set)) { @@ -5059,7 +5074,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, nft_deactivate_next(ctx->net, set); } - set->use--; + nft_use_dec(&set->use); return; case NFT_TRANS_ABORT: case NFT_TRANS_RELEASE: @@ -5067,7 +5082,7 @@ void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set, set->flags & (NFT_SET_MAP | NFT_SET_OBJECT)) nft_map_deactivate(ctx, set); - set->use--; + nft_use_dec(&set->use); fallthrough; default: nf_tables_unbind_set(ctx, set, binding, @@ -5799,7 +5814,7 @@ void nft_set_elem_destroy(const struct nft_set *set, void *elem, nft_set_elem_expr_destroy(&ctx, nft_set_ext_expr(ext)); if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) - (*nft_set_ext_obj(ext))->use--; + nft_use_dec(&(*nft_set_ext_obj(ext))->use); kfree(elem); } EXPORT_SYMBOL_GPL(nft_set_elem_destroy); @@ -6290,8 +6305,16 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, set->objtype, genmask); if (IS_ERR(obj)) { err = PTR_ERR(obj); + obj = NULL; + goto 
err_parse_key_end; + } + + if (!nft_use_inc(&obj->use)) { + err = -EMFILE; + obj = NULL; goto err_parse_key_end; } + err = nft_set_ext_add(&tmpl, NFT_SET_EXT_OBJREF); if (err < 0) goto err_parse_key_end; @@ -6363,10 +6386,9 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, udata->len = ulen - 1; nla_memcpy(&udata->data, nla[NFTA_SET_ELEM_USERDATA], ulen); } - if (obj) { + if (obj) *nft_set_ext_obj(ext) = obj; - obj->use++; - } + err = nft_set_elem_expr_setup(ctx, ext, expr_array, num_exprs); if (err < 0) goto err_elem_expr; @@ -6421,14 +6443,14 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, err_element_clash: kfree(trans); err_elem_expr: - if (obj) - obj->use--; - nf_tables_set_elem_destroy(ctx, set, elem.priv); err_parse_data: if (nla[NFTA_SET_ELEM_DATA] != NULL) nft_data_release(&elem.data.val, desc.type); err_parse_key_end: + if (obj) + nft_use_dec_restore(&obj->use); + nft_data_release(&elem.key_end.val, NFT_DATA_VALUE); err_parse_key: nft_data_release(&elem.key.val, NFT_DATA_VALUE); @@ -6507,7 +6529,7 @@ void nft_data_hold(const struct nft_data *data, enum nft_data_types type) case NFT_JUMP: case NFT_GOTO: chain = data->verdict.chain; - chain->use++; + nft_use_inc_restore(&chain->use); break; } } @@ -6522,7 +6544,7 @@ static void nft_setelem_data_activate(const struct net *net, if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) nft_data_hold(nft_set_ext_data(ext), set->dtype); if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) - (*nft_set_ext_obj(ext))->use++; + nft_use_inc_restore(&(*nft_set_ext_obj(ext))->use); } static void nft_setelem_data_deactivate(const struct net *net, @@ -6534,7 +6556,7 @@ static void nft_setelem_data_deactivate(const struct net *net, if (nft_set_ext_exists(ext, NFT_SET_EXT_DATA)) nft_data_release(nft_set_ext_data(ext), set->dtype); if (nft_set_ext_exists(ext, NFT_SET_EXT_OBJREF)) - (*nft_set_ext_obj(ext))->use--; + nft_use_dec(&(*nft_set_ext_obj(ext))->use); } static int nft_del_setelem(struct nft_ctx *ctx, struct nft_set *set, @@ -7069,9 +7091,14 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info, nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla); + if (!nft_use_inc(&table->use)) + return -EMFILE; + type = nft_obj_type_get(net, objtype); - if (IS_ERR(type)) - return PTR_ERR(type); + if (IS_ERR(type)) { + err = PTR_ERR(type); + goto err_type; + } obj = nft_obj_init(&ctx, type, nla[NFTA_OBJ_DATA]); if (IS_ERR(obj)) { @@ -7105,7 +7132,7 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info, goto err_obj_ht; list_add_tail_rcu(&obj->list, &table->objects); - table->use++; + return 0; err_obj_ht: /* queued in transaction log */ @@ -7121,6 +7148,9 @@ static int nf_tables_newobj(struct sk_buff *skb, const struct nfnl_info *info, kfree(obj); err_init: module_put(type->owner); +err_type: + nft_use_dec_restore(&table->use); + return err; } @@ -7511,7 +7541,7 @@ void nf_tables_deactivate_flowtable(const struct nft_ctx *ctx, case NFT_TRANS_PREPARE: case NFT_TRANS_ABORT: case NFT_TRANS_RELEASE: - flowtable->use--; + nft_use_dec(&flowtable->use); fallthrough; default: return; @@ -7859,9 +7889,14 @@ static int nf_tables_newflowtable(struct sk_buff *skb, nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla); + if (!nft_use_inc(&table->use)) + return -EMFILE; + flowtable = kzalloc(sizeof(*flowtable), GFP_KERNEL); - if (!flowtable) - return -ENOMEM; + if (!flowtable) { + err = -ENOMEM; + goto flowtable_alloc; + } flowtable->table = table; 
flowtable->handle = nf_tables_alloc_handle(table); @@ -7916,7 +7951,6 @@ static int nf_tables_newflowtable(struct sk_buff *skb, goto err5; list_add_tail_rcu(&flowtable->list, &table->flowtables); - table->use++; return 0; err5: @@ -7933,6 +7967,9 @@ static int nf_tables_newflowtable(struct sk_buff *skb, kfree(flowtable->name); err1: kfree(flowtable); +flowtable_alloc: + nft_use_dec_restore(&table->use); + return err; } @@ -9169,7 +9206,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) */ if (nft_set_is_anonymous(nft_trans_set(trans)) && !list_empty(&nft_trans_set(trans)->bindings)) - trans->ctx.table->use--; + nft_use_dec(&trans->ctx.table->use); } nf_tables_set_notify(&trans->ctx, nft_trans_set(trans), NFT_MSG_NEWSET, GFP_KERNEL); @@ -9388,7 +9425,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; } - trans->ctx.table->use--; + nft_use_dec_restore(&trans->ctx.table->use); nft_chain_del(trans->ctx.chain); nf_tables_unregister_hook(trans->ctx.net, trans->ctx.table, @@ -9396,7 +9433,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) } break; case NFT_MSG_DELCHAIN: - trans->ctx.table->use++; + nft_use_inc_restore(&trans->ctx.table->use); nft_clear(trans->ctx.net, trans->ctx.chain); nft_trans_destroy(trans); break; @@ -9405,7 +9442,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; } - trans->ctx.chain->use--; + nft_use_dec_restore(&trans->ctx.chain->use); list_del_rcu(&nft_trans_rule(trans)->list); nft_rule_expr_deactivate(&trans->ctx, nft_trans_rule(trans), @@ -9414,7 +9451,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_flow_rule_destroy(nft_trans_flow_rule(trans)); break; case NFT_MSG_DELRULE: - trans->ctx.chain->use++; + nft_use_inc_restore(&trans->ctx.chain->use); nft_clear(trans->ctx.net, nft_trans_rule(trans)); nft_rule_expr_activate(&trans->ctx, nft_trans_rule(trans)); if (trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD) @@ -9427,7 +9464,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_trans_destroy(trans); break; } - trans->ctx.table->use--; + nft_use_dec_restore(&trans->ctx.table->use); if (nft_trans_set_bound(trans)) { nft_trans_destroy(trans); break; @@ -9435,7 +9472,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) list_del_rcu(&nft_trans_set(trans)->list); break; case NFT_MSG_DELSET: - trans->ctx.table->use++; + nft_use_inc_restore(&trans->ctx.table->use); nft_clear(trans->ctx.net, nft_trans_set(trans)); if (nft_trans_set(trans)->flags & (NFT_SET_MAP | NFT_SET_OBJECT)) nft_map_activate(&trans->ctx, nft_trans_set(trans)); @@ -9478,12 +9515,12 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_obj_destroy(&trans->ctx, nft_trans_obj_newobj(trans)); nft_trans_destroy(trans); } else { - trans->ctx.table->use--; + nft_use_dec_restore(&trans->ctx.table->use); nft_obj_del(nft_trans_obj(trans)); } break; case NFT_MSG_DELOBJ: - trans->ctx.table->use++; + nft_use_inc_restore(&trans->ctx.table->use); nft_clear(trans->ctx.net, nft_trans_obj(trans)); nft_trans_destroy(trans); break; @@ -9492,7 +9529,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) nft_unregister_flowtable_net_hooks(net, &nft_trans_flowtable_hooks(trans)); } else { - trans->ctx.table->use--; + nft_use_dec_restore(&trans->ctx.table->use); 
list_del_rcu(&nft_trans_flowtable(trans)->list); nft_unregister_flowtable_net_hooks(net, &nft_trans_flowtable(trans)->hook_list); @@ -9503,7 +9540,7 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) list_splice(&nft_trans_flowtable_hooks(trans), &nft_trans_flowtable(trans)->hook_list); } else { - trans->ctx.table->use++; + nft_use_inc_restore(&trans->ctx.table->use); nft_clear(trans->ctx.net, nft_trans_flowtable(trans)); } nft_trans_destroy(trans); @@ -9956,8 +9993,9 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data, if (desc->flags & NFT_DATA_DESC_SETELEM && chain->flags & NFT_CHAIN_BINDING) return -EINVAL; + if (!nft_use_inc(&chain->use)) + return -EMFILE; - chain->use++; data->verdict.chain = chain; break; } @@ -9975,7 +10013,7 @@ static void nft_verdict_uninit(const struct nft_data *data) case NFT_JUMP: case NFT_GOTO: chain = data->verdict.chain; - chain->use--; + nft_use_dec(&chain->use); break; } } @@ -10144,11 +10182,11 @@ int __nft_release_basechain(struct nft_ctx *ctx) nf_tables_unregister_hook(ctx->net, ctx->chain->table, ctx->chain); list_for_each_entry_safe(rule, nr, &ctx->chain->rules, list) { list_del(&rule->list); - ctx->chain->use--; + nft_use_dec(&ctx->chain->use); nf_tables_rule_release(ctx, rule); } nft_chain_del(ctx->chain); - ctx->table->use--; + nft_use_dec(&ctx->table->use); nf_tables_chain_destroy(ctx); return 0; @@ -10201,18 +10239,18 @@ static void __nft_release_table(struct net *net, struct nft_table *table) ctx.chain = chain; list_for_each_entry_safe(rule, nr, &chain->rules, list) { list_del(&rule->list); - chain->use--; + nft_use_dec(&chain->use); nf_tables_rule_release(&ctx, rule); } } list_for_each_entry_safe(flowtable, nf, &table->flowtables, list) { list_del(&flowtable->list); - table->use--; + nft_use_dec(&table->use); nf_tables_flowtable_destroy(flowtable); } list_for_each_entry_safe(set, ns, &table->sets, list) { list_del(&set->list); - table->use--; + nft_use_dec(&table->use); if (set->flags & (NFT_SET_MAP | NFT_SET_OBJECT)) nft_map_deactivate(&ctx, set); @@ -10220,13 +10258,13 @@ static void __nft_release_table(struct net *net, struct nft_table *table) } list_for_each_entry_safe(obj, ne, &table->objects, list) { nft_obj_del(obj); - table->use--; + nft_use_dec(&table->use); nft_obj_destroy(&ctx, obj); } list_for_each_entry_safe(chain, nc, &table->chains, list) { ctx.chain = chain; nft_chain_del(chain); - table->use--; + nft_use_dec(&table->use); nf_tables_chain_destroy(&ctx); } nf_tables_table_destroy(&ctx); diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c index aac6db8680d4..a5fc7213be3e 100644 --- a/net/netfilter/nft_flow_offload.c +++ b/net/netfilter/nft_flow_offload.c @@ -381,8 +381,10 @@ static int nft_flow_offload_init(const struct nft_ctx *ctx, if (IS_ERR(flowtable)) return PTR_ERR(flowtable); + if (!nft_use_inc(&flowtable->use)) + return -EMFILE; + priv->flowtable = flowtable; - flowtable->use++; return nf_ct_netns_get(ctx->net, ctx->family); } @@ -401,7 +403,7 @@ static void nft_flow_offload_activate(const struct nft_ctx *ctx, { struct nft_flow_offload *priv = nft_expr_priv(expr); - priv->flowtable->use++; + nft_use_inc_restore(&priv->flowtable->use); } static void nft_flow_offload_destroy(const struct nft_ctx *ctx, diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c index 6bf1c852e8ea..7d5b63c5a30a 100644 --- a/net/netfilter/nft_immediate.c +++ b/net/netfilter/nft_immediate.c @@ -168,7 +168,7 @@ static void 
nft_immediate_deactivate(const struct nft_ctx *ctx, nft_immediate_chain_deactivate(ctx, chain, phase); nft_chain_del(chain); chain->bound = false; - chain->table->use--; + nft_use_dec(&chain->table->use); break; } break; @@ -207,7 +207,7 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx, * let the transaction records release this chain and its rules. */ if (chain->bound) { - chain->use--; + nft_use_dec(&chain->use); break; } @@ -215,9 +215,9 @@ static void nft_immediate_destroy(const struct nft_ctx *ctx, chain_ctx = *ctx; chain_ctx.chain = chain; - chain->use--; + nft_use_dec(&chain->use); list_for_each_entry_safe(rule, n, &chain->rules, list) { - chain->use--; + nft_use_dec(&chain->use); list_del(&rule->list); nf_tables_rule_destroy(&chain_ctx, rule); } diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c index 3ff91bcaa5f2..156787b76667 100644 --- a/net/netfilter/nft_objref.c +++ b/net/netfilter/nft_objref.c @@ -41,8 +41,10 @@ static int nft_objref_init(const struct nft_ctx *ctx, if (IS_ERR(obj)) return -ENOENT; + if (!nft_use_inc(&obj->use)) + return -EMFILE; + nft_objref_priv(expr) = obj; - obj->use++; return 0; } @@ -71,7 +73,7 @@ static void nft_objref_deactivate(const struct nft_ctx *ctx, if (phase == NFT_TRANS_COMMIT) return; - obj->use--; + nft_use_dec(&obj->use); } static void nft_objref_activate(const struct nft_ctx *ctx, @@ -79,7 +81,7 @@ static void nft_objref_activate(const struct nft_ctx *ctx, { struct nft_object *obj = nft_objref_priv(expr); - obj->use++; + nft_use_inc_restore(&obj->use); } static struct nft_expr_type nft_objref_type; From 0f52d7b782514cc073a8f301b6319b2254a1611f Mon Sep 17 00:00:00 2001 From: Tony Battersby Date: Mon, 24 Jul 2023 14:25:40 -0400 Subject: [PATCH 360/513] scsi: core: Fix legacy /proc parsing buffer overflow commit 9426d3cef5000824e5f24f80ed5f42fb935f2488 upstream. (lightly modified commit message mostly by Linus Torvalds) The parsing code for /proc/scsi/scsi is disgusting and broken. We should have just used 'sscanf()' or something simple like that, but the logic may actually predate our kernel sscanf library routine for all I know. It certainly predates both git and BK histories. And we can't change it to be something sane like that now, because the string matching at the start is done case-insensitively, and the separator parsing between numbers isn't done at all, so *any* separator will work, including a possible terminating NUL character. This interface is root-only, and entirely for legacy use, so there is absolutely no point in trying to tighten up the parsing. Because any separator has traditionally worked, it's entirely possible that people have used random characters rather than the suggested space. So don't bother to try to pretty it up, and let's just make a minimal patch that can be back-ported and we can forget about this whole sorry thing for another two decades. Just make it at least not read past the end of the supplied data. 
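As a minimal sketch of the bounded parsing described above (illustrative only; the real change is in proc_scsi_write() in the diff below): each conversion is guarded by a precomputed end pointer so a short buffer can no longer be read past its end.

	/* Assumes buffer[0..len] is valid and buffer[len] == '\0'. */
	static void example_parse_hcil(char *buffer, size_t len,
				       int *host, int *channel, int *id, int *lun)
	{
		char *p = buffer;
		char *end = buffer + len;

		*host    = (p < end)     ? simple_strtoul(p, &p, 0)     : 0;
		*channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
		*id      = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
		*lun     = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0;
	}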
Link: https://lore.kernel.org/linux-scsi/b570f5fe-cb7c-863a-6ed9-f6774c219b88@cybernetics.com/ Cc: Linus Torvalds Cc: Martin K Petersen Cc: James Bottomley Cc: Willy Tarreau Cc: stable@kernel.org Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Signed-off-by: Tony Battersby Signed-off-by: Martin K Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/scsi_proc.c | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/drivers/scsi/scsi_proc.c b/drivers/scsi/scsi_proc.c index d6982d355739..94603e64cc6b 100644 --- a/drivers/scsi/scsi_proc.c +++ b/drivers/scsi/scsi_proc.c @@ -311,7 +311,7 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, size_t length, loff_t *ppos) { int host, channel, id, lun; - char *buffer, *p; + char *buffer, *end, *p; int err; if (!buf || length > PAGE_SIZE) @@ -326,10 +326,14 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, goto out; err = -EINVAL; - if (length < PAGE_SIZE) - buffer[length] = '\0'; - else if (buffer[PAGE_SIZE-1]) - goto out; + if (length < PAGE_SIZE) { + end = buffer + length; + *end = '\0'; + } else { + end = buffer + PAGE_SIZE - 1; + if (*end) + goto out; + } /* * Usage: echo "scsi add-single-device 0 1 2 3" >/proc/scsi/scsi @@ -338,10 +342,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, if (!strncmp("scsi add-single-device", buffer, 22)) { p = buffer + 23; - host = simple_strtoul(p, &p, 0); - channel = simple_strtoul(p + 1, &p, 0); - id = simple_strtoul(p + 1, &p, 0); - lun = simple_strtoul(p + 1, &p, 0); + host = (p < end) ? simple_strtoul(p, &p, 0) : 0; + channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; err = scsi_add_single_device(host, channel, id, lun); @@ -352,10 +356,10 @@ static ssize_t proc_scsi_write(struct file *file, const char __user *buf, } else if (!strncmp("scsi remove-single-device", buffer, 25)) { p = buffer + 26; - host = simple_strtoul(p, &p, 0); - channel = simple_strtoul(p + 1, &p, 0); - id = simple_strtoul(p + 1, &p, 0); - lun = simple_strtoul(p + 1, &p, 0); + host = (p < end) ? simple_strtoul(p, &p, 0) : 0; + channel = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + id = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; + lun = (p + 1 < end) ? simple_strtoul(p + 1, &p, 0) : 0; err = scsi_remove_single_device(host, channel, id, lun); } From 7a792b3d888aab2c65389f9f4f9f2f6c000b1a0d Mon Sep 17 00:00:00 2001 From: Michael Kelley Date: Fri, 28 Jul 2023 21:59:24 -0700 Subject: [PATCH 361/513] scsi: storvsc: Fix handling of virtual Fibre Channel timeouts commit 175544ad48cbf56affeef2a679c6a4d4fb1e2881 upstream. Hyper-V provides the ability to connect Fibre Channel LUNs to the host system and present them in a guest VM as a SCSI device. I/O to the vFC device is handled by the storvsc driver. The storvsc driver includes a partial integration with the FC transport implemented in the generic portion of the Linux SCSI subsystem so that FC attributes can be displayed in /sys. However, the partial integration means that some aspects of vFC don't work properly. Unfortunately, a full and correct integration isn't practical because of limitations in what Hyper-V provides to the guest. In particular, in the context of Hyper-V storvsc, the FC transport timeout function fc_eh_timed_out() causes a kernel panic because it can't find the rport and dereferences a NULL pointer. 
The original patch that added the call from storvsc_eh_timed_out() to fc_eh_timed_out() is faulty in this regard. In many cases a timeout is due to a transient condition, so the situation can be improved by just continuing to wait like with other I/O requests issued by storvsc, and avoiding the guaranteed panic. For a permanent failure, continuing to wait may result in a hung thread instead of a panic, which again may be better. So fix the panic by removing the storvsc call to fc_eh_timed_out(). This allows storvsc to keep waiting for a response. The change has been tested by users who experienced a panic in fc_eh_timed_out() due to transient timeouts, and it solves their problem. In the future we may want to deprecate the vFC functionality in storvsc since it can't be fully fixed. But it has current users for whom it is working well enough, so it should probably stay for a while longer. Fixes: 3930d7309807 ("scsi: storvsc: use default I/O timeout handler for FC devices") Cc: stable@vger.kernel.org Signed-off-by: Michael Kelley Link: https://lore.kernel.org/r/1690606764-79669-1-git-send-email-mikelley@microsoft.com Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/storvsc_drv.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c index e78cfda035a1..9f8ebbec7bc3 100644 --- a/drivers/scsi/storvsc_drv.c +++ b/drivers/scsi/storvsc_drv.c @@ -1730,10 +1730,6 @@ static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd) */ static enum blk_eh_timer_return storvsc_eh_timed_out(struct scsi_cmnd *scmnd) { -#if IS_ENABLED(CONFIG_SCSI_FC_ATTRS) - if (scmnd->device->host->transportt == fc_transport_template) - return fc_eh_timed_out(scmnd); -#endif return BLK_EH_RESET_TIMER; } From 171e117cdc0a13702b7d1fedd875dc0c4a43b7d1 Mon Sep 17 00:00:00 2001 From: Alexandra Diupina Date: Fri, 28 Jul 2023 15:35:21 +0300 Subject: [PATCH 362/513] scsi: 53c700: Check that command slot is not NULL commit 8366d1f1249a0d0bba41d0bd1298d63e5d34c7f7 upstream. Add a check for the command slot value to avoid dereferencing a NULL pointer. Found by Linux Verification Center (linuxtesting.org) with SVACE. Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Co-developed-by: Vladimir Telezhnikov Signed-off-by: Vladimir Telezhnikov Signed-off-by: Alexandra Diupina Link: https://lore.kernel.org/r/20230728123521.18293-1-adiupina@astralinux.ru Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/53c700.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c index a12e3525977d..2f810dac7b8b 100644 --- a/drivers/scsi/53c700.c +++ b/drivers/scsi/53c700.c @@ -1599,7 +1599,7 @@ NCR_700_intr(int irq, void *dev_id) printk("scsi%d (%d:%d) PHASE MISMATCH IN SEND MESSAGE %d remain, return %p[%04x], phase %s\n", host->host_no, pun, lun, count, (void *)temp, temp - hostdata->pScript, sbcl_to_string(NCR_700_readb(host, SBCL_REG))); #endif resume_offset = hostdata->pScript + Ent_SendMessagePhaseMismatch; - } else if(dsp >= to32bit(&slot->pSG[0].ins) && + } else if (slot && dsp >= to32bit(&slot->pSG[0].ins) && dsp <= to32bit(&slot->pSG[NCR_700_SG_SEGMENTS].ins)) { int data_transfer = NCR_700_readl(host, DBC_REG) & 0xffffff; int SGcount = (dsp - to32bit(&slot->pSG[0].ins))/sizeof(struct NCR_700_SG_List); From 461f8ac666fa232afee5ed6420099913ec4e4ba2 Mon Sep 17 00:00:00 2001 From: Zhu Wang Date: Tue, 1 Aug 2023 19:14:21 +0800 Subject: [PATCH 363/513] scsi: snic: Fix possible memory leak if device_add() fails commit 41320b18a0e0dfb236dba4edb9be12dba1878156 upstream. If device_add() returns error, the name allocated by dev_set_name() needs be freed. As the comment of device_add() says, put_device() should be used to give up the reference in the error path. So fix this by calling put_device(), then the name can be freed in kobject_cleanp(). Fixes: c8806b6c9e82 ("snic: driver for Cisco SCSI HBA") Signed-off-by: Zhu Wang Acked-by: Narsimhulu Musini Link: https://lore.kernel.org/r/20230801111421.63651-1-wangzhu9@huawei.com Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/snic/snic_disc.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/snic/snic_disc.c b/drivers/scsi/snic/snic_disc.c index 7cf871323b2c..c445853c623e 100644 --- a/drivers/scsi/snic/snic_disc.c +++ b/drivers/scsi/snic/snic_disc.c @@ -317,6 +317,7 @@ snic_tgt_create(struct snic *snic, struct snic_tgt_id *tgtid) "Snic Tgt: device_add, with err = %d\n", ret); + put_device(&tgt->dev); put_device(&snic->shost->shost_gendev); spin_lock_irqsave(snic->shost->host_lock, flags); list_del(&tgt->list); From 6bc7f4c8c27d526f968788b8a985896755b1df35 Mon Sep 17 00:00:00 2001 From: Zhu Wang Date: Thu, 3 Aug 2023 10:02:30 +0800 Subject: [PATCH 364/513] scsi: core: Fix possible memory leak if device_add() fails commit 04b5b5cb0136ce970333a9c6cec7e46adba1ea3a upstream. If device_add() returns error, the name allocated by dev_set_name() needs be freed. As the comment of device_add() says, put_device() should be used to decrease the reference count in the error path. So fix this by calling put_device(), then the name can be freed in kobject_cleanp(). Fixes: ee959b00c335 ("SCSI: convert struct class_device to struct device") Signed-off-by: Zhu Wang Link: https://lore.kernel.org/r/20230803020230.226903-1-wangzhu9@huawei.com Reviewed-by: Bart Van Assche Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/raid_class.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c index 898a0bdf8df6..711252e52d8e 100644 --- a/drivers/scsi/raid_class.c +++ b/drivers/scsi/raid_class.c @@ -248,6 +248,7 @@ int raid_component_add(struct raid_template *r,struct device *raid_dev, return 0; err_out: + put_device(&rc->dev); list_del(&rc->node); rd->component_count--; put_device(component_dev); From e70469c289539cdc2bed00e3e4b63ec5be4cb2cb Mon Sep 17 00:00:00 2001 From: Karan Tilak Kumar Date: Thu, 27 Jul 2023 12:39:19 -0700 Subject: [PATCH 365/513] scsi: fnic: Replace return codes in fnic_clean_pending_aborts() commit 5a43b07a87835660f91d88a4db11abfea8c523b7 upstream. fnic_clean_pending_aborts() was returning a non-zero value irrespective of failure or success. This caused the caller of this function to assume that the device reset had failed, even though it would succeed in most cases. As a consequence, a successful device reset would escalate to host reset. Reviewed-by: Sesidhar Baddela Tested-by: Karan Tilak Kumar Signed-off-by: Karan Tilak Kumar Link: https://lore.kernel.org/r/20230727193919.2519-1-kartilak@cisco.com Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/fnic/fnic_scsi.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c index d084a7db3925..e6c36b5b0739 100644 --- a/drivers/scsi/fnic/fnic_scsi.c +++ b/drivers/scsi/fnic/fnic_scsi.c @@ -2172,7 +2172,7 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, bool new_sc) { - int ret = SUCCESS; + int ret = 0; struct fnic_pending_aborts_iter_data iter_data = { .fnic = fnic, .lun_dev = lr_sc->device, @@ -2192,9 +2192,11 @@ static int fnic_clean_pending_aborts(struct fnic *fnic, /* walk again to check, if IOs are still pending in fw */ if (fnic_is_abts_pending(fnic, lr_sc)) - ret = FAILED; + ret = 1; clean_pending_aborts_end: + FNIC_SCSI_DBG(KERN_INFO, fnic->lport->host, + "%s: exit status: %d\n", __func__, ret); return ret; } From 85db1cd1744e1f1fa8d80040aa4727dd88a0e0f4 Mon Sep 17 00:00:00 2001 From: Nilesh Javali Date: Mon, 7 Aug 2023 15:07:25 +0530 Subject: [PATCH 366/513] scsi: qedi: Fix firmware halt over suspend and resume commit 1516ee035df32115197cd93ae3619dba7b020986 upstream. While performing certain power-off sequences, PCI drivers are called to suspend and resume their underlying devices through PCI PM (power management) interface. However the hardware does not support PCI PM suspend/resume operations so system wide suspend/resume leads to bad MFW (management firmware) state which causes various follow-up errors in driver when communicating with the device/firmware. To fix this driver implements PCI PM suspend handler to indicate unsupported operation to the PCI subsystem explicitly, thus avoiding system to go into suspended/standby mode. Fixes: ace7f46ba5fd ("scsi: qedi: Add QLogic FastLinQ offload iSCSI driver framework.") Signed-off-by: Nilesh Javali Link: https://lore.kernel.org/r/20230807093725.46829-2-njavali@marvell.com Signed-off-by: Martin K. 
Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/qedi/qedi_main.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c index e0096fc5927e..d2fe8ae97abc 100644 --- a/drivers/scsi/qedi/qedi_main.c +++ b/drivers/scsi/qedi/qedi_main.c @@ -69,6 +69,7 @@ static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi); static void qedi_recovery_handler(struct work_struct *work); static void qedi_schedule_hw_err_handler(void *dev, enum qed_hw_err_type err_type); +static int qedi_suspend(struct pci_dev *pdev, pm_message_t state); static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle) { @@ -2515,6 +2516,22 @@ static void qedi_shutdown(struct pci_dev *pdev) __qedi_remove(pdev, QEDI_MODE_SHUTDOWN); } +static int qedi_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct qedi_ctx *qedi; + + if (!pdev) { + QEDI_ERR(NULL, "pdev is NULL.\n"); + return -ENODEV; + } + + qedi = pci_get_drvdata(pdev); + + QEDI_ERR(&qedi->dbg_ctx, "%s: Device does not support suspend operation\n", __func__); + + return -EPERM; +} + static int __qedi_probe(struct pci_dev *pdev, int mode) { struct qedi_ctx *qedi; @@ -2873,6 +2890,7 @@ static struct pci_driver qedi_pci_driver = { .remove = qedi_remove, .shutdown = qedi_shutdown, .err_handler = &qedi_err_handler, + .suspend = qedi_suspend, }; static int __init qedi_init(void) From f8d6d25756ea51f381ce86166a01ae5018858d88 Mon Sep 17 00:00:00 2001 From: Nilesh Javali Date: Mon, 7 Aug 2023 15:07:24 +0530 Subject: [PATCH 367/513] scsi: qedf: Fix firmware halt over suspend and resume commit ef222f551e7c4e2008fc442ffc9edcd1a7fd8f63 upstream. While performing certain power-off sequences, PCI drivers are called to suspend and resume their underlying devices through PCI PM (power management) interface. However the hardware does not support PCI PM suspend/resume operations so system wide suspend/resume leads to bad MFW (management firmware) state which causes various follow-up errors in driver when communicating with the device/firmware. To fix this driver implements PCI PM suspend handler to indicate unsupported operation to the PCI subsystem explicitly, thus avoiding system to go into suspended/standby mode. Fixes: 61d8658b4a43 ("scsi: qedf: Add QLogic FastLinQ offload FCoE driver framework.") Signed-off-by: Saurav Kashyap Signed-off-by: Nilesh Javali Link: https://lore.kernel.org/r/20230807093725.46829-1-njavali@marvell.com Signed-off-by: Martin K. Petersen Signed-off-by: Greg Kroah-Hartman --- drivers/scsi/qedf/qedf_main.c | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c index cf10c1a60399..61959dd2237f 100644 --- a/drivers/scsi/qedf/qedf_main.c +++ b/drivers/scsi/qedf/qedf_main.c @@ -31,6 +31,7 @@ static void qedf_remove(struct pci_dev *pdev); static void qedf_shutdown(struct pci_dev *pdev); static void qedf_schedule_recovery_handler(void *dev); static void qedf_recovery_handler(struct work_struct *work); +static int qedf_suspend(struct pci_dev *pdev, pm_message_t state); /* * Driver module parameters. 
@@ -3276,6 +3277,7 @@ static struct pci_driver qedf_pci_driver = { .probe = qedf_probe, .remove = qedf_remove, .shutdown = qedf_shutdown, + .suspend = qedf_suspend, }; static int __qedf_probe(struct pci_dev *pdev, int mode) @@ -4005,6 +4007,22 @@ static void qedf_shutdown(struct pci_dev *pdev) __qedf_remove(pdev, QEDF_MODE_NORMAL); } +static int qedf_suspend(struct pci_dev *pdev, pm_message_t state) +{ + struct qedf_ctx *qedf; + + if (!pdev) { + QEDF_ERR(NULL, "pdev is NULL.\n"); + return -ENODEV; + } + + qedf = pci_get_drvdata(pdev); + + QEDF_ERR(&qedf->dbg_ctx, "%s: Device does not support suspend operation\n", __func__); + + return -EPERM; +} + /* * Recovery handler code */ From 5d094d4e7b99c75da9ece3a9a955eb3728f3988c Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Sat, 29 Jul 2023 16:42:23 +0900 Subject: [PATCH 368/513] alpha: remove __init annotation from exported page_is_ram() commit 6ccbd7fd474674654019a20177c943359469103a upstream. EXPORT_SYMBOL and __init is a bad combination because the .init.text section is freed up after the initialization. Commit c5a130325f13 ("ACPI/APEI: Add parameter check before error injection") exported page_is_ram(), hence the __init annotation should be removed. This fixes the modpost warning in ARCH=alpha builds: WARNING: modpost: vmlinux: page_is_ram: EXPORT_SYMBOL used for init symbol. Remove __init or EXPORT_SYMBOL. Fixes: c5a130325f13 ("ACPI/APEI: Add parameter check before error injection") Signed-off-by: Masahiro Yamada Reviewed-by: Randy Dunlap Signed-off-by: Greg Kroah-Hartman --- arch/alpha/kernel/setup.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c index b4fbbba30aa2..8c4c14a171e2 100644 --- a/arch/alpha/kernel/setup.c +++ b/arch/alpha/kernel/setup.c @@ -385,8 +385,7 @@ setup_memory(void *kernel_end) #endif /* CONFIG_BLK_DEV_INITRD */ } -int __init -page_is_ram(unsigned long pfn) +int page_is_ram(unsigned long pfn) { struct memclust_struct * cluster; struct memdesc_struct * memdesc; From af99918f0e39aeb14d2cd08ca79faf9ccb1ec47f Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Thu, 22 Jun 2023 18:15:03 +0000 Subject: [PATCH 369/513] sch_netem: fix issues in netem_change() vs get_dist_table() commit 11b73313c12403f617b47752db0ab3deef201af7 upstream. In blamed commit, I missed that get_dist_table() was allocating memory using GFP_KERNEL, and acquiring qdisc lock to perform the swap of newly allocated table with current one. In this patch, get_dist_table() is allocating memory and copy user data before we acquire the qdisc lock. Then we perform swap operations while being protected by the lock. Note that after this patch netem_change() no longer can do partial changes. If an error is returned, qdisc conf is left unchanged. Fixes: 2174a08db80d ("sch_netem: acquire qdisc lock in netem_change()") Reported-by: syzbot Signed-off-by: Eric Dumazet Cc: Stephen Hemminger Acked-by: Jamal Hadi Salim Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230622181503.2327695-1-edumazet@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Fedor Pchelkin Signed-off-by: Greg Kroah-Hartman --- net/sched/sch_netem.c | 59 ++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 34 deletions(-) diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c index be42b1196786..08aaa6efc62c 100644 --- a/net/sched/sch_netem.c +++ b/net/sched/sch_netem.c @@ -773,12 +773,10 @@ static void dist_free(struct disttable *d) * signed 16 bit values. 
*/ -static int get_dist_table(struct Qdisc *sch, struct disttable **tbl, - const struct nlattr *attr) +static int get_dist_table(struct disttable **tbl, const struct nlattr *attr) { size_t n = nla_len(attr)/sizeof(__s16); const __s16 *data = nla_data(attr); - spinlock_t *root_lock; struct disttable *d; int i; @@ -793,13 +791,7 @@ static int get_dist_table(struct Qdisc *sch, struct disttable **tbl, for (i = 0; i < n; i++) d->table[i] = data[i]; - root_lock = qdisc_root_sleeping_lock(sch); - - spin_lock_bh(root_lock); - swap(*tbl, d); - spin_unlock_bh(root_lock); - - dist_free(d); + *tbl = d; return 0; } @@ -956,6 +948,8 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, { struct netem_sched_data *q = qdisc_priv(sch); struct nlattr *tb[TCA_NETEM_MAX + 1]; + struct disttable *delay_dist = NULL; + struct disttable *slot_dist = NULL; struct tc_netem_qopt *qopt; struct clgstate old_clg; int old_loss_model = CLG_RANDOM; @@ -969,6 +963,18 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, if (ret < 0) return ret; + if (tb[TCA_NETEM_DELAY_DIST]) { + ret = get_dist_table(&delay_dist, tb[TCA_NETEM_DELAY_DIST]); + if (ret) + goto table_free; + } + + if (tb[TCA_NETEM_SLOT_DIST]) { + ret = get_dist_table(&slot_dist, tb[TCA_NETEM_SLOT_DIST]); + if (ret) + goto table_free; + } + sch_tree_lock(sch); /* backup q->clg and q->loss_model */ old_clg = q->clg; @@ -978,26 +984,17 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, ret = get_loss_clg(q, tb[TCA_NETEM_LOSS]); if (ret) { q->loss_model = old_loss_model; + q->clg = old_clg; goto unlock; } } else { q->loss_model = CLG_RANDOM; } - if (tb[TCA_NETEM_DELAY_DIST]) { - ret = get_dist_table(sch, &q->delay_dist, - tb[TCA_NETEM_DELAY_DIST]); - if (ret) - goto get_table_failure; - } - - if (tb[TCA_NETEM_SLOT_DIST]) { - ret = get_dist_table(sch, &q->slot_dist, - tb[TCA_NETEM_SLOT_DIST]); - if (ret) - goto get_table_failure; - } - + if (delay_dist) + swap(q->delay_dist, delay_dist); + if (slot_dist) + swap(q->slot_dist, slot_dist); sch->limit = qopt->limit; q->latency = PSCHED_TICKS2NS(qopt->latency); @@ -1047,17 +1044,11 @@ static int netem_change(struct Qdisc *sch, struct nlattr *opt, unlock: sch_tree_unlock(sch); - return ret; -get_table_failure: - /* recover clg and loss_model, in case of - * q->clg and q->loss_model were modified - * in get_loss_clg() - */ - q->clg = old_clg; - q->loss_model = old_loss_model; - - goto unlock; +table_free: + dist_free(delay_dist); + dist_free(slot_dist); + return ret; } static int netem_init(struct Qdisc *sch, struct nlattr *opt, From c3b954a51b6447d060c1b30ec4efb5db34a056f7 Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 13 Aug 2023 03:16:18 +0000 Subject: [PATCH 370/513] tick: Detect and fix jiffies update stall [ Upstream commit a1ff03cd6fb9c501fff63a4a2bface9adcfa81cd ] tick: Detect and fix jiffies update stall On some rare cases, the timekeeper CPU may be delaying its jiffies update duty for a while. Known causes include: * The timekeeper is waiting on stop_machine in a MULTI_STOP_DISABLE_IRQ or MULTI_STOP_RUN state. Disabled interrupts prevent from timekeeping updates while waiting for the target CPU to complete its stop_machine() callback. * The timekeeper vcpu has VMEXIT'ed for a long while due to some overload on the host. Detect and fix these situations with emergency timekeeping catchups. Original-patch-by: Paul E. 
McKenney Signed-off-by: Frederic Weisbecker Cc: Thomas Gleixner Signed-off-by: Joel Fernandes (Google) Signed-off-by: Greg Kroah-Hartman --- kernel/time/tick-sched.c | 17 +++++++++++++++++ kernel/time/tick-sched.h | 4 ++++ 2 files changed, 21 insertions(+) diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index f42d0776bc84..7701c720dc1f 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -180,6 +180,8 @@ static ktime_t tick_init_jiffy_update(void) return period; } +#define MAX_STALLED_JIFFIES 5 + static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) { int cpu = smp_processor_id(); @@ -207,6 +209,21 @@ static void tick_sched_do_timer(struct tick_sched *ts, ktime_t now) if (tick_do_timer_cpu == cpu) tick_do_update_jiffies64(now); + /* + * If jiffies update stalled for too long (timekeeper in stop_machine() + * or VMEXIT'ed for several msecs), force an update. + */ + if (ts->last_tick_jiffies != jiffies) { + ts->stalled_jiffies = 0; + ts->last_tick_jiffies = READ_ONCE(jiffies); + } else { + if (++ts->stalled_jiffies == MAX_STALLED_JIFFIES) { + tick_do_update_jiffies64(now); + ts->stalled_jiffies = 0; + ts->last_tick_jiffies = READ_ONCE(jiffies); + } + } + if (ts->inidle) ts->got_idle_tick = 1; } diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h index d952ae393423..504649513399 100644 --- a/kernel/time/tick-sched.h +++ b/kernel/time/tick-sched.h @@ -49,6 +49,8 @@ enum tick_nohz_mode { * @timer_expires_base: Base time clock monotonic for @timer_expires * @next_timer: Expiry time of next expiring timer for debugging purpose only * @tick_dep_mask: Tick dependency mask - is set, if someone needs the tick + * @last_tick_jiffies: Value of jiffies seen on last tick + * @stalled_jiffies: Number of stalled jiffies detected across ticks */ struct tick_sched { struct hrtimer sched_timer; @@ -77,6 +79,8 @@ struct tick_sched { u64 next_timer; ktime_t idle_expires; atomic_t tick_dep_mask; + unsigned long last_tick_jiffies; + unsigned int stalled_jiffies; }; extern struct tick_sched *tick_get_tick_sched(int cpu); From b4d36e6c5dc417f3f394b5e576dec2760b3999ae Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sun, 13 Aug 2023 03:16:19 +0000 Subject: [PATCH 371/513] timers/nohz: Switch to ONESHOT_STOPPED in the low-res handler when the tick is stopped [ Upstream commit 62c1256d544747b38e77ca9b5bfe3a26f9592576 ] When tick_nohz_stop_tick() stops the tick and high resolution timers are disabled, then the clock event device is not put into ONESHOT_STOPPED mode. This can lead to spurious timer interrupts with some clock event device drivers that don't shut down entirely after firing. Eliminate these by putting the device into ONESHOT_STOPPED mode at points where it is not being reprogrammed. When there are no timers active, then tick_program_event() with KTIME_MAX can be used to stop the device. When there is a timer active, the device can be stopped at the next tick (any new timer added by timers will reprogram the tick). 
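The jiffies-stall detection in the tick patch above ("tick: Detect and fix jiffies update stall") reduces to simple bookkeeping: remember the jiffies value seen on the previous tick and force an update once it has failed to advance for MAX_STALLED_JIFFIES consecutive ticks. Below is a stand-alone C model of that logic; plain integers stand in for the kernel's jiffies and struct tick_sched state, so all names are illustrative only.

#include <stdio.h>

#define MAX_STALLED_TICKS 5        /* mirrors MAX_STALLED_JIFFIES in the patch */

struct tick_state {
    unsigned long last_seen;       /* jiffies value observed on the previous tick */
    unsigned int  stalled;         /* consecutive ticks without a jiffies update */
};

/* called once per tick; returns 1 when a forced catch-up was needed */
static int check_stall(struct tick_state *ts, unsigned long jiffies_now)
{
    if (ts->last_seen != jiffies_now) {
        ts->stalled = 0;
        ts->last_seen = jiffies_now;
        return 0;
    }
    if (++ts->stalled == MAX_STALLED_TICKS) {
        /* the kernel would call tick_do_update_jiffies64() here */
        ts->stalled = 0;
        return 1;
    }
    return 0;
}

int main(void)
{
    struct tick_state ts = { 0, 0 };
    unsigned long j = 100;         /* jiffies stuck at 100, e.g. timekeeper stalled */
    int tick;

    for (tick = 0; tick < 8; tick++)
        if (check_stall(&ts, j))
            printf("tick %d: forced jiffies catch-up\n", tick);
    return 0;
}

The model fires exactly once after five stalled ticks and then re-arms, which is the behaviour the hunk in tick_sched_do_timer() implements.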
Signed-off-by: Nicholas Piggin Signed-off-by: Thomas Gleixner Link: https://lore.kernel.org/r/20220422141446.915024-1-npiggin@gmail.com Signed-off-by: Joel Fernandes (Google) Signed-off-by: Greg Kroah-Hartman --- kernel/time/tick-sched.c | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 7701c720dc1f..5786e2794ae1 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -950,6 +950,8 @@ static void tick_nohz_stop_tick(struct tick_sched *ts, int cpu) if (unlikely(expires == KTIME_MAX)) { if (ts->nohz_mode == NOHZ_MODE_HIGHRES) hrtimer_cancel(&ts->sched_timer); + else + tick_program_event(KTIME_MAX, 1); return; } @@ -1356,9 +1358,15 @@ static void tick_nohz_handler(struct clock_event_device *dev) tick_sched_do_timer(ts, now); tick_sched_handle(ts, regs); - /* No need to reprogram if we are running tickless */ - if (unlikely(ts->tick_stopped)) + if (unlikely(ts->tick_stopped)) { + /* + * The clockevent device is not reprogrammed, so change the + * clock event device to ONESHOT_STOPPED to avoid spurious + * interrupts on devices which might not be truly one shot. + */ + tick_program_event(KTIME_MAX, 1); return; + } hrtimer_forward(&ts->sched_timer, now, TICK_NSEC); tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1); From c597d8cb0d33462e449d7be345330e980abc035b Mon Sep 17 00:00:00 2001 From: Frederic Weisbecker Date: Sun, 13 Aug 2023 03:16:20 +0000 Subject: [PATCH 372/513] timers/nohz: Last resort update jiffies on nohz_full IRQ entry [ Upstream commit 53e87e3cdc155f20c3417b689df8d2ac88d79576 ] When at least one CPU runs in nohz_full mode, a dedicated timekeeper CPU is guaranteed to stay online and to never stop its tick. Meanwhile on some rare case, the dedicated timekeeper may be running with interrupts disabled for a while, such as in stop_machine. If jiffies stop being updated, a nohz_full CPU may end up endlessly programming the next tick in the past, taking the last jiffies update monotonic timestamp as a stale base, resulting in an tick storm. Here is a scenario where it matters: 0) CPU 0 is the timekeeper and CPU 1 a nohz_full CPU. 1) A stop machine callback is queued to execute somewhere. 2) CPU 0 reaches MULTI_STOP_DISABLE_IRQ while CPU 1 is still in MULTI_STOP_PREPARE. Hence CPU 0 can't do its timekeeping duty. CPU 1 can still take IRQs. 3) CPU 1 receives an IRQ which queues a timer callback one jiffy forward. 4) On IRQ exit, CPU 1 schedules the tick one jiffy forward, taking last_jiffies_update as a base. But last_jiffies_update hasn't been updated for 2 jiffies since the timekeeper has interrupts disabled. 5) clockevents_program_event(), which relies on ktime_get(), observes that the expiration is in the past and therefore programs the min delta event on the clock. 6) The tick fires immediately, goto 3) 7) Tick storm, the nohz_full CPU is drown and takes ages to reach MULTI_STOP_DISABLE_IRQ, which is the only way out of this situation. Solve this with unconditionally updating jiffies if the value is stale on nohz_full IRQ entry. IRQs and other disturbances are expected to be rare enough on nohz_full for the unconditional call to ktime_get() to actually matter. Reported-by: Paul E. McKenney Signed-off-by: Frederic Weisbecker Signed-off-by: Thomas Gleixner Tested-by: Paul E. 
McKenney Link: https://lore.kernel.org/r/20211026141055.57358-2-frederic@kernel.org Signed-off-by: Joel Fernandes (Google) Signed-off-by: Greg Kroah-Hartman --- kernel/softirq.c | 3 ++- kernel/time/tick-sched.c | 7 +++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/kernel/softirq.c b/kernel/softirq.c index 322b65d45676..41f470929e99 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c @@ -595,7 +595,8 @@ void irq_enter_rcu(void) { __irq_enter_raw(); - if (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET)) + if (tick_nohz_full_cpu(smp_processor_id()) || + (is_idle_task(current) && (irq_count() == HARDIRQ_OFFSET))) tick_irq_enter(); account_hardirq_enter(current); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index 5786e2794ae1..7f5310d1a4d6 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -1420,6 +1420,13 @@ static inline void tick_nohz_irq_enter(void) now = ktime_get(); if (ts->idle_active) tick_nohz_stop_idle(ts, now); + /* + * If all CPUs are idle. We may need to update a stale jiffies value. + * Note nohz_full is a special case: a timekeeper is guaranteed to stay + * alive but it might be busy looping with interrupts disabled in some + * rare case (typically stop machine). So we must make sure we have a + * last resort. + */ if (ts->tick_stopped) tick_nohz_update_jiffies(now); } From f6f7927ac664ba23447f8dd3c3dfe2f4ee39272f Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Wed, 16 Aug 2023 18:22:04 +0200 Subject: [PATCH 373/513] Linux 5.15.127 Link: https://lore.kernel.org/r/20230813211710.787645394@linuxfoundation.org Tested-by: Thierry Reding Tested-by: SeongJae Park Tested-by: Guenter Roeck Tested-by: Ron Economos Tested-by: Shuah Khan Tested-by: Harshit Mogalapalli Tested-by: Florian Fainelli Tested-by: Allen Pais Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 42993220a57a..f5e69631ca58 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 15 -SUBLEVEL = 126 +SUBLEVEL = 127 EXTRAVERSION = NAME = Trick or Treat From a798977df6d0636865b23e3b0f76255513c47546 Mon Sep 17 00:00:00 2001 From: Kunihiko Hayashi Date: Fri, 30 Jun 2023 09:45:33 +0900 Subject: [PATCH 374/513] mmc: sdhci-f-sdh30: Replace with sdhci_pltfm [ Upstream commit 5def5c1c15bf22934ee227af85c1716762f3829f ] Even if sdhci_pltfm_pmops is specified for PM, this driver doesn't apply sdhci_pltfm, so the structure is not correctly referenced in PM functions. This applies sdhci_pltfm to this driver to fix this issue. - Call sdhci_pltfm_init() instead of sdhci_alloc_host() and other functions that covered by sdhci_pltfm. - Move ops and quirks to sdhci_pltfm_data - Replace sdhci_priv() with own private function sdhci_f_sdh30_priv(). 
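The reason the old code broke with sdhci_pltfm_pmops is visible in the conversion above: once sdhci_pltfm is used, the area behind the host structure holds struct sdhci_pltfm_host first and the driver's own struct f_sdhost_priv only after that, so a bare sdhci_priv() no longer points at the driver data. Below is a stand-alone C model of that two-level private-data layout; the struct names are generic stand-ins, not the real sdhci definitions.

#include <stdio.h>
#include <stdlib.h>

struct host {                     /* stands in for struct sdhci_host */
    int dummy;
    unsigned char priv[];         /* area handed out by sdhci_priv() */
};

struct pltfm_host {               /* stands in for struct sdhci_pltfm_host */
    int clk;
    unsigned char priv[];         /* area handed out by sdhci_pltfm_priv() */
};

struct drv_priv {                 /* stands in for struct f_sdhost_priv */
    int enable_cmd_dat_delay;
};

static void *host_priv(struct host *h)        { return h->priv; }
static void *pltfm_priv(struct pltfm_host *p) { return p->priv; }

int main(void)
{
    /* pltfm_init-style allocation: host, then pltfm_host, then driver priv */
    struct host *h = calloc(1, sizeof(*h) + sizeof(struct pltfm_host) +
                               sizeof(struct drv_priv));
    struct pltfm_host *p;
    struct drv_priv *d;

    if (!h)
        return 1;
    p = host_priv(h);
    d = pltfm_priv(p);

    d->enable_cmd_dat_delay = 1;
    /* host_priv() alone would hand back 'p', not 'd' */
    printf("driver priv sits %ld bytes past the host struct\n",
           (long)((char *)d - (char *)h));
    free(h);
    return 0;
}

The real structures additionally carry alignment attributes; the point here is only the double hop that the new sdhci_f_sdhost_priv() helper performs.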
Fixes: 87a507459f49 ("mmc: sdhci: host: add new f_sdh30") Signed-off-by: Kunihiko Hayashi Acked-by: Adrian Hunter Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230630004533.26644-1-hayashi.kunihiko@socionext.com Signed-off-by: Ulf Hansson Signed-off-by: Sasha Levin --- drivers/mmc/host/sdhci_f_sdh30.c | 60 ++++++++++++++------------------ 1 file changed, 27 insertions(+), 33 deletions(-) diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index 6c4f43e11282..8876fd1c7eee 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -26,9 +26,16 @@ struct f_sdhost_priv { bool enable_cmd_dat_delay; }; +static void *sdhci_f_sdhost_priv(struct sdhci_host *host) +{ + struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); + + return sdhci_pltfm_priv(pltfm_host); +} + static void sdhci_f_sdh30_soft_voltage_switch(struct sdhci_host *host) { - struct f_sdhost_priv *priv = sdhci_priv(host); + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); u32 ctrl = 0; usleep_range(2500, 3000); @@ -61,7 +68,7 @@ static unsigned int sdhci_f_sdh30_get_min_clock(struct sdhci_host *host) static void sdhci_f_sdh30_reset(struct sdhci_host *host, u8 mask) { - struct f_sdhost_priv *priv = sdhci_priv(host); + struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); u32 ctl; if (sdhci_readw(host, SDHCI_CLOCK_CONTROL) == 0) @@ -85,30 +92,32 @@ static const struct sdhci_ops sdhci_f_sdh30_ops = { .set_uhs_signaling = sdhci_set_uhs_signaling, }; +static const struct sdhci_pltfm_data sdhci_f_sdh30_pltfm_data = { + .ops = &sdhci_f_sdh30_ops, + .quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC + | SDHCI_QUIRK_INVERTED_WRITE_PROTECT, + .quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE + | SDHCI_QUIRK2_TUNING_WORK_AROUND, +}; + static int sdhci_f_sdh30_probe(struct platform_device *pdev) { struct sdhci_host *host; struct device *dev = &pdev->dev; - int irq, ctrl = 0, ret = 0; + int ctrl = 0, ret = 0; struct f_sdhost_priv *priv; + struct sdhci_pltfm_host *pltfm_host; u32 reg = 0; - irq = platform_get_irq(pdev, 0); - if (irq < 0) - return irq; - - host = sdhci_alloc_host(dev, sizeof(struct f_sdhost_priv)); + host = sdhci_pltfm_init(pdev, &sdhci_f_sdh30_pltfm_data, + sizeof(struct f_sdhost_priv)); if (IS_ERR(host)) return PTR_ERR(host); - priv = sdhci_priv(host); + pltfm_host = sdhci_priv(host); + priv = sdhci_pltfm_priv(pltfm_host); priv->dev = dev; - host->quirks = SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC | - SDHCI_QUIRK_INVERTED_WRITE_PROTECT; - host->quirks2 = SDHCI_QUIRK2_SUPPORT_SINGLE | - SDHCI_QUIRK2_TUNING_WORK_AROUND; - priv->enable_cmd_dat_delay = device_property_read_bool(dev, "fujitsu,cmd-dat-delay-select"); @@ -116,18 +125,6 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) if (ret) goto err; - platform_set_drvdata(pdev, host); - - host->hw_name = "f_sdh30"; - host->ops = &sdhci_f_sdh30_ops; - host->irq = irq; - - host->ioaddr = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(host->ioaddr)) { - ret = PTR_ERR(host->ioaddr); - goto err; - } - if (dev_of_node(dev)) { sdhci_get_of_property(pdev); @@ -182,23 +179,20 @@ static int sdhci_f_sdh30_probe(struct platform_device *pdev) err_clk: clk_disable_unprepare(priv->clk_iface); err: - sdhci_free_host(host); + sdhci_pltfm_free(pdev); + return ret; } static int sdhci_f_sdh30_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); - struct f_sdhost_priv *priv = sdhci_priv(host); - - sdhci_remove_host(host, readl(host->ioaddr + SDHCI_INT_STATUS) == - 0xffffffff); + struct 
f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); clk_disable_unprepare(priv->clk_iface); clk_disable_unprepare(priv->clk); - sdhci_free_host(host); - platform_set_drvdata(pdev, NULL); + sdhci_pltfm_unregister(pdev); return 0; } From d61a0886d3365038f65f6c57afacdb7bdc820464 Mon Sep 17 00:00:00 2001 From: Davide Caratti Date: Tue, 14 Feb 2023 10:52:37 +0100 Subject: [PATCH 375/513] selftests: forwarding: tc_actions: cleanup temporary files when test is aborted [ Upstream commit f58531716ced8975a4ade108ef4af35f98722af7 ] remove temporary files created by 'mirred_egress_to_ingress_tcp' test in the cleanup() handler. Also, change variable names to avoid clashing with globals from lib.sh. Suggested-by: Paolo Abeni Signed-off-by: Davide Caratti Link: https://lore.kernel.org/r/091649045a017fc00095ecbb75884e5681f7025f.1676368027.git.dcaratti@redhat.com Signed-off-by: Jakub Kicinski Stable-dep-of: 5e8670610b93 ("selftests: forwarding: tc_actions: Use ncat instead of nc") Signed-off-by: Sasha Levin --- .../selftests/net/forwarding/tc_actions.sh | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh index 1e27031288c8..9c2aca8a4b8d 100755 --- a/tools/testing/selftests/net/forwarding/tc_actions.sh +++ b/tools/testing/selftests/net/forwarding/tc_actions.sh @@ -155,10 +155,10 @@ gact_trap_test() mirred_egress_to_ingress_tcp_test() { - local tmpfile=$(mktemp) tmpfile1=$(mktemp) + mirred_e2i_tf1=$(mktemp) mirred_e2i_tf2=$(mktemp) RET=0 - dd conv=sparse status=none if=/dev/zero bs=1M count=2 of=$tmpfile + dd conv=sparse status=none if=/dev/zero bs=1M count=2 of=$mirred_e2i_tf1 tc filter add dev $h1 protocol ip pref 100 handle 100 egress flower \ $tcflags ip_proto tcp src_ip 192.0.2.1 dst_ip 192.0.2.2 \ action ct commit nat src addr 192.0.2.2 pipe \ @@ -174,11 +174,11 @@ mirred_egress_to_ingress_tcp_test() ip_proto icmp \ action drop - ip vrf exec v$h1 nc --recv-only -w10 -l -p 12345 -o $tmpfile1 & + ip vrf exec v$h1 nc --recv-only -w10 -l -p 12345 -o $mirred_e2i_tf2 & local rpid=$! - ip vrf exec v$h1 nc -w1 --send-only 192.0.2.2 12345 <$tmpfile + ip vrf exec v$h1 nc -w1 --send-only 192.0.2.2 12345 <$mirred_e2i_tf1 wait -n $rpid - cmp -s $tmpfile $tmpfile1 + cmp -s $mirred_e2i_tf1 $mirred_e2i_tf2 check_err $? "server output check failed" $MZ $h1 -c 10 -p 64 -a $h1mac -b $h1mac -A 192.0.2.1 -B 192.0.2.1 \ @@ -195,7 +195,7 @@ mirred_egress_to_ingress_tcp_test() tc filter del dev $h1 egress protocol ip pref 101 handle 101 flower tc filter del dev $h1 ingress protocol ip pref 102 handle 102 flower - rm -f $tmpfile $tmpfile1 + rm -f $mirred_e2i_tf1 $mirred_e2i_tf2 log_test "mirred_egress_to_ingress_tcp ($tcflags)" } @@ -224,6 +224,8 @@ setup_prepare() cleanup() { + local tf + pre_cleanup switch_destroy @@ -234,6 +236,8 @@ cleanup() ip link set $swp2 address $swp2origmac ip link set $swp1 address $swp1origmac + + for tf in $mirred_e2i_tf1 $mirred_e2i_tf2; do rm -f $tf; done } mirred_egress_redirect_test() From 396a1921406a7e7eb7be653a2d9e5808285df545 Mon Sep 17 00:00:00 2001 From: Ido Schimmel Date: Tue, 8 Aug 2023 17:14:57 +0300 Subject: [PATCH 376/513] selftests: forwarding: tc_actions: Use ncat instead of nc [ Upstream commit 5e8670610b93158ffacc3241f835454ff26a3469 ] The test relies on 'nc' being the netcat version from the nmap project. While this seems to be the case on Fedora, it is not the case on Ubuntu, resulting in failures such as [1]. 
Fix by explicitly using the 'ncat' utility from the nmap project and the skip the test in case it is not installed. [1] # timeout set to 0 # selftests: net/forwarding: tc_actions.sh # TEST: gact drop and ok (skip_hw) [ OK ] # TEST: mirred egress flower redirect (skip_hw) [ OK ] # TEST: mirred egress flower mirror (skip_hw) [ OK ] # TEST: mirred egress matchall mirror (skip_hw) [ OK ] # TEST: mirred_egress_to_ingress (skip_hw) [ OK ] # nc: invalid option -- '-' # usage: nc [-46CDdFhklNnrStUuvZz] [-I length] [-i interval] [-M ttl] # [-m minttl] [-O length] [-P proxy_username] [-p source_port] # [-q seconds] [-s sourceaddr] [-T keyword] [-V rtable] [-W recvlimit] # [-w timeout] [-X proxy_protocol] [-x proxy_address[:port]] # [destination] [port] # nc: invalid option -- '-' # usage: nc [-46CDdFhklNnrStUuvZz] [-I length] [-i interval] [-M ttl] # [-m minttl] [-O length] [-P proxy_username] [-p source_port] # [-q seconds] [-s sourceaddr] [-T keyword] [-V rtable] [-W recvlimit] # [-w timeout] [-X proxy_protocol] [-x proxy_address[:port]] # [destination] [port] # TEST: mirred_egress_to_ingress_tcp (skip_hw) [FAIL] # server output check failed # INFO: Could not test offloaded functionality not ok 80 selftests: net/forwarding: tc_actions.sh # exit=1 Fixes: ca22da2fbd69 ("act_mirred: use the backlog for nested calls to mirred ingress") Reported-by: Mirsad Todorovac Closes: https://lore.kernel.org/netdev/adc5e40d-d040-a65e-eb26-edf47dac5b02@alu.unizg.hr/ Signed-off-by: Ido Schimmel Reviewed-by: Petr Machata Tested-by: Mirsad Todorovac Reviewed-by: Hangbin Liu Acked-by: Nikolay Aleksandrov Link: https://lore.kernel.org/r/20230808141503.4060661-12-idosch@nvidia.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- tools/testing/selftests/net/forwarding/tc_actions.sh | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/net/forwarding/tc_actions.sh b/tools/testing/selftests/net/forwarding/tc_actions.sh index 9c2aca8a4b8d..dd02ed4cacac 100755 --- a/tools/testing/selftests/net/forwarding/tc_actions.sh +++ b/tools/testing/selftests/net/forwarding/tc_actions.sh @@ -8,6 +8,8 @@ NUM_NETIFS=4 source tc_common.sh source lib.sh +require_command ncat + tcflags="skip_hw" h1_create() @@ -174,9 +176,9 @@ mirred_egress_to_ingress_tcp_test() ip_proto icmp \ action drop - ip vrf exec v$h1 nc --recv-only -w10 -l -p 12345 -o $mirred_e2i_tf2 & + ip vrf exec v$h1 ncat --recv-only -w10 -l -p 12345 -o $mirred_e2i_tf2 & local rpid=$! - ip vrf exec v$h1 nc -w1 --send-only 192.0.2.2 12345 <$mirred_e2i_tf1 + ip vrf exec v$h1 ncat -w1 --send-only 192.0.2.2 12345 <$mirred_e2i_tf1 wait -n $rpid cmp -s $mirred_e2i_tf1 $mirred_e2i_tf2 check_err $? "server output check failed" From 3d64a232e4d92938597c5c3cec2483ac14e063f7 Mon Sep 17 00:00:00 2001 From: Clayton Yager Date: Mon, 8 Aug 2022 15:38:23 -0700 Subject: [PATCH 377/513] macsec: Fix traffic counters/statistics [ Upstream commit 91ec9bd57f3524ff3d86bfb7c9ee5a315019733c ] OutOctetsProtected, OutOctetsEncrypted, InOctetsValidated, and InOctetsDecrypted were incrementing by the total number of octets in frames instead of by the number of octets of User Data in frames. The Controlled Port statistics ifOutOctets and ifInOctets were incrementing by the total number of octets instead of the number of octets of the MSDUs plus octets of the destination and source MAC addresses. The Controlled Port statistics ifInDiscards and ifInErrors were not incrementing each time the counters they aggregate were. 
The Controlled Port statistic ifInErrors was not included in the output of macsec_get_stats64 so the value was not present in ip commands output. The ReceiveSA counters InPktsNotValid, InPktsNotUsingSA, and InPktsUnusedSA were not incrementing. Signed-off-by: Clayton Yager Signed-off-by: David S. Miller Stable-dep-of: 32d0a49d36a2 ("macsec: use DEV_STATS_INC()") Signed-off-by: Sasha Levin --- drivers/net/macsec.c | 58 +++++++++++++++++++++++++++++++++++++------- 1 file changed, 49 insertions(+), 9 deletions(-) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 10b3f4fb2612..e7af0e7a2967 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -160,6 +160,19 @@ static struct macsec_rx_sa *macsec_rxsa_get(struct macsec_rx_sa __rcu *ptr) return sa; } +static struct macsec_rx_sa *macsec_active_rxsa_get(struct macsec_rx_sc *rx_sc) +{ + struct macsec_rx_sa *sa = NULL; + int an; + + for (an = 0; an < MACSEC_NUM_AN; an++) { + sa = macsec_rxsa_get(rx_sc->sa[an]); + if (sa) + break; + } + return sa; +} + static void free_rx_sc_rcu(struct rcu_head *head) { struct macsec_rx_sc *rx_sc = container_of(head, struct macsec_rx_sc, rcu_head); @@ -493,18 +506,28 @@ static void macsec_encrypt_finish(struct sk_buff *skb, struct net_device *dev) skb->protocol = eth_hdr(skb)->h_proto; } +static unsigned int macsec_msdu_len(struct sk_buff *skb) +{ + struct macsec_dev *macsec = macsec_priv(skb->dev); + struct macsec_secy *secy = &macsec->secy; + bool sci_present = macsec_skb_cb(skb)->has_sci; + + return skb->len - macsec_hdr_len(sci_present) - secy->icv_len; +} + static void macsec_count_tx(struct sk_buff *skb, struct macsec_tx_sc *tx_sc, struct macsec_tx_sa *tx_sa) { + unsigned int msdu_len = macsec_msdu_len(skb); struct pcpu_tx_sc_stats *txsc_stats = this_cpu_ptr(tx_sc->stats); u64_stats_update_begin(&txsc_stats->syncp); if (tx_sc->encrypt) { - txsc_stats->stats.OutOctetsEncrypted += skb->len; + txsc_stats->stats.OutOctetsEncrypted += msdu_len; txsc_stats->stats.OutPktsEncrypted++; this_cpu_inc(tx_sa->stats->OutPktsEncrypted); } else { - txsc_stats->stats.OutOctetsProtected += skb->len; + txsc_stats->stats.OutOctetsProtected += msdu_len; txsc_stats->stats.OutPktsProtected++; this_cpu_inc(tx_sa->stats->OutPktsProtected); } @@ -534,9 +557,10 @@ static void macsec_encrypt_done(struct crypto_async_request *base, int err) aead_request_free(macsec_skb_cb(skb)->req); rcu_read_lock_bh(); - macsec_encrypt_finish(skb, dev); macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); - len = skb->len; + /* packet is encrypted/protected so tx_bytes must be calculated */ + len = macsec_msdu_len(skb) + 2 * ETH_ALEN; + macsec_encrypt_finish(skb, dev); ret = dev_queue_xmit(skb); count_tx(dev, ret, len); rcu_read_unlock_bh(); @@ -695,6 +719,7 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb, macsec_skb_cb(skb)->req = req; macsec_skb_cb(skb)->tx_sa = tx_sa; + macsec_skb_cb(skb)->has_sci = sci_present; aead_request_set_callback(req, 0, macsec_encrypt_done, skb); dev_hold(skb->dev); @@ -736,15 +761,17 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); + secy->netdev->stats.rx_dropped++; return false; } if (secy->validate_frames != MACSEC_VALIDATE_DISABLED) { + unsigned int msdu_len = macsec_msdu_len(skb); u64_stats_update_begin(&rxsc_stats->syncp); if (hdr->tci_an & MACSEC_TCI_E) - rxsc_stats->stats.InOctetsDecrypted += skb->len; + 
rxsc_stats->stats.InOctetsDecrypted += msdu_len; else - rxsc_stats->stats.InOctetsValidated += skb->len; + rxsc_stats->stats.InOctetsValidated += msdu_len; u64_stats_update_end(&rxsc_stats->syncp); } @@ -757,6 +784,8 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsNotValid++; u64_stats_update_end(&rxsc_stats->syncp); + this_cpu_inc(rx_sa->stats->InPktsNotValid); + secy->netdev->stats.rx_errors++; return false; } @@ -849,9 +878,9 @@ static void macsec_decrypt_done(struct crypto_async_request *base, int err) macsec_finalize_skb(skb, macsec->secy.icv_len, macsec_extra_len(macsec_skb_cb(skb)->has_sci)); + len = skb->len; macsec_reset_skb(skb, macsec->secy.netdev); - len = skb->len; if (gro_cells_receive(&macsec->gro_cells, skb) == NET_RX_SUCCESS) count_rx(dev, len); @@ -1042,6 +1071,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb) u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoTag++; u64_stats_update_end(&secy_stats->syncp); + macsec->secy.netdev->stats.rx_dropped++; continue; } @@ -1151,6 +1181,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsBadTag++; u64_stats_update_end(&secy_stats->syncp); + secy->netdev->stats.rx_errors++; goto drop_nosa; } @@ -1161,11 +1192,15 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) /* If validateFrames is Strict or the C bit in the * SecTAG is set, discard */ + struct macsec_rx_sa *active_rx_sa = macsec_active_rxsa_get(rx_sc); if (hdr->tci_an & MACSEC_TCI_C || secy->validate_frames == MACSEC_VALIDATE_STRICT) { u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsNotUsingSA++; u64_stats_update_end(&rxsc_stats->syncp); + secy->netdev->stats.rx_errors++; + if (active_rx_sa) + this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA); goto drop_nosa; } @@ -1175,6 +1210,8 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsUnusedSA++; u64_stats_update_end(&rxsc_stats->syncp); + if (active_rx_sa) + this_cpu_inc(active_rx_sa->stats->InPktsUnusedSA); goto deliver; } @@ -1195,6 +1232,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); + macsec->secy.netdev->stats.rx_dropped++; goto drop; } } @@ -1223,6 +1261,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) deliver: macsec_finalize_skb(skb, secy->icv_len, macsec_extra_len(macsec_skb_cb(skb)->has_sci)); + len = skb->len; macsec_reset_skb(skb, secy->netdev); if (rx_sa) @@ -1230,7 +1269,6 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) macsec_rxsc_put(rx_sc); skb_orphan(skb); - len = skb->len; ret = gro_cells_receive(&macsec->gro_cells, skb); if (ret == NET_RX_SUCCESS) count_rx(dev, len); @@ -1272,6 +1310,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoSCI++; u64_stats_update_end(&secy_stats->syncp); + macsec->secy.netdev->stats.rx_errors++; continue; } @@ -3403,6 +3442,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, return NETDEV_TX_OK; } + len = skb->len; skb = macsec_encrypt(skb, dev); if (IS_ERR(skb)) { if (PTR_ERR(skb) != -EINPROGRESS) @@ -3413,7 +3453,6 @@ 
static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, macsec_count_tx(skb, &macsec->secy.tx_sc, macsec_skb_cb(skb)->tx_sa); macsec_encrypt_finish(skb, dev); - len = skb->len; ret = dev_queue_xmit(skb); count_tx(dev, ret, len); return ret; @@ -3643,6 +3682,7 @@ static void macsec_get_stats64(struct net_device *dev, s->rx_dropped = dev->stats.rx_dropped; s->tx_dropped = dev->stats.tx_dropped; + s->rx_errors = dev->stats.rx_errors; } static int macsec_get_iflink(const struct net_device *dev) From 51222e1c77a17d239262f6c2fe4d45e91742f467 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Fri, 4 Aug 2023 17:26:52 +0000 Subject: [PATCH 378/513] macsec: use DEV_STATS_INC() [ Upstream commit 32d0a49d36a2a306c2e47fe5659361e424f0ed3f ] syzbot/KCSAN reported data-races in macsec whenever dev->stats fields are updated. It appears all of these updates can happen from multiple cpus. Adopt SMP safe DEV_STATS_INC() to update dev->stats fields. Fixes: c09440f7dcb3 ("macsec: introduce IEEE 802.1AE driver") Reported-by: syzbot Signed-off-by: Eric Dumazet Cc: Sabrina Dubroca Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/macsec.c | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index e7af0e7a2967..98ce24422424 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -761,7 +761,7 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); - secy->netdev->stats.rx_dropped++; + DEV_STATS_INC(secy->netdev, rx_dropped); return false; } @@ -785,7 +785,7 @@ static bool macsec_post_decrypt(struct sk_buff *skb, struct macsec_secy *secy, u rxsc_stats->stats.InPktsNotValid++; u64_stats_update_end(&rxsc_stats->syncp); this_cpu_inc(rx_sa->stats->InPktsNotValid); - secy->netdev->stats.rx_errors++; + DEV_STATS_INC(secy->netdev, rx_errors); return false; } @@ -1071,7 +1071,7 @@ static enum rx_handler_result handle_not_macsec(struct sk_buff *skb) u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoTag++; u64_stats_update_end(&secy_stats->syncp); - macsec->secy.netdev->stats.rx_dropped++; + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); continue; } @@ -1181,7 +1181,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsBadTag++; u64_stats_update_end(&secy_stats->syncp); - secy->netdev->stats.rx_errors++; + DEV_STATS_INC(secy->netdev, rx_errors); goto drop_nosa; } @@ -1198,7 +1198,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsNotUsingSA++; u64_stats_update_end(&rxsc_stats->syncp); - secy->netdev->stats.rx_errors++; + DEV_STATS_INC(secy->netdev, rx_errors); if (active_rx_sa) this_cpu_inc(active_rx_sa->stats->InPktsNotUsingSA); goto drop_nosa; @@ -1232,7 +1232,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&rxsc_stats->syncp); rxsc_stats->stats.InPktsLate++; u64_stats_update_end(&rxsc_stats->syncp); - macsec->secy.netdev->stats.rx_dropped++; + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); goto drop; } } @@ -1273,7 +1273,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) if (ret == NET_RX_SUCCESS) count_rx(dev, len); else - macsec->secy.netdev->stats.rx_dropped++; + 
DEV_STATS_INC(macsec->secy.netdev, rx_dropped); rcu_read_unlock(); @@ -1310,7 +1310,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) u64_stats_update_begin(&secy_stats->syncp); secy_stats->stats.InPktsNoSCI++; u64_stats_update_end(&secy_stats->syncp); - macsec->secy.netdev->stats.rx_errors++; + DEV_STATS_INC(macsec->secy.netdev, rx_errors); continue; } @@ -1329,7 +1329,7 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb) secy_stats->stats.InPktsUnknownSCI++; u64_stats_update_end(&secy_stats->syncp); } else { - macsec->secy.netdev->stats.rx_dropped++; + DEV_STATS_INC(macsec->secy.netdev, rx_dropped); } } @@ -3438,7 +3438,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, if (!secy->operational) { kfree_skb(skb); - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); return NETDEV_TX_OK; } @@ -3446,7 +3446,7 @@ static netdev_tx_t macsec_start_xmit(struct sk_buff *skb, skb = macsec_encrypt(skb, dev); if (IS_ERR(skb)) { if (PTR_ERR(skb) != -EINPROGRESS) - dev->stats.tx_dropped++; + DEV_STATS_INC(dev, tx_dropped); return NETDEV_TX_OK; } @@ -3680,9 +3680,9 @@ static void macsec_get_stats64(struct net_device *dev, dev_fetch_sw_netstats(s, dev->tstats); - s->rx_dropped = dev->stats.rx_dropped; - s->tx_dropped = dev->stats.tx_dropped; - s->rx_errors = dev->stats.rx_errors; + s->rx_dropped = atomic_long_read(&dev->stats.__rx_dropped); + s->tx_dropped = atomic_long_read(&dev->stats.__tx_dropped); + s->rx_errors = atomic_long_read(&dev->stats.__rx_errors); } static int macsec_get_iflink(const struct net_device *dev) From 2d93157b7e2dac9acf3ee4e52189b4c79d1ac77c Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 27 Jul 2022 12:43:41 +0300 Subject: [PATCH 379/513] net/tls: Perform immediate device ctx cleanup when possible [ Upstream commit 113671b255ee3b9f5585a6d496ef0e675e698698 ] TLS context destructor can be run in atomic context. Cleanup operations for device-offloaded contexts could require access and interaction with the device callbacks, which might sleep. Hence, the cleanup of such contexts must be deferred and completed inside an async work. For all others, this is not necessary, as cleanup is atomic. Invoke cleanup immediately for them, avoiding queueing redundant gc work. Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed Signed-off-by: Jakub Kicinski Stable-dep-of: 6b47808f223c ("net: tls: avoid discarding data on record close") Signed-off-by: Sasha Levin --- net/tls/tls_device.c | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index cf75969375cf..19ba57245777 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -95,19 +95,29 @@ static void tls_device_gc_task(struct work_struct *work) static void tls_device_queue_ctx_destruction(struct tls_context *ctx) { unsigned long flags; + bool async_cleanup; spin_lock_irqsave(&tls_device_lock, flags); - if (unlikely(!refcount_dec_and_test(&ctx->refcount))) - goto unlock; + if (unlikely(!refcount_dec_and_test(&ctx->refcount))) { + spin_unlock_irqrestore(&tls_device_lock, flags); + return; + } - list_move_tail(&ctx->list, &tls_device_gc_list); + async_cleanup = ctx->netdev && ctx->tx_conf == TLS_HW; + if (async_cleanup) { + list_move_tail(&ctx->list, &tls_device_gc_list); - /* schedule_work inside the spinlock - * to make sure tls_device_down waits for that work. 
- */ - schedule_work(&tls_device_gc_work); -unlock: + /* schedule_work inside the spinlock + * to make sure tls_device_down waits for that work. + */ + schedule_work(&tls_device_gc_work); + } else { + list_del(&ctx->list); + } spin_unlock_irqrestore(&tls_device_lock, flags); + + if (!async_cleanup) + tls_device_free_ctx(ctx); } /* We assume that the socket is already connected */ From 9a15ca893909e1b87d975d0726b79caf5b2f8830 Mon Sep 17 00:00:00 2001 From: Tariq Toukan Date: Wed, 27 Jul 2022 12:43:42 +0300 Subject: [PATCH 380/513] net/tls: Multi-threaded calls to TX tls_dev_del [ Upstream commit 7adc91e0c93901a0eeeea10665d0feb48ffde2d4 ] Multiple TLS device-offloaded contexts can be added in parallel via concurrent calls to .tls_dev_add, while calls to .tls_dev_del are sequential in tls_device_gc_task. This is not a sustainable behavior. This creates a rate gap between add and del operations (addition rate outperforms the deletion rate). When running for enough time, the TLS device resources could get exhausted, failing to offload new connections. Replace the single-threaded garbage collector work with a per-context alternative, so they can be handled on several cores in parallel. Use a new dedicated destruct workqueue for this. Tested with mlx5 device: Before: 22141 add/sec, 103 del/sec After: 11684 add/sec, 11684 del/sec Signed-off-by: Tariq Toukan Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed Signed-off-by: Jakub Kicinski Stable-dep-of: 6b47808f223c ("net: tls: avoid discarding data on record close") Signed-off-by: Sasha Levin --- include/net/tls.h | 2 ++ net/tls/tls_device.c | 63 ++++++++++++++++++++++---------------------- 2 files changed, 33 insertions(+), 32 deletions(-) diff --git a/include/net/tls.h b/include/net/tls.h index bf3d63a52788..eda0015c5c59 100644 --- a/include/net/tls.h +++ b/include/net/tls.h @@ -179,6 +179,8 @@ struct tls_offload_context_tx { struct scatterlist sg_tx_data[MAX_SKB_FRAGS]; void (*sk_destruct)(struct sock *sk); + struct work_struct destruct_work; + struct tls_context *ctx; u8 driver_state[] __aligned(8); /* The TLS layer reserves room for driver specific state * Currently the belief is that there is not enough diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 19ba57245777..8012bd86437c 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -45,10 +45,8 @@ */ static DECLARE_RWSEM(device_offload_lock); -static void tls_device_gc_task(struct work_struct *work); +static struct workqueue_struct *destruct_wq __read_mostly; -static DECLARE_WORK(tls_device_gc_work, tls_device_gc_task); -static LIST_HEAD(tls_device_gc_list); static LIST_HEAD(tls_device_list); static LIST_HEAD(tls_device_down_list); static DEFINE_SPINLOCK(tls_device_lock); @@ -67,29 +65,17 @@ static void tls_device_free_ctx(struct tls_context *ctx) tls_ctx_free(NULL, ctx); } -static void tls_device_gc_task(struct work_struct *work) +static void tls_device_tx_del_task(struct work_struct *work) { - struct tls_context *ctx, *tmp; - unsigned long flags; - LIST_HEAD(gc_list); - - spin_lock_irqsave(&tls_device_lock, flags); - list_splice_init(&tls_device_gc_list, &gc_list); - spin_unlock_irqrestore(&tls_device_lock, flags); - - list_for_each_entry_safe(ctx, tmp, &gc_list, list) { - struct net_device *netdev = ctx->netdev; + struct tls_offload_context_tx *offload_ctx = + container_of(work, struct tls_offload_context_tx, destruct_work); + struct tls_context *ctx = offload_ctx->ctx; + struct net_device *netdev = ctx->netdev; - if (netdev && ctx->tx_conf == TLS_HW) { - 
netdev->tlsdev_ops->tls_dev_del(netdev, ctx, - TLS_OFFLOAD_CTX_DIR_TX); - dev_put(netdev); - ctx->netdev = NULL; - } - - list_del(&ctx->list); - tls_device_free_ctx(ctx); - } + netdev->tlsdev_ops->tls_dev_del(netdev, ctx, TLS_OFFLOAD_CTX_DIR_TX); + dev_put(netdev); + ctx->netdev = NULL; + tls_device_free_ctx(ctx); } static void tls_device_queue_ctx_destruction(struct tls_context *ctx) @@ -103,16 +89,15 @@ static void tls_device_queue_ctx_destruction(struct tls_context *ctx) return; } + list_del(&ctx->list); /* Remove from tls_device_list / tls_device_down_list */ async_cleanup = ctx->netdev && ctx->tx_conf == TLS_HW; if (async_cleanup) { - list_move_tail(&ctx->list, &tls_device_gc_list); + struct tls_offload_context_tx *offload_ctx = tls_offload_ctx_tx(ctx); - /* schedule_work inside the spinlock + /* queue_work inside the spinlock * to make sure tls_device_down waits for that work. */ - schedule_work(&tls_device_gc_work); - } else { - list_del(&ctx->list); + queue_work(destruct_wq, &offload_ctx->destruct_work); } spin_unlock_irqrestore(&tls_device_lock, flags); @@ -1115,6 +1100,9 @@ int tls_set_device_offload(struct sock *sk, struct tls_context *ctx) start_marker_record->len = 0; start_marker_record->num_frags = 0; + INIT_WORK(&offload_ctx->destruct_work, tls_device_tx_del_task); + offload_ctx->ctx = ctx; + INIT_LIST_HEAD(&offload_ctx->records_list); list_add_tail(&start_marker_record->list, &offload_ctx->records_list); spin_lock_init(&offload_ctx->lock); @@ -1372,7 +1360,7 @@ static int tls_device_down(struct net_device *netdev) up_write(&device_offload_lock); - flush_work(&tls_device_gc_work); + flush_workqueue(destruct_wq); return NOTIFY_DONE; } @@ -1413,12 +1401,23 @@ static struct notifier_block tls_dev_notifier = { int __init tls_device_init(void) { - return register_netdevice_notifier(&tls_dev_notifier); + int err; + + destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0); + if (!destruct_wq) + return -ENOMEM; + + err = register_netdevice_notifier(&tls_dev_notifier); + if (err) + destroy_workqueue(destruct_wq); + + return err; } void __exit tls_device_cleanup(void) { unregister_netdevice_notifier(&tls_dev_notifier); - flush_work(&tls_device_gc_work); + flush_workqueue(destruct_wq); + destroy_workqueue(destruct_wq); clean_acked_data_flush(); } From e2d10f1de1fac24e6e41bed71301d7a95aea43c6 Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Fri, 4 Aug 2023 15:59:51 -0700 Subject: [PATCH 381/513] net: tls: avoid discarding data on record close [ Upstream commit 6b47808f223c70ff564f9b363446d2a5fa1e05b2 ] TLS records end with a 16B tag. For TLS device offload we only need to make space for this tag in the stream, the device will generate and replace it with the actual calculated tag. Long time ago the code would just re-reference the head frag which mostly worked but was suboptimal because it prevented TCP from combining the record into a single skb frag. I'm not sure if it was correct as the first frag may be shorter than the tag. The commit under fixes tried to replace that with using the page frag and if the allocation failed rolling back the data, if record was long enough. It achieves better fragment coalescing but is also buggy. We don't roll back the iterator, so unless we're at the end of send we'll skip the data we designated as tag and start the next record as if the rollback never happened. There's also the possibility that the record was constructed with MSG_MORE and the data came from a different syscall and we already told the user space that we "got it". 
Allocate a single dummy page and use it as fallback. Found by code inspection, and proven by forcing allocation failures. Fixes: e7b159a48ba6 ("net/tls: remove the record tail optimization") Signed-off-by: Jakub Kicinski Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- net/tls/tls_device.c | 64 +++++++++++++++++++++++--------------------- 1 file changed, 33 insertions(+), 31 deletions(-) diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c index 8012bd86437c..88785196a896 100644 --- a/net/tls/tls_device.c +++ b/net/tls/tls_device.c @@ -51,6 +51,8 @@ static LIST_HEAD(tls_device_list); static LIST_HEAD(tls_device_down_list); static DEFINE_SPINLOCK(tls_device_lock); +static struct page *dummy_page; + static void tls_device_free_ctx(struct tls_context *ctx) { if (ctx->tx_conf == TLS_HW) { @@ -297,36 +299,33 @@ static int tls_push_record(struct sock *sk, return tls_push_sg(sk, ctx, offload_ctx->sg_tx_data, 0, flags); } -static int tls_device_record_close(struct sock *sk, - struct tls_context *ctx, - struct tls_record_info *record, - struct page_frag *pfrag, - unsigned char record_type) +static void tls_device_record_close(struct sock *sk, + struct tls_context *ctx, + struct tls_record_info *record, + struct page_frag *pfrag, + unsigned char record_type) { struct tls_prot_info *prot = &ctx->prot_info; - int ret; + struct page_frag dummy_tag_frag; /* append tag * device will fill in the tag, we just need to append a placeholder * use socket memory to improve coalescing (re-using a single buffer * increases frag count) - * if we can't allocate memory now, steal some back from data + * if we can't allocate memory now use the dummy page */ - if (likely(skb_page_frag_refill(prot->tag_size, pfrag, - sk->sk_allocation))) { - ret = 0; - tls_append_frag(record, pfrag, prot->tag_size); - } else { - ret = prot->tag_size; - if (record->len <= prot->overhead_size) - return -ENOMEM; + if (unlikely(pfrag->size - pfrag->offset < prot->tag_size) && + !skb_page_frag_refill(prot->tag_size, pfrag, sk->sk_allocation)) { + dummy_tag_frag.page = dummy_page; + dummy_tag_frag.offset = 0; + pfrag = &dummy_tag_frag; } + tls_append_frag(record, pfrag, prot->tag_size); /* fill prepend */ tls_fill_prepend(ctx, skb_frag_address(&record->frags[0]), record->len - prot->overhead_size, record_type); - return ret; } static int tls_create_new_record(struct tls_offload_context_tx *offload_ctx, @@ -502,18 +501,8 @@ static int tls_push_data(struct sock *sk, if (done || record->len >= max_open_record_len || (record->num_frags >= MAX_SKB_FRAGS - 1)) { - rc = tls_device_record_close(sk, tls_ctx, record, - pfrag, record_type); - if (rc) { - if (rc > 0) { - size += rc; - } else { - size = orig_size; - destroy_record(record); - ctx->open_record = NULL; - break; - } - } + tls_device_record_close(sk, tls_ctx, record, + pfrag, record_type); rc = tls_push_record(sk, tls_ctx, @@ -1403,14 +1392,26 @@ int __init tls_device_init(void) { int err; - destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0); - if (!destruct_wq) + dummy_page = alloc_page(GFP_KERNEL); + if (!dummy_page) return -ENOMEM; + destruct_wq = alloc_workqueue("ktls_device_destruct", 0, 0); + if (!destruct_wq) { + err = -ENOMEM; + goto err_free_dummy; + } + err = register_netdevice_notifier(&tls_dev_notifier); if (err) - destroy_workqueue(destruct_wq); + goto err_destroy_wq; + return 0; + +err_destroy_wq: + destroy_workqueue(destruct_wq); +err_free_dummy: + put_page(dummy_page); return err; } @@ -1420,4 +1421,5 @@ void __exit tls_device_cleanup(void) 
flush_workqueue(destruct_wq); destroy_workqueue(destruct_wq); clean_acked_data_flush(); + put_page(dummy_page); } From cc159083085947c0d8466b2fd63699d0ad87b3cd Mon Sep 17 00:00:00 2001 From: Sumit Gupta Date: Thu, 11 May 2023 23:02:09 +0530 Subject: [PATCH 382/513] PCI: tegra194: Fix possible array out of bounds access [ Upstream commit 205b3d02d57ce6dce96f6d2b9c230f56a9bf9817 ] Add check to fix the possible array out of bounds violation by making speed equal to GEN1_CORE_CLK_FREQ when its value is more than the size of "pcie_gen_freq" array. This array has size of four but possible speed (CLS) values are from "0 to 0xF". So, "speed - 1" values are "-1 to 0xE". Suggested-by: Bjorn Helgaas Signed-off-by: Sumit Gupta Link: https://lore.kernel.org/lkml/72b9168b-d4d6-4312-32ea-69358df2f2d0@nvidia.com/ Acked-by: Lorenzo Pieralisi Signed-off-by: Thierry Reding Signed-off-by: Sasha Levin --- drivers/pci/controller/dwc/pcie-tegra194.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c index bdd84765e646..765abe073228 100644 --- a/drivers/pci/controller/dwc/pcie-tegra194.c +++ b/drivers/pci/controller/dwc/pcie-tegra194.c @@ -239,6 +239,7 @@ #define EP_STATE_ENABLED 1 static const unsigned int pcie_gen_freq[] = { + GEN1_CORE_CLK_FREQ, /* PCI_EXP_LNKSTA_CLS == 0; undefined */ GEN1_CORE_CLK_FREQ, GEN2_CORE_CLK_FREQ, GEN3_CORE_CLK_FREQ, @@ -452,7 +453,11 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg) speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) & PCI_EXP_LNKSTA_CLS; - clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); + + if (speed >= ARRAY_SIZE(pcie_gen_freq)) + speed = 0; + + clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]); /* If EP doesn't advertise L1SS, just return */ val = dw_pcie_readl_dbi(pci, pcie->cfg_link_cap_l1sub); @@ -989,7 +994,11 @@ static int tegra_pcie_dw_start_link(struct dw_pcie *pci) speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) & PCI_EXP_LNKSTA_CLS; - clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]); + + if (speed >= ARRAY_SIZE(pcie_gen_freq)) + speed = 0; + + clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]); tegra_pcie_enable_interrupts(pp); From ca66e9dd98ef95cd878e85f55a80f75b18d4d159 Mon Sep 17 00:00:00 2001 From: Oleksij Rempel Date: Tue, 30 May 2023 14:03:44 +0200 Subject: [PATCH 383/513] ARM: dts: imx6dl: prtrvt, prtvt7, prti6q, prtwd2: fix USB related warnings [ Upstream commit 1d14bd943fa2bbdfda1efbcc080b298fed5f1803 ] Fix USB-related warnings in prtrvt, prtvt7, prti6q and prtwd2 device trees by disabling unused usbphynop1 and usbphynop2 USB PHYs and providing proper configuration for the over-current detection. This fixes the following warnings with the current kernel: usb_phy_generic usbphynop1: dummy supplies not allowed for exclusive requests usb_phy_generic usbphynop2: dummy supplies not allowed for exclusive requests imx_usb 2184200.usb: No over current polarity defined By the way, fix over-current detection on usbotg port for prtvt7, prti6q and prtwd2 boards. Only prtrvt do not have OC on USB OTG port. 
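Stepping back to the PCI tegra194 patch above: the clamp works because every Current Link Speed value outside the table now maps to index 0, which the patch points at the Gen1 clock. A stand-alone sketch of that pattern follows; the clock rates are made-up placeholders, not the Tegra GENx_CORE_CLK_FREQ values.

#include <stdio.h>

/* placeholder rates; the real GENx_CORE_CLK_FREQ values differ */
static const unsigned int gen_freq[] = {
    62500000,    /* index 0: CLS == 0 (undefined) -> treat as Gen1 */
    62500000,    /* Gen1 */
    125000000,   /* Gen2 */
    250000000,   /* Gen3 */
    500000000,   /* Gen4 */
};

static unsigned int core_clk_for_speed(unsigned int cls)
{
    /* CLS is a 4-bit field, 0..0xF; clamp anything outside the table */
    if (cls >= sizeof(gen_freq) / sizeof(gen_freq[0]))
        cls = 0;
    return gen_freq[cls];
}

int main(void)
{
    unsigned int cls;

    for (cls = 0; cls <= 0xF; cls++)
        printf("CLS %2u -> %u Hz\n", cls, core_clk_for_speed(cls));
    return 0;
}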
Signed-off-by: Oleksij Rempel Signed-off-by: Shawn Guo Signed-off-by: Sasha Levin --- arch/arm/boot/dts/imx6dl-prtrvt.dts | 4 ++++ arch/arm/boot/dts/imx6qdl-prti6q.dtsi | 11 ++++++++++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/imx6dl-prtrvt.dts b/arch/arm/boot/dts/imx6dl-prtrvt.dts index 5ac84445e9cc..90e01de8c2c1 100644 --- a/arch/arm/boot/dts/imx6dl-prtrvt.dts +++ b/arch/arm/boot/dts/imx6dl-prtrvt.dts @@ -126,6 +126,10 @@ status = "disabled"; }; +&usbotg { + disable-over-current; +}; + &vpu { status = "disabled"; }; diff --git a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi index 19578f660b09..70dfa07a1698 100644 --- a/arch/arm/boot/dts/imx6qdl-prti6q.dtsi +++ b/arch/arm/boot/dts/imx6qdl-prti6q.dtsi @@ -69,6 +69,7 @@ vbus-supply = <®_usb_h1_vbus>; phy_type = "utmi"; dr_mode = "host"; + disable-over-current; status = "okay"; }; @@ -78,10 +79,18 @@ pinctrl-0 = <&pinctrl_usbotg>; phy_type = "utmi"; dr_mode = "host"; - disable-over-current; + over-current-active-low; status = "okay"; }; +&usbphynop1 { + status = "disabled"; +}; + +&usbphynop2 { + status = "disabled"; +}; + &usdhc1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc1>; From 7d53d1e4765c3c300fb61e0ffd3c582009e97b9b Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 2 Jun 2023 10:50:36 +0200 Subject: [PATCH 384/513] iopoll: Call cpu_relax() in busy loops [ Upstream commit b407460ee99033503993ac7437d593451fcdfe44 ] It is considered good practice to call cpu_relax() in busy loops, see Documentation/process/volatile-considered-harmful.rst. This can not only lower CPU power consumption or yield to a hyperthreaded twin processor, but also allows an architecture to mitigate hardware issues (e.g. ARM Erratum 754327 for Cortex-A9 prior to r2p0) in the architecture-specific cpu_relax() implementation. In addition, cpu_relax() is also a compiler barrier. It is not immediately obvious that the @op argument "function" will result in an actual function call (e.g. in case of inlining). Where a function call is a C sequence point, this is lost on inlining. Therefore, with agressive enough optimization it might be possible for the compiler to hoist the: (val) = op(args); "load" out of the loop because it doesn't see the value changing. The addition of cpu_relax() would inhibit this. As the iopoll helpers lack calls to cpu_relax(), people are sometimes reluctant to use them, and may fall back to open-coded polling loops (including cpu_relax() calls) instead. Fix this by adding calls to cpu_relax() to the iopoll helpers: - For the non-atomic case, it is sufficient to call cpu_relax() in case of a zero sleep-between-reads value, as a call to usleep_range() is a safe barrier otherwise. However, it doesn't hurt to add the call regardless, for simplicity, and for similarity with the atomic case below. - For the atomic case, cpu_relax() must be called regardless of the sleep-between-reads value, as there is no guarantee all architecture-specific implementations of udelay() handle this. 
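With this change an open-coded poll loop gains little over the iopoll helpers, since they now issue the same cpu_relax() between reads. The stand-alone model below shows the helper's general shape, with cpu_relax() reduced to a gcc-style compiler barrier so it builds outside the kernel; the function names and timing are illustrative, not the real read_poll_timeout() implementation.

#include <stdio.h>
#include <time.h>
#include <errno.h>

/* stand-in for the kernel's cpu_relax(): at minimum a compiler barrier */
#define cpu_relax() __asm__ __volatile__("" ::: "memory")

/* keep reading op() until cond() holds or timeout_us elapses,
 * relaxing between reads as the patched helpers now do */
static int poll_timeout(unsigned int (*op)(void), int (*cond)(unsigned int),
                        long long timeout_us, unsigned int *val)
{
    struct timespec start, now;

    clock_gettime(CLOCK_MONOTONIC, &start);
    for (;;) {
        *val = op();
        if (cond(*val))
            return 0;
        clock_gettime(CLOCK_MONOTONIC, &now);
        if ((now.tv_sec - start.tv_sec) * 1000000LL +
            (now.tv_nsec - start.tv_nsec) / 1000 > timeout_us)
            return -ETIMEDOUT;
        cpu_relax();               /* the call the iopoll helpers gained */
    }
}

static unsigned int counter;
static unsigned int fake_reg(void)   { return ++counter; }
static int ready(unsigned int v)     { return v >= 1000; }

int main(void)
{
    unsigned int v;
    int ret = poll_timeout(fake_reg, ready, 1000000, &v);

    printf("ret=%d val=%u\n", ret, v);
    return 0;
}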
Signed-off-by: Geert Uytterhoeven Acked-by: Peter Zijlstra (Intel) Acked-by: Arnd Bergmann Reviewed-by: Tony Lindgren Reviewed-by: Ulf Hansson Link: https://lore.kernel.org/r/45c87bec3397fdd704376807f0eec5cc71be440f.1685692810.git.geert+renesas@glider.be Signed-off-by: Sasha Levin --- include/linux/iopoll.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index 2c8860e406bd..0417360a6db9 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -53,6 +53,7 @@ } \ if (__sleep_us) \ usleep_range((__sleep_us >> 2) + 1, __sleep_us); \ + cpu_relax(); \ } \ (cond) ? 0 : -ETIMEDOUT; \ }) @@ -95,6 +96,7 @@ } \ if (__delay_us) \ udelay(__delay_us); \ + cpu_relax(); \ } \ (cond) ? 0 : -ETIMEDOUT; \ }) From e9ce774052eeef0df32d5ab339170b988de0c260 Mon Sep 17 00:00:00 2001 From: Pierre-Louis Bossart Date: Tue, 6 Jun 2023 17:25:28 -0500 Subject: [PATCH 385/513] ASoC: SOF: Intel: fix SoundWire/HDaudio mutual exclusion [ Upstream commit f751b99255cacd9ffe8c4bbf99767ad670cee1f7 ] The functionality described in Commit 61bef9e68dca ("ASoC: SOF: Intel: hda: enforce exclusion between HDaudio and SoundWire") does not seem to be properly implemented with two issues that need to be corrected. a) The test used is incorrect when DisplayAudio codecs are not supported. b) Conversely when only Display Audio codecs can be found, we do want to start the SoundWire links, if any. That will help add the relevant topologies and machine descriptors, and identify cases where the SoundWire information in ACPI needs to be modified with a quirk. Signed-off-by: Pierre-Louis Bossart Reviewed-by: Bard Liao Reviewed-by: Ranjani Sridharan Link: https://lore.kernel.org/r/20230606222529.57156-2-pierre-louis.bossart@linux.intel.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/sof/intel/hda.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c index 35cbef171f4a..038d09f6203a 100644 --- a/sound/soc/sof/intel/hda.c +++ b/sound/soc/sof/intel/hda.c @@ -1040,12 +1040,22 @@ static int hda_generic_machine_select(struct snd_sof_dev *sdev) pdata->machine = hda_mach; pdata->tplg_filename = tplg_filename; - if (codec_num == 2) { + if (codec_num == 2 || + (codec_num == 1 && !HDA_IDISP_CODEC(bus->codec_mask))) { /* * Prevent SoundWire links from starting when an external * HDaudio codec is used */ hda_mach->mach_params.link_mask = 0; + } else { + /* + * Allow SoundWire links to start when no external HDaudio codec + * was detected. This will not create a SoundWire card but + * will help detect if any SoundWire codec reports as ATTACHED. + */ + struct sof_intel_hda_dev *hdev = sdev->pdata->hw_pdata; + + hda_mach->mach_params.link_mask = hdev->info.link_mask; } } } From ff10cd3e9b3a324578eb554a8e9fb1cf6a0b9442 Mon Sep 17 00:00:00 2001 From: gaoxu Date: Tue, 6 Jun 2023 12:47:37 +0000 Subject: [PATCH 386/513] dma-remap: use kvmalloc_array/kvfree for larger dma memory remap [ Upstream commit 51ff97d54f02b4444dfc42e380ac4c058e12d5dd ] If dma_direct_alloc() alloc memory in size of 64MB, the inner function dma_common_contiguous_remap() will allocate 128KB memory by invoking the function kmalloc_array(). and the kmalloc_array seems to fail to try to allocate 128KB mem. 
Call trace: [14977.928623] qcrosvm: page allocation failure: order:5, mode:0x40cc0 [14977.928638] dump_backtrace.cfi_jt+0x0/0x8 [14977.928647] dump_stack_lvl+0x80/0xb8 [14977.928652] warn_alloc+0x164/0x200 [14977.928657] __alloc_pages_slowpath+0x9f0/0xb4c [14977.928660] __alloc_pages+0x21c/0x39c [14977.928662] kmalloc_order+0x48/0x108 [14977.928666] kmalloc_order_trace+0x34/0x154 [14977.928668] __kmalloc+0x548/0x7e4 [14977.928673] dma_direct_alloc+0x11c/0x4f8 [14977.928678] dma_alloc_attrs+0xf4/0x138 [14977.928680] gh_vm_ioctl_set_fw_name+0x3c4/0x610 [gunyah] [14977.928698] gh_vm_ioctl+0x90/0x14c [gunyah] [14977.928705] __arm64_sys_ioctl+0x184/0x210 work around by doing kvmalloc_array instead. Signed-off-by: Gao Xu Reviewed-by: Suren Baghdasaryan Signed-off-by: Christoph Hellwig Signed-off-by: Sasha Levin --- kernel/dma/remap.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/dma/remap.c b/kernel/dma/remap.c index b4526668072e..27596f3b4aef 100644 --- a/kernel/dma/remap.c +++ b/kernel/dma/remap.c @@ -43,13 +43,13 @@ void *dma_common_contiguous_remap(struct page *page, size_t size, void *vaddr; int i; - pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL); + pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); if (!pages) return NULL; for (i = 0; i < count; i++) pages[i] = nth_page(page, i); vaddr = vmap(pages, count, VM_DMA_COHERENT, prot); - kfree(pages); + kvfree(pages); return vaddr; } From fd41646d435048fa57ae01f9e9afbc5587c5d357 Mon Sep 17 00:00:00 2001 From: stuarthayhurst Date: Tue, 30 May 2023 15:44:28 +0100 Subject: [PATCH 387/513] HID: logitech-hidpp: Add USB and Bluetooth IDs for the Logitech G915 TKL Keyboard [ Upstream commit 48aea8b445c422a372cf15915101035a47105421 ] Adds the USB and Bluetooth IDs for the Logitech G915 TKL keyboard, for device detection For this device, this provides battery reporting on top of hid-generic Reviewed-by: Bastien Nocera Signed-off-by: Stuart Hayhurst Signed-off-by: Jiri Kosina Signed-off-by: Sasha Levin --- drivers/hid/hid-logitech-hidpp.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index c61da859cd3c..0ac67dd76574 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -4377,6 +4377,8 @@ static const struct hid_device_id hidpp_devices[] = { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC086) }, { /* Logitech G903 Hero Gaming Mouse over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC091) }, + { /* Logitech G915 TKL Keyboard over USB */ + HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 0xC343) }, { /* Logitech G920 Wheel over USB */ HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_G920_WHEEL), .driver_data = HIDPP_QUIRK_CLASS_G920 | HIDPP_QUIRK_FORCE_OUTPUT_REPORTS}, @@ -4392,6 +4394,8 @@ static const struct hid_device_id hidpp_devices[] = { { /* MX5500 keyboard over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb30b), .driver_data = HIDPP_QUIRK_HIDPP_CONSUMER_VENDOR_KEYS }, + { /* Logitech G915 TKL keyboard over Bluetooth */ + HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb35f) }, { /* M-RCQ142 V470 Cordless Laser Mouse over Bluetooth */ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb008) }, { /* MX Master mouse over Bluetooth */ From 4921792e04f2125b5eadef9dbe9417a8354c7eff Mon Sep 17 00:00:00 2001 From: Lang Yu Date: Fri, 5 May 2023 20:14:15 +0800 Subject: [PATCH 388/513] drm/amdgpu: install stub fence into potential unused fence pointers MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 187916e6ed9d0c3b3abc27429f7a5f8c936bd1f0 ] When using cpu to update page tables, vm update fences are unused. Install stub fence into these fence pointers instead of NULL to avoid NULL dereference when calling dma_fence_wait() on them. Suggested-by: Christian König Signed-off-by: Lang Yu Reviewed-by: Christian König Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 0e4554950e07..788611a50a68 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -2260,6 +2260,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, amdgpu_vm_bo_base_init(&bo_va->base, vm, bo); bo_va->ref_count = 1; + bo_va->last_pt_update = dma_fence_get_stub(); INIT_LIST_HEAD(&bo_va->valids); INIT_LIST_HEAD(&bo_va->invalids); @@ -2974,7 +2975,8 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->update_funcs = &amdgpu_vm_cpu_funcs; else vm->update_funcs = &amdgpu_vm_sdma_funcs; - vm->last_update = NULL; + + vm->last_update = dma_fence_get_stub(); vm->last_unlocked = dma_fence_get_stub(); mutex_init(&vm->eviction_lock); @@ -3117,7 +3119,7 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm) vm->update_funcs = &amdgpu_vm_sdma_funcs; } dma_fence_put(vm->last_update); - vm->last_update = NULL; + vm->last_update = dma_fence_get_stub(); vm->is_compute_context = true; /* Free the shadow bo for compute VM */ From 9631d88503abc1697cbd7ecf414bfaf36bcd7c65 Mon Sep 17 00:00:00 2001 From: Marco Morandini Date: Tue, 30 May 2023 15:40:08 +0200 Subject: [PATCH 389/513] HID: add quirk for 03f0:464a HP Elite Presenter Mouse [ Upstream commit 0db117359e47750d8bd310d19f13e1c4ef7fc26a ] The HP Elite Presenter Mouse HID Report Descriptor shows two mice (Report ID 0x1 and 0x2), one keypad (Report ID 0x5), and two Consumer Controls (Report IDs 0x6 and 0x3). Prior to this commit it registered one mouse, one keypad and one Consumer Control, and it was usable only as a digital laser pointer (one of the two mice). This patch defines the 464a USB device ID and enables the HID_QUIRK_MULTI_INPUT quirk for it, allowing the device to be used both as a mouse and a digital laser pointer.
Signed-off-by: Marco Morandini Signed-off-by: Jiri Kosina Signed-off-by: Sasha Levin --- drivers/hid/hid-ids.h | 1 + drivers/hid/hid-quirks.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 5daec769df7a..5fceefb3c707 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -593,6 +593,7 @@ #define USB_DEVICE_ID_UGCI_FIGHTING 0x0030 #define USB_VENDOR_ID_HP 0x03f0 +#define USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A 0x464a #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A 0x0a4a #define USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A 0x0b4a #define USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE 0x134a diff --git a/drivers/hid/hid-quirks.c b/drivers/hid/hid-quirks.c index c7c06aa958c4..96ca7d981ee2 100644 --- a/drivers/hid/hid-quirks.c +++ b/drivers/hid/hid-quirks.c @@ -96,6 +96,7 @@ static const struct hid_device_id hid_quirks[] = { { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A096), HID_QUIRK_NO_INIT_REPORTS }, { HID_USB_DEVICE(USB_VENDOR_ID_HOLTEK_ALT, USB_DEVICE_ID_HOLTEK_ALT_KEYBOARD_A293), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0A4A), HID_QUIRK_ALWAYS_POLL }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_ELITE_PRESENTER_MOUSE_464A), HID_QUIRK_MULTI_INPUT }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_LOGITECH_OEM_USB_OPTICAL_MOUSE_0B4A), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE), HID_QUIRK_ALWAYS_POLL }, { HID_USB_DEVICE(USB_VENDOR_ID_HP, USB_PRODUCT_ID_HP_PIXART_OEM_USB_OPTICAL_MOUSE_094A), HID_QUIRK_ALWAYS_POLL }, From 1a650d3ccd79cdd5796edd864683a6b8dd0bf576 Mon Sep 17 00:00:00 2001 From: Patrisious Haddad Date: Mon, 5 Jun 2023 13:14:07 +0300 Subject: [PATCH 390/513] RDMA/mlx5: Return the firmware result upon destroying QP/RQ [ Upstream commit 22664c06e997087fe37f9ba208008c948571214a ] Previously when destroying a QP/RQ, the result of the firmware destruction function was ignored and upper layers weren't informed about the failure. Which in turn could lead to various problems since when upper layer isn't aware of the failure it continues its operation thinking that the related QP/RQ was successfully destroyed while it actually wasn't, which could lead to the below kernel WARN. Currently, we return the correct firmware destruction status to upper layers which in case of the RQ would be mlx5_ib_destroy_wq() which was already capable of handling RQ destruction failure or in case of a QP to destroy_qp_common(), which now would actually warn upon qp destruction failure. 
WARNING: CPU: 3 PID: 995 at drivers/infiniband/core/rdma_core.c:940 uverbs_destroy_ufile_hw+0xcb/0xe0 [ib_uverbs] Modules linked in: xt_conntrack xt_MASQUERADE nf_conntrack_netlink nfnetlink xt_addrtype iptable_nat nf_nat br_netfilter rpcrdma rdma_ucm ib_iser libiscsi scsi_transport_iscsi rdma_cm ib_umad ib_ipoib iw_cm ib_cm mlx5_ib ib_uverbs ib_core overlay mlx5_core fuse CPU: 3 PID: 995 Comm: python3 Not tainted 5.16.0-rc5+ #1 Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.13.0-0-gf21b5a4aeb02-prebuilt.qemu.org 04/01/2014 RIP: 0010:uverbs_destroy_ufile_hw+0xcb/0xe0 [ib_uverbs] Code: 41 5c 41 5d 41 5e e9 44 34 f0 e0 48 89 df e8 4c 77 ff ff 49 8b 86 10 01 00 00 48 85 c0 74 a1 4c 89 e7 ff d0 eb 9a 0f 0b eb c1 <0f> 0b be 04 00 00 00 48 89 df e8 b6 f6 ff ff e9 75 ff ff ff 90 0f RSP: 0018:ffff8881533e3e78 EFLAGS: 00010287 RAX: ffff88811b2cf3e0 RBX: ffff888106209700 RCX: 0000000000000000 RDX: ffff888106209780 RSI: ffff8881533e3d30 RDI: ffff888109b101a0 RBP: 0000000000000001 R08: ffff888127cb381c R09: 0de9890000000009 R10: ffff888127cb3800 R11: 0000000000000000 R12: ffff888106209780 R13: ffff888106209750 R14: ffff888100f20660 R15: 0000000000000000 FS: 00007f8be353b740(0000) GS:ffff88852c980000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00007f8bd5b117c0 CR3: 000000012cd8a004 CR4: 0000000000370ea0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: ib_uverbs_close+0x1a/0x90 [ib_uverbs] __fput+0x82/0x230 task_work_run+0x59/0x90 exit_to_user_mode_prepare+0x138/0x140 syscall_exit_to_user_mode+0x1d/0x50 ? __x64_sys_close+0xe/0x40 do_syscall_64+0x4a/0x90 entry_SYSCALL_64_after_hwframe+0x44/0xae RIP: 0033:0x7f8be3ae0abb Code: 03 00 00 00 0f 05 48 3d 00 f0 ff ff 77 41 c3 48 83 ec 18 89 7c 24 0c e8 83 43 f9 ff 8b 7c 24 0c 41 89 c0 b8 03 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 35 44 89 c7 89 44 24 0c e8 c1 43 f9 ff 8b 44 RSP: 002b:00007ffdb51909c0 EFLAGS: 00000293 ORIG_RAX: 0000000000000003 RAX: 0000000000000000 RBX: 0000557bb7f7c020 RCX: 00007f8be3ae0abb RDX: 0000557bb7c74010 RSI: 0000557bb7f14ca0 RDI: 0000000000000005 RBP: 0000557bb7fbd598 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000293 R12: 0000557bb7fbd5b8 R13: 0000557bb7fbd5a8 R14: 0000000000001000 R15: 0000557bb7f7c020 Signed-off-by: Patrisious Haddad Link: https://lore.kernel.org/r/c6df677f931d18090bafbe7f7dbb9524047b7d9b.1685953497.git.leon@kernel.org Signed-off-by: Leon Romanovsky Signed-off-by: Sasha Levin --- drivers/infiniband/hw/mlx5/qpc.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/infiniband/hw/mlx5/qpc.c b/drivers/infiniband/hw/mlx5/qpc.c index 8844eacf2380..e508c0753dd3 100644 --- a/drivers/infiniband/hw/mlx5/qpc.c +++ b/drivers/infiniband/hw/mlx5/qpc.c @@ -297,8 +297,7 @@ int mlx5_core_destroy_qp(struct mlx5_ib_dev *dev, struct mlx5_core_qp *qp) MLX5_SET(destroy_qp_in, in, opcode, MLX5_CMD_OP_DESTROY_QP); MLX5_SET(destroy_qp_in, in, qpn, qp->qpn); MLX5_SET(destroy_qp_in, in, uid, qp->uid); - mlx5_cmd_exec_in(dev->mdev, destroy_qp, in); - return 0; + return mlx5_cmd_exec_in(dev->mdev, destroy_qp, in); } int mlx5_core_set_delay_drop(struct mlx5_ib_dev *dev, @@ -548,14 +547,14 @@ int mlx5_core_xrcd_dealloc(struct mlx5_ib_dev *dev, u32 xrcdn) return mlx5_cmd_exec_in(dev->mdev, dealloc_xrcd, in); } -static void destroy_rq_tracked(struct mlx5_ib_dev *dev, u32 rqn, u16 uid) +static int destroy_rq_tracked(struct mlx5_ib_dev 
*dev, u32 rqn, u16 uid) { u32 in[MLX5_ST_SZ_DW(destroy_rq_in)] = {}; MLX5_SET(destroy_rq_in, in, opcode, MLX5_CMD_OP_DESTROY_RQ); MLX5_SET(destroy_rq_in, in, rqn, rqn); MLX5_SET(destroy_rq_in, in, uid, uid); - mlx5_cmd_exec_in(dev->mdev, destroy_rq, in); + return mlx5_cmd_exec_in(dev->mdev, destroy_rq, in); } int mlx5_core_create_rq_tracked(struct mlx5_ib_dev *dev, u32 *in, int inlen, @@ -586,8 +585,7 @@ int mlx5_core_destroy_rq_tracked(struct mlx5_ib_dev *dev, struct mlx5_core_qp *rq) { destroy_resource_common(dev, rq); - destroy_rq_tracked(dev, rq->qpn, rq->uid); - return 0; + return destroy_rq_tracked(dev, rq->qpn, rq->uid); } static void destroy_sq_tracked(struct mlx5_ib_dev *dev, u32 sqn, u16 uid) From a3f252436e5701ae8a974869b7fe075a97e13472 Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Tue, 13 Jun 2023 10:13:37 +0200 Subject: [PATCH 391/513] ovl: check type and offset of struct vfsmount in ovl_entry [ Upstream commit f723edb8a532cd26e1ff0a2b271d73762d48f762 ] Porting overlayfs to the new amount api I started experiencing random crashes that couldn't be explained easily. So after much debugging and reasoning it became clear that struct ovl_entry requires the point to struct vfsmount to be the first member and of type struct vfsmount. During the port I added a new member at the beginning of struct ovl_entry which broke all over the place in the form of random crashes and cache corruptions. While there's a comment in ovl_free_fs() to the effect of "Hack! Reuse ofs->layers as a vfsmount array before freeing it" there's no such comment on struct ovl_entry which makes this easy to trip over. Add a comment and two static asserts for both the offset and the type of pointer in struct ovl_entry. Signed-off-by: Christian Brauner Signed-off-by: Amir Goldstein Signed-off-by: Sasha Levin --- fs/overlayfs/ovl_entry.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h index b2d64f3c974b..08031638bbee 100644 --- a/fs/overlayfs/ovl_entry.h +++ b/fs/overlayfs/ovl_entry.h @@ -32,6 +32,7 @@ struct ovl_sb { }; struct ovl_layer { + /* ovl_free_fs() relies on @mnt being the first member! */ struct vfsmount *mnt; /* Trap in ovl inode cache */ struct inode *trap; @@ -42,6 +43,14 @@ struct ovl_layer { int fsid; }; +/* + * ovl_free_fs() relies on @mnt being the first member when unmounting + * the private mounts created for each layer. Let's check both the + * offset and type. 
+ */ +static_assert(offsetof(struct ovl_layer, mnt) == 0); +static_assert(__same_type(typeof_member(struct ovl_layer, mnt), struct vfsmount *)); + struct ovl_path { const struct ovl_layer *layer; struct dentry *dentry; From 9850867042674361f455ea8901375cff5b800be5 Mon Sep 17 00:00:00 2001 From: Paulo Alcantara Date: Mon, 19 Jun 2023 16:24:37 -0300 Subject: [PATCH 392/513] smb: client: fix warning in cifs_smb3_do_mount() [ Upstream commit 12c30f33cc6769bf411088a2872843c4f9ea32f9 ] This fixes the following warning reported by kernel test robot fs/smb/client/cifsfs.c:982 cifs_smb3_do_mount() warn: possible memory leak of 'cifs_sb' Link: https://lore.kernel.org/all/202306170124.CtQqzf0I-lkp@intel.com/ Signed-off-by: Paulo Alcantara (SUSE) Signed-off-by: Steve French Signed-off-by: Sasha Levin --- fs/cifs/cifsfs.c | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index c9481289266c..b5ae209539ff 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c @@ -848,11 +848,11 @@ struct dentry * cifs_smb3_do_mount(struct file_system_type *fs_type, int flags, struct smb3_fs_context *old_ctx) { - int rc; - struct super_block *sb = NULL; - struct cifs_sb_info *cifs_sb = NULL; struct cifs_mnt_data mnt_data; + struct cifs_sb_info *cifs_sb; + struct super_block *sb; struct dentry *root; + int rc; /* * Prints in Kernel / CIFS log the attempted mount operation @@ -863,11 +863,9 @@ cifs_smb3_do_mount(struct file_system_type *fs_type, else cifs_info("Attempting to mount %s\n", old_ctx->UNC); - cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL); - if (cifs_sb == NULL) { - root = ERR_PTR(-ENOMEM); - goto out; - } + cifs_sb = kzalloc(sizeof(*cifs_sb), GFP_KERNEL); + if (!cifs_sb) + return ERR_PTR(-ENOMEM); cifs_sb->ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL); if (!cifs_sb->ctx) { @@ -910,10 +908,8 @@ cifs_smb3_do_mount(struct file_system_type *fs_type, sb = sget(fs_type, cifs_match_super, cifs_set_super, flags, &mnt_data); if (IS_ERR(sb)) { - root = ERR_CAST(sb); cifs_umount(cifs_sb); - cifs_sb = NULL; - goto out; + return ERR_CAST(sb); } if (sb->s_root) { @@ -944,13 +940,9 @@ cifs_smb3_do_mount(struct file_system_type *fs_type, deactivate_locked_super(sb); return root; out: - if (cifs_sb) { - if (!sb || IS_ERR(sb)) { /* otherwise kill_sb will handle */ - kfree(cifs_sb->prepath); - smb3_cleanup_fs_context(cifs_sb->ctx); - kfree(cifs_sb); - } - } + kfree(cifs_sb->prepath); + smb3_cleanup_fs_context(cifs_sb->ctx); + kfree(cifs_sb); return root; } From 1676748aa29099fc0abd71e0fb092e76e835f25c Mon Sep 17 00:00:00 2001 From: Yunfei Dong Date: Mon, 17 Apr 2023 16:17:40 +0800 Subject: [PATCH 393/513] media: v4l2-mem2mem: add lock to protect parameter num_rdy MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 56b5c3e67b0f9af3f45cf393be048ee8d8a92694 ] Getting below error when using KCSAN to check the driver. Adding lock to protect parameter num_rdy when getting the value with function: v4l2_m2m_num_src_bufs_ready/v4l2_m2m_num_dst_bufs_ready. 
kworker/u16:3: [name:report&]BUG: KCSAN: data-race in v4l2_m2m_buf_queue kworker/u16:3: [name:report&] kworker/u16:3: [name:report&]read-write to 0xffffff8105f35b94 of 1 bytes by task 20865 on cpu 7: kworker/u16:3:  v4l2_m2m_buf_queue+0xd8/0x10c Signed-off-by: Pina Chen Signed-off-by: Yunfei Dong Signed-off-by: Hans Verkuil Signed-off-by: Sasha Levin --- include/media/v4l2-mem2mem.h | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h index 5a91b548ecc0..8d52c4506762 100644 --- a/include/media/v4l2-mem2mem.h +++ b/include/media/v4l2-mem2mem.h @@ -588,7 +588,14 @@ void v4l2_m2m_buf_queue(struct v4l2_m2m_ctx *m2m_ctx, static inline unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) { - return m2m_ctx->out_q_ctx.num_rdy; + unsigned int num_buf_rdy; + unsigned long flags; + + spin_lock_irqsave(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + num_buf_rdy = m2m_ctx->out_q_ctx.num_rdy; + spin_unlock_irqrestore(&m2m_ctx->out_q_ctx.rdy_spinlock, flags); + + return num_buf_rdy; } /** @@ -600,7 +607,14 @@ unsigned int v4l2_m2m_num_src_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) static inline unsigned int v4l2_m2m_num_dst_bufs_ready(struct v4l2_m2m_ctx *m2m_ctx) { - return m2m_ctx->cap_q_ctx.num_rdy; + unsigned int num_buf_rdy; + unsigned long flags; + + spin_lock_irqsave(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + num_buf_rdy = m2m_ctx->cap_q_ctx.num_rdy; + spin_unlock_irqrestore(&m2m_ctx->cap_q_ctx.rdy_spinlock, flags); + + return num_buf_rdy; } /** From cb174344bf85d29d3c53180ac75a55dfbb45a56e Mon Sep 17 00:00:00 2001 From: Prashanth K Date: Tue, 9 May 2023 18:57:52 +0530 Subject: [PATCH 394/513] usb: gadget: u_serial: Avoid spinlock recursion in __gs_console_push [ Upstream commit e5990469943c711cb00bfde6338d2add6c6d0bfe ] When serial console over USB is enabled, gs_console_connect queues gs_console_work, where it acquires the spinlock and queues the usb request, and this request goes to gadget layer. Now consider a situation where gadget layer prints something to dmesg, this will eventually call gs_console_write() which requires cons->lock. And this causes spinlock recursion. Avoid this by excluding usb_ep_queue from the spinlock. spin_lock_irqsave //needs cons->lock gs_console_write . . _printk __warn_printk dev_warn/pr_err . . [USB Gadget Layer] . . usb_ep_queue gs_console_work __gs_console_push // acquires cons->lock process_one_work Signed-off-by: Prashanth K Link: https://lore.kernel.org/r/1683638872-6885-1-git-send-email-quic_prashk@quicinc.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/usb/gadget/function/u_serial.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index a8d1e8b192c5..f975dc03a190 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -915,8 +915,11 @@ static void __gs_console_push(struct gs_console *cons) } req->length = size; + + spin_unlock_irq(&cons->lock); if (usb_ep_queue(ep, req, GFP_ATOMIC)) req->length = 0; + spin_lock_irq(&cons->lock); } static void gs_console_work(struct work_struct *work) From 776b34615a29551d69d82a0082e7319d5ea284bd Mon Sep 17 00:00:00 2001 From: Hans Verkuil Date: Wed, 24 May 2023 13:11:47 +0100 Subject: [PATCH 395/513] media: platform: mediatek: vpu: fix NULL ptr dereference [ Upstream commit 3df55cd773e8603b623425cc97b05e542854ad27 ] If pdev is NULL, then it is still dereferenced. 
This fixes this smatch warning: drivers/media/platform/mediatek/vpu/mtk_vpu.c:570 vpu_load_firmware() warn: address of NULL pointer 'pdev' Signed-off-by: Hans Verkuil Cc: Yunfei Dong Signed-off-by: Mauro Carvalho Chehab Signed-off-by: Sasha Levin --- drivers/media/platform/mtk-vpu/mtk_vpu.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/media/platform/mtk-vpu/mtk_vpu.c b/drivers/media/platform/mtk-vpu/mtk_vpu.c index 7f1647da0ade..af59cc52fdd7 100644 --- a/drivers/media/platform/mtk-vpu/mtk_vpu.c +++ b/drivers/media/platform/mtk-vpu/mtk_vpu.c @@ -562,15 +562,17 @@ static int load_requested_vpu(struct mtk_vpu *vpu, int vpu_load_firmware(struct platform_device *pdev) { struct mtk_vpu *vpu; - struct device *dev = &pdev->dev; + struct device *dev; struct vpu_run *run; int ret; if (!pdev) { - dev_err(dev, "VPU platform device is invalid\n"); + pr_err("VPU platform device is invalid\n"); return -EINVAL; } + dev = &pdev->dev; + vpu = platform_get_drvdata(pdev); run = &vpu->run; From 25decbbb66e48f108c8bd7bdac070b42b0e1a24b Mon Sep 17 00:00:00 2001 From: Mika Westerberg Date: Fri, 26 May 2023 14:46:44 +0300 Subject: [PATCH 396/513] thunderbolt: Read retimer NVM authentication status prior tb_retimer_set_inbound_sbtx() [ Upstream commit 1402ba08abae5cfa583ff1a40b99c098a0532d41 ] According to the USB4 retimer guide the correct order is immediately after sending ENUMERATE_RETIMERS so update the code to follow this. Signed-off-by: Mika Westerberg Signed-off-by: Sasha Levin --- drivers/thunderbolt/retimer.c | 29 +++++++++++++++++++++-------- 1 file changed, 21 insertions(+), 8 deletions(-) diff --git a/drivers/thunderbolt/retimer.c b/drivers/thunderbolt/retimer.c index 566c03105fb8..1b7ab0bbd132 100644 --- a/drivers/thunderbolt/retimer.c +++ b/drivers/thunderbolt/retimer.c @@ -208,6 +208,21 @@ static ssize_t nvm_authenticate_show(struct device *dev, return ret; } +static void tb_retimer_nvm_authenticate_status(struct tb_port *port, u32 *status) +{ + int i; + + tb_port_dbg(port, "reading NVM authentication status of retimers\n"); + + /* + * Before doing anything else, read the authentication status. + * If the retimer has it set, store it for the new retimer + * device instance. + */ + for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) + usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]); +} + static void tb_retimer_set_inbound_sbtx(struct tb_port *port) { int i; @@ -481,18 +496,16 @@ int tb_retimer_scan(struct tb_port *port, bool add) return ret; /* - * Enable sideband channel for each retimer. We can do this - * regardless whether there is device connected or not. + * Immediately after sending enumerate retimers read the + * authentication status of each retimer. */ - tb_retimer_set_inbound_sbtx(port); + tb_retimer_nvm_authenticate_status(port, status); /* - * Before doing anything else, read the authentication status. - * If the retimer has it set, store it for the new retimer - * device instance. + * Enable sideband channel for each retimer. We can do this + * regardless whether there is device connected or not. 
*/ - for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) - usb4_port_retimer_nvm_authenticate_status(port, i, &status[i]); + tb_retimer_set_inbound_sbtx(port); for (i = 1; i <= TB_MAX_RETIMER_INDEX; i++) { /* From 7bdb4c96304937b865dc7d8142f71802d758907b Mon Sep 17 00:00:00 2001 From: Xu Yang Date: Tue, 30 May 2023 18:40:07 +0800 Subject: [PATCH 397/513] usb: chipidea: imx: don't request QoS for imx8ulp [ Upstream commit 9a070e8e208995a9d638b538ed7abf28bd6ea6f0 ] Use dedicated imx8ulp usb compatible to remove QoS request since imx8ulp has no such limitation of imx7ulp: DMA will not work if system enters idle. Signed-off-by: Xu Yang Signed-off-by: Li Jun Acked-by: Peter Chen Message-ID: <20230530104007.1294702-2-xu.yang_2@nxp.com> Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/usb/chipidea/ci_hdrc_imx.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index 097142ffb184..669e7606651a 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -70,6 +70,10 @@ static const struct ci_hdrc_imx_platform_flag imx7ulp_usb_data = { CI_HDRC_PMQOS, }; +static const struct ci_hdrc_imx_platform_flag imx8ulp_usb_data = { + .flags = CI_HDRC_SUPPORTS_RUNTIME_PM, +}; + static const struct of_device_id ci_hdrc_imx_dt_ids[] = { { .compatible = "fsl,imx23-usb", .data = &imx23_usb_data}, { .compatible = "fsl,imx28-usb", .data = &imx28_usb_data}, @@ -80,6 +84,7 @@ static const struct of_device_id ci_hdrc_imx_dt_ids[] = { { .compatible = "fsl,imx6ul-usb", .data = &imx6ul_usb_data}, { .compatible = "fsl,imx7d-usb", .data = &imx7d_usb_data}, { .compatible = "fsl,imx7ulp-usb", .data = &imx7ulp_usb_data}, + { .compatible = "fsl,imx8ulp-usb", .data = &imx8ulp_usb_data}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, ci_hdrc_imx_dt_ids); From eafb79d2448a027deb30bc0a486028ed190a9f7a Mon Sep 17 00:00:00 2001 From: Xu Yang Date: Wed, 17 May 2023 16:19:07 +0800 Subject: [PATCH 398/513] usb: chipidea: imx: add missing USB PHY DPDM wakeup setting [ Upstream commit 53d061c19dc4cb68409df6dc11c40389c8c42a75 ] USB PHY DPDM wakeup bit is enabled by default, when USB wakeup is not required(/sys/.../wakeup is disabled), this bit should be disabled, otherwise we will have unexpected wakeup if do USB device connect/disconnect while system sleep. This bit can be enabled for both host and device mode. 
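A minimal sketch of the pattern this change relies on, not the exact usbmisc_imx.c code: because the DPDM bit is now part of MX6_USB_OTG_WAKEUP_BITS, it is set and cleared together with the other wakeup sources whenever wakeup is toggled for the port; 'wakeup_reg' below is a placeholder for the controller's wakeup control register, and 'example_set_wakeup' is a hypothetical helper.

/* Simplified sketch, assuming a single memory-mapped wakeup register. */
static void example_set_wakeup(void __iomem *wakeup_reg, bool enabled)
{
	u32 val = readl(wakeup_reg);

	if (enabled)
		val |= MX6_USB_OTG_WAKEUP_BITS;   /* now includes DPDM */
	else
		val &= ~MX6_USB_OTG_WAKEUP_BITS;  /* DPDM cleared as well */

	writel(val, wakeup_reg);
}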
Signed-off-by: Li Jun Signed-off-by: Xu Yang Acked-by: Peter Chen Message-ID: <20230517081907.3410465-3-xu.yang_2@nxp.com> Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/usb/chipidea/usbmisc_imx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index bac0f5458cab..2318c7906acd 100644 --- a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c @@ -135,7 +135,7 @@ #define TXVREFTUNE0_MASK (0xf << 20) #define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \ - MX6_BM_ID_WAKEUP) + MX6_BM_ID_WAKEUP | MX6SX_BM_DPDM_WAKEUP_EN) struct usbmisc_ops { /* It's called once when probe a usb device */ From 85e888150075cb221270b64bf772341fc6bd11d9 Mon Sep 17 00:00:00 2001 From: Tuo Li Date: Tue, 13 Jun 2023 11:06:37 +0800 Subject: [PATCH 399/513] gfs2: Fix possible data races in gfs2_show_options() [ Upstream commit 6fa0a72cbbe45db4ed967a51f9e6f4e3afe61d20 ] Some fields such as gt_logd_secs of the struct gfs2_tune are accessed without holding the lock gt_spin in gfs2_show_options(): val = sdp->sd_tune.gt_logd_secs; if (val != 30) seq_printf(s, ",commit=%d", val); And thus can cause data races when gfs2_show_options() and other functions such as gfs2_reconfigure() are concurrently executed: spin_lock(>->gt_spin); gt->gt_logd_secs = newargs->ar_commit; To fix these possible data races, the lock sdp->sd_tune.gt_spin is acquired before accessing the fields of gfs2_tune and released after these accesses. Further changes by Andreas: - Don't hold the spin lock over the seq_printf operations. Reported-by: BassCheck Signed-off-by: Tuo Li Signed-off-by: Andreas Gruenbacher Signed-off-by: Sasha Levin --- fs/gfs2/super.c | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c index ca6ee1cbccd5..51b44da4a0d6 100644 --- a/fs/gfs2/super.c +++ b/fs/gfs2/super.c @@ -980,7 +980,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root) { struct gfs2_sbd *sdp = root->d_sb->s_fs_info; struct gfs2_args *args = &sdp->sd_args; - int val; + unsigned int logd_secs, statfs_slow, statfs_quantum, quota_quantum; + + spin_lock(&sdp->sd_tune.gt_spin); + logd_secs = sdp->sd_tune.gt_logd_secs; + quota_quantum = sdp->sd_tune.gt_quota_quantum; + statfs_quantum = sdp->sd_tune.gt_statfs_quantum; + statfs_slow = sdp->sd_tune.gt_statfs_slow; + spin_unlock(&sdp->sd_tune.gt_spin); if (is_ancestor(root, sdp->sd_master_dir)) seq_puts(s, ",meta"); @@ -1035,17 +1042,14 @@ static int gfs2_show_options(struct seq_file *s, struct dentry *root) } if (args->ar_discard) seq_puts(s, ",discard"); - val = sdp->sd_tune.gt_logd_secs; - if (val != 30) - seq_printf(s, ",commit=%d", val); - val = sdp->sd_tune.gt_statfs_quantum; - if (val != 30) - seq_printf(s, ",statfs_quantum=%d", val); - else if (sdp->sd_tune.gt_statfs_slow) + if (logd_secs != 30) + seq_printf(s, ",commit=%d", logd_secs); + if (statfs_quantum != 30) + seq_printf(s, ",statfs_quantum=%d", statfs_quantum); + else if (statfs_slow) seq_puts(s, ",statfs_quantum=0"); - val = sdp->sd_tune.gt_quota_quantum; - if (val != 60) - seq_printf(s, ",quota_quantum=%d", val); + if (quota_quantum != 60) + seq_printf(s, ",quota_quantum=%d", quota_quantum); if (args->ar_statfs_percent) seq_printf(s, ",statfs_percent=%d", args->ar_statfs_percent); if (args->ar_errors != GFS2_ERRORS_DEFAULT) { From 97fd1c8e9c5aa833aab7e836760bc13103afa892 Mon Sep 17 00:00:00 2001 From: 
Armin Wolf Date: Fri, 12 May 2023 20:45:29 +0200 Subject: [PATCH 400/513] pcmcia: rsrc_nonstatic: Fix memory leak in nonstatic_release_resource_db() [ Upstream commit c85fd9422fe0f5d667305efb27f56d09eab120b0 ] When nonstatic_release_resource_db() frees all resources associated with an PCMCIA socket, it forgets to free socket_data too, causing a memory leak observable with kmemleak: unreferenced object 0xc28d1000 (size 64): comm "systemd-udevd", pid 297, jiffies 4294898478 (age 194.484s) hex dump (first 32 bytes): 00 00 00 00 00 00 00 00 f0 85 0e c3 00 00 00 00 ................ 00 00 00 00 0c 10 8d c2 00 00 00 00 00 00 00 00 ................ backtrace: [] __kmem_cache_alloc_node+0x2d7/0x4a0 [<7e51f0c8>] kmalloc_trace+0x31/0xa4 [] nonstatic_init+0x24/0x1a4 [pcmcia_rsrc] [] pcmcia_register_socket+0x200/0x35c [pcmcia_core] [] yenta_probe+0x4d8/0xa70 [yenta_socket] [] pci_device_probe+0x99/0x194 [<84b7c690>] really_probe+0x181/0x45c [<8060fe6e>] __driver_probe_device+0x75/0x1f4 [] driver_probe_device+0x28/0xac [<648b766f>] __driver_attach+0xeb/0x1e4 [<6e9659eb>] bus_for_each_dev+0x61/0xb4 [<25a669f3>] driver_attach+0x1e/0x28 [] bus_add_driver+0x102/0x20c [] driver_register+0x5b/0x120 [<942cd8a4>] __pci_register_driver+0x44/0x4c [] __UNIQUE_ID___addressable_cleanup_module188+0x1c/0xfffff000 [iTCO_vendor_support] Fix this by freeing socket_data too. Tested on a Acer Travelmate 4002WLMi by manually binding/unbinding the yenta_cardbus driver (yenta_socket). Signed-off-by: Armin Wolf Message-ID: <20230512184529.5094-1-W_Armin@gmx.de> Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/pcmcia/rsrc_nonstatic.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c index 1cac52870711..e6c90c0bb764 100644 --- a/drivers/pcmcia/rsrc_nonstatic.c +++ b/drivers/pcmcia/rsrc_nonstatic.c @@ -1053,6 +1053,8 @@ static void nonstatic_release_resource_db(struct pcmcia_socket *s) q = p->next; kfree(p); } + + kfree(data); } From 2ea70379e4f4efa95c9daa7f3f9bdd4d40aec927 Mon Sep 17 00:00:00 2001 From: Zhang Shurong Date: Fri, 23 Jun 2023 13:39:35 +0800 Subject: [PATCH 401/513] firewire: net: fix use after free in fwnet_finish_incoming_packet() [ Upstream commit 3ff256751a2853e1ffaa36958ff933ccc98c6cb5 ] The netif_rx() function frees the skb so we can't dereference it to save the skb->len. 
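A minimal sketch of the rule the fix follows, illustrative only and not the driver's code ('rx_one' is a hypothetical helper): once an skb has been passed to netif_rx() the caller no longer owns it, so any field still needed afterwards, here the length used for statistics, must be copied out first.

/* Sketch assuming <linux/netdevice.h> and <linux/skbuff.h>. */
static void rx_one(struct net_device *net, struct sk_buff *skb)
{
	unsigned int len = skb->len;	/* copy before handing off ownership */

	if (netif_rx(skb) == NET_RX_DROP) {
		net->stats.rx_errors++;
		net->stats.rx_dropped++;
	} else {
		net->stats.rx_packets++;
		net->stats.rx_bytes += len;	/* skb may already be freed here */
	}
}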
Signed-off-by: Zhang Shurong Link: https://lore.kernel.org/r/tencent_3B3D24B66ED66A6BB73CC0E63C6A14E45109@qq.com Signed-off-by: Takashi Sakamoto Signed-off-by: Sasha Levin --- drivers/firewire/net.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 4c3fd2eed1da..beba0a56bb9a 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -488,7 +488,7 @@ static int fwnet_finish_incoming_packet(struct net_device *net, struct sk_buff *skb, u16 source_node_id, bool is_broadcast, u16 ether_type) { - int status; + int status, len; switch (ether_type) { case ETH_P_ARP: @@ -542,13 +542,15 @@ static int fwnet_finish_incoming_packet(struct net_device *net, } skb->protocol = protocol; } + + len = skb->len; status = netif_rx(skb); if (status == NET_RX_DROP) { net->stats.rx_errors++; net->stats.rx_dropped++; } else { net->stats.rx_packets++; - net->stats.rx_bytes += skb->len; + net->stats.rx_bytes += len; } return 0; From 302d04596364e073351bdcfe91bdeeb3ebd64242 Mon Sep 17 00:00:00 2001 From: Yuechao Zhao Date: Mon, 12 Jun 2023 11:19:07 +0800 Subject: [PATCH 402/513] watchdog: sp5100_tco: support Hygon FCH/SCH (Server Controller Hub) [ Upstream commit 009637de1f65cff452ad49554d1e8ef9fda99e43 ] Add PCI_VENDOR_ID_HYGON(Hygon vendor id [0x1d94]) in this driver Signed-off-by: Yuechao Zhao Reviewed-by: Guenter Roeck Link: https://lkml.kernel.org/r/20230612031907.796461-1-a345351830@gmail.com Signed-off-by: Guenter Roeck Signed-off-by: Wim Van Sebroeck Signed-off-by: Sasha Levin --- drivers/watchdog/sp5100_tco.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/watchdog/sp5100_tco.c b/drivers/watchdog/sp5100_tco.c index 1e327fb1ad20..0141858188c5 100644 --- a/drivers/watchdog/sp5100_tco.c +++ b/drivers/watchdog/sp5100_tco.c @@ -89,7 +89,7 @@ static enum tco_reg_layout tco_reg_layout(struct pci_dev *dev) sp5100_tco_pci->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS && sp5100_tco_pci->revision >= AMD_ZEN_SMBUS_PCI_REV) { return efch_mmio; - } else if (dev->vendor == PCI_VENDOR_ID_AMD && + } else if ((dev->vendor == PCI_VENDOR_ID_AMD || dev->vendor == PCI_VENDOR_ID_HYGON) && ((dev->device == PCI_DEVICE_ID_AMD_HUDSON2_SMBUS && dev->revision >= 0x41) || (dev->device == PCI_DEVICE_ID_AMD_KERNCZ_SMBUS && @@ -561,6 +561,8 @@ static const struct pci_device_id sp5100_tco_pci_tbl[] = { PCI_ANY_ID, }, { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, PCI_ANY_ID, PCI_ANY_ID, }, + { PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_KERNCZ_SMBUS, PCI_ANY_ID, + PCI_ANY_ID, }, { 0, }, /* End of list */ }; MODULE_DEVICE_TABLE(pci, sp5100_tco_pci_tbl); From 548a6b64b3c0688f01119a6fcccceb41f8c984e4 Mon Sep 17 00:00:00 2001 From: Zhengping Jiang Date: Wed, 24 May 2023 17:04:15 -0700 Subject: [PATCH 403/513] Bluetooth: L2CAP: Fix use-after-free [ Upstream commit f752a0b334bb95fe9b42ecb511e0864e2768046f ] Fix potential use-after-free in l2cap_le_command_rej. 
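A minimal sketch of the reference-counting idiom the fix applies, shown with the generic kref API instead of the L2CAP channel helpers; 'struct my_obj', 'obj->ref', 'do_work()' and 'my_obj_release()' are hypothetical names used only for illustration.

#include <linux/kref.h>

static void use_object_safely(struct my_obj *obj)
{
	/* Bail out if the last reference is already gone; this is what keeps
	 * a dying object from being locked, deleted and freed twice. */
	if (!kref_get_unless_zero(&obj->ref))
		return;

	do_work(obj);			/* safe while our reference is held */
	kref_put(&obj->ref, my_obj_release);
}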
Signed-off-by: Zhengping Jiang Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- net/bluetooth/l2cap_core.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 9dd54247029a..0770286ecf0b 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -6375,9 +6375,14 @@ static inline int l2cap_le_command_rej(struct l2cap_conn *conn, if (!chan) goto done; + chan = l2cap_chan_hold_unless_zero(chan); + if (!chan) + goto done; + l2cap_chan_lock(chan); l2cap_chan_del(chan, ECONNREFUSED); l2cap_chan_unlock(chan); + l2cap_chan_put(chan); done: mutex_unlock(&conn->chan_lock); From 5720c7e185d4b421cb317feb73b36896e0751272 Mon Sep 17 00:00:00 2001 From: Matthew Anderson Date: Sat, 24 Jun 2023 12:08:10 -0500 Subject: [PATCH 404/513] Bluetooth: btusb: Add MT7922 bluetooth ID for the Asus Ally [ Upstream commit fa01eba11f0e57c767a5eab5291c7a01407a00be ] Adding the device ID from the Asus Ally gets the bluetooth working on the device. Signed-off-by: Matthew Anderson Signed-off-by: Luiz Augusto von Dentz Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/bluetooth/btusb.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 9eb2267bd3a0..15d253325fd8 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -475,6 +475,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, From ef568da1fd843581e855c79a368209b752dea2c1 Mon Sep 17 00:00:00 2001 From: shanzhulig Date: Tue, 27 Jun 2023 18:10:47 -0700 Subject: [PATCH 405/513] drm/amdgpu: Fix potential fence use-after-free v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 2e54154b9f27262efd0cb4f903cc7d5ad1fe9628 ] fence Decrements the reference count before exiting. Avoid Race Vulnerabilities for fence use-after-free. v2 (chk): actually fix the use after free and not just move it. 
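A minimal sketch of the corrected ordering, mirroring the hunk below: everything needed from the fence is read while the reference is still held, and the pointer is never touched after dma_fence_put(), which may drop the last reference.

/* Sketch only; 'fence' and 'timeout' as used in amdgpu_cs_wait_all_fences(). */
long r = dma_fence_wait_timeout(fence, true, timeout);

if (r > 0 && fence->error)
	r = fence->error;	/* still safe: we hold a reference */
dma_fence_put(fence);		/* may free the fence */
/* 'fence' must not be dereferenced past this point */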
Signed-off-by: shanzhulig Signed-off-by: Christian König Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 2fd4d8ad7e40..4b01188385b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1541,15 +1541,15 @@ static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev, continue; r = dma_fence_wait_timeout(fence, true, timeout); + if (r > 0 && fence->error) + r = fence->error; + dma_fence_put(fence); if (r < 0) return r; if (r == 0) break; - - if (fence->error) - return fence->error; } memset(wait, 0, sizeof(*wait)); From e7799bb4dbe26bfb665f29ea87981708fd6012d8 Mon Sep 17 00:00:00 2001 From: Edward Lo Date: Thu, 16 Mar 2023 10:56:55 +0800 Subject: [PATCH 406/513] fs/ntfs3: Enhance sanity check while generating attr_list [ Upstream commit fdec309c7672cbee4dc0229ee4cbb33c948a1bdd ] ni_create_attr_list uses WARN_ON to catch error cases while generating attribute list, which only prints out stack trace and may not be enough. This repalces them with more proper error handling flow. [ 59.666332] BUG: kernel NULL pointer dereference, address: 000000000000000e [ 59.673268] #PF: supervisor read access in kernel mode [ 59.678354] #PF: error_code(0x0000) - not-present page [ 59.682831] PGD 8000000005ff1067 P4D 8000000005ff1067 PUD 7dee067 PMD 0 [ 59.688556] Oops: 0000 [#1] PREEMPT SMP KASAN PTI [ 59.692642] CPU: 0 PID: 198 Comm: poc Tainted: G B W 6.2.0-rc1+ #4 [ 59.698868] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014 [ 59.708795] RIP: 0010:ni_create_attr_list+0x505/0x860 [ 59.713657] Code: 7e 10 e8 5e d0 d0 ff 45 0f b7 76 10 48 8d 7b 16 e8 00 d1 d0 ff 66 44 89 73 16 4d 8d 75 0e 4c 89 f7 e8 3f d0 d0 ff 4c 8d8 [ 59.731559] RSP: 0018:ffff88800a56f1e0 EFLAGS: 00010282 [ 59.735691] RAX: 0000000000000001 RBX: ffff88800b7b5088 RCX: ffffffffb83079fe [ 59.741792] RDX: 0000000000000001 RSI: 0000000000000008 RDI: ffffffffbb7f9fc0 [ 59.748423] RBP: ffff88800a56f3a8 R08: ffff88800b7b50a0 R09: fffffbfff76ff3f9 [ 59.754654] R10: ffffffffbb7f9fc7 R11: fffffbfff76ff3f8 R12: ffff88800b756180 [ 59.761552] R13: 0000000000000000 R14: 000000000000000e R15: 0000000000000050 [ 59.768323] FS: 00007feaa8c96440(0000) GS:ffff88806d400000(0000) knlGS:0000000000000000 [ 59.776027] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 59.781395] CR2: 00007f3a2e0b1000 CR3: 000000000a5bc000 CR4: 00000000000006f0 [ 59.787607] Call Trace: [ 59.790271] [ 59.792488] ? __pfx_ni_create_attr_list+0x10/0x10 [ 59.797235] ? kernel_text_address+0xd3/0xe0 [ 59.800856] ? unwind_get_return_address+0x3e/0x60 [ 59.805101] ? __kasan_check_write+0x18/0x20 [ 59.809296] ? preempt_count_sub+0x1c/0xd0 [ 59.813421] ni_ins_attr_ext+0x52c/0x5c0 [ 59.817034] ? __pfx_ni_ins_attr_ext+0x10/0x10 [ 59.821926] ? __vfs_setxattr+0x121/0x170 [ 59.825718] ? __vfs_setxattr_noperm+0x97/0x300 [ 59.829562] ? __vfs_setxattr_locked+0x145/0x170 [ 59.833987] ? vfs_setxattr+0x137/0x2a0 [ 59.836732] ? do_setxattr+0xce/0x150 [ 59.839807] ? setxattr+0x126/0x140 [ 59.842353] ? path_setxattr+0x164/0x180 [ 59.845275] ? __x64_sys_setxattr+0x71/0x90 [ 59.848838] ? do_syscall_64+0x3f/0x90 [ 59.851898] ? entry_SYSCALL_64_after_hwframe+0x72/0xdc [ 59.857046] ? stack_depot_save+0x17/0x20 [ 59.860299] ni_insert_attr+0x1ba/0x420 [ 59.863104] ? 
__pfx_ni_insert_attr+0x10/0x10 [ 59.867069] ? preempt_count_sub+0x1c/0xd0 [ 59.869897] ? _raw_spin_unlock_irqrestore+0x2b/0x50 [ 59.874088] ? __create_object+0x3ae/0x5d0 [ 59.877865] ni_insert_resident+0xc4/0x1c0 [ 59.881430] ? __pfx_ni_insert_resident+0x10/0x10 [ 59.886355] ? kasan_save_alloc_info+0x1f/0x30 [ 59.891117] ? __kasan_kmalloc+0x8b/0xa0 [ 59.894383] ntfs_set_ea+0x90d/0xbf0 [ 59.897703] ? __pfx_ntfs_set_ea+0x10/0x10 [ 59.901011] ? kernel_text_address+0xd3/0xe0 [ 59.905308] ? __kernel_text_address+0x16/0x50 [ 59.909811] ? unwind_get_return_address+0x3e/0x60 [ 59.914898] ? __pfx_stack_trace_consume_entry+0x10/0x10 [ 59.920250] ? arch_stack_walk+0xa2/0x100 [ 59.924560] ? filter_irq_stacks+0x27/0x80 [ 59.928722] ntfs_setxattr+0x405/0x440 [ 59.932512] ? __pfx_ntfs_setxattr+0x10/0x10 [ 59.936634] ? kvmalloc_node+0x2d/0x120 [ 59.940378] ? kasan_save_stack+0x41/0x60 [ 59.943870] ? kasan_save_stack+0x2a/0x60 [ 59.947719] ? kasan_set_track+0x29/0x40 [ 59.951417] ? kasan_save_alloc_info+0x1f/0x30 [ 59.955733] ? __kasan_kmalloc+0x8b/0xa0 [ 59.959598] ? __kmalloc_node+0x68/0x150 [ 59.963163] ? kvmalloc_node+0x2d/0x120 [ 59.966490] ? vmemdup_user+0x2b/0xa0 [ 59.969060] __vfs_setxattr+0x121/0x170 [ 59.972456] ? __pfx___vfs_setxattr+0x10/0x10 [ 59.976008] __vfs_setxattr_noperm+0x97/0x300 [ 59.981562] __vfs_setxattr_locked+0x145/0x170 [ 59.986100] vfs_setxattr+0x137/0x2a0 [ 59.989964] ? __pfx_vfs_setxattr+0x10/0x10 [ 59.993616] ? __kasan_check_write+0x18/0x20 [ 59.997425] do_setxattr+0xce/0x150 [ 60.000304] setxattr+0x126/0x140 [ 60.002967] ? __pfx_setxattr+0x10/0x10 [ 60.006471] ? __virt_addr_valid+0xcb/0x140 [ 60.010461] ? __call_rcu_common.constprop.0+0x1c7/0x330 [ 60.016037] ? debug_smp_processor_id+0x1b/0x30 [ 60.021008] ? kasan_quarantine_put+0x5b/0x190 [ 60.025545] ? putname+0x84/0xa0 [ 60.027910] ? __kasan_slab_free+0x11e/0x1b0 [ 60.031483] ? putname+0x84/0xa0 [ 60.033986] ? preempt_count_sub+0x1c/0xd0 [ 60.036876] ? __mnt_want_write+0xae/0x100 [ 60.040738] ? mnt_want_write+0x8f/0x150 [ 60.044317] path_setxattr+0x164/0x180 [ 60.048096] ? __pfx_path_setxattr+0x10/0x10 [ 60.052096] ? strncpy_from_user+0x175/0x1c0 [ 60.056482] ? debug_smp_processor_id+0x1b/0x30 [ 60.059848] ? 
fpregs_assert_state_consistent+0x6b/0x80 [ 60.064557] __x64_sys_setxattr+0x71/0x90 [ 60.068892] do_syscall_64+0x3f/0x90 [ 60.072868] entry_SYSCALL_64_after_hwframe+0x72/0xdc [ 60.077523] RIP: 0033:0x7feaa86e4469 [ 60.080915] Code: 00 f3 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 088 [ 60.097353] RSP: 002b:00007ffdbd8311e8 EFLAGS: 00000286 ORIG_RAX: 00000000000000bc [ 60.103386] RAX: ffffffffffffffda RBX: 9461c5e290baac00 RCX: 00007feaa86e4469 [ 60.110322] RDX: 00007ffdbd831fe0 RSI: 00007ffdbd831305 RDI: 00007ffdbd831263 [ 60.116808] RBP: 00007ffdbd836180 R08: 0000000000000001 R09: 00007ffdbd836268 [ 60.123879] R10: 000000000000007d R11: 0000000000000286 R12: 0000000000400500 [ 60.130540] R13: 00007ffdbd836260 R14: 0000000000000000 R15: 0000000000000000 [ 60.136553] [ 60.138818] Modules linked in: [ 60.141839] CR2: 000000000000000e [ 60.144831] ---[ end trace 0000000000000000 ]--- [ 60.149058] RIP: 0010:ni_create_attr_list+0x505/0x860 [ 60.153975] Code: 7e 10 e8 5e d0 d0 ff 45 0f b7 76 10 48 8d 7b 16 e8 00 d1 d0 ff 66 44 89 73 16 4d 8d 75 0e 4c 89 f7 e8 3f d0 d0 ff 4c 8d8 [ 60.172443] RSP: 0018:ffff88800a56f1e0 EFLAGS: 00010282 [ 60.176246] RAX: 0000000000000001 RBX: ffff88800b7b5088 RCX: ffffffffb83079fe [ 60.182752] RDX: 0000000000000001 RSI: 0000000000000008 RDI: ffffffffbb7f9fc0 [ 60.189949] RBP: ffff88800a56f3a8 R08: ffff88800b7b50a0 R09: fffffbfff76ff3f9 [ 60.196950] R10: ffffffffbb7f9fc7 R11: fffffbfff76ff3f8 R12: ffff88800b756180 [ 60.203671] R13: 0000000000000000 R14: 000000000000000e R15: 0000000000000050 [ 60.209595] FS: 00007feaa8c96440(0000) GS:ffff88806d400000(0000) knlGS:0000000000000000 [ 60.216299] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 60.222276] CR2: 00007f3a2e0b1000 CR3: 000000000a5bc000 CR4: 00000000000006f0 Signed-off-by: Edward Lo Signed-off-by: Konstantin Komarov Signed-off-by: Sasha Levin --- fs/ntfs3/frecord.c | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/fs/ntfs3/frecord.c b/fs/ntfs3/frecord.c index d24e12d348d4..9a1744955d1c 100644 --- a/fs/ntfs3/frecord.c +++ b/fs/ntfs3/frecord.c @@ -849,6 +849,7 @@ int ni_create_attr_list(struct ntfs_inode *ni) if (err) goto out1; + err = -EINVAL; /* Call mi_remove_attr() in reverse order to keep pointers 'arr_move' valid. */ while (to_free > 0) { struct ATTRIB *b = arr_move[--nb]; @@ -857,7 +858,8 @@ int ni_create_attr_list(struct ntfs_inode *ni) attr = mi_insert_attr(mi, b->type, Add2Ptr(b, name_off), b->name_len, asize, name_off); - WARN_ON(!attr); + if (!attr) + goto out1; mi_get_ref(mi, &le_b[nb]->ref); le_b[nb]->id = attr->id; @@ -867,17 +869,20 @@ int ni_create_attr_list(struct ntfs_inode *ni) attr->id = le_b[nb]->id; /* Remove from primary record. 
*/ - WARN_ON(!mi_remove_attr(NULL, &ni->mi, b)); + if (!mi_remove_attr(NULL, &ni->mi, b)) + goto out1; if (to_free <= asize) break; to_free -= asize; - WARN_ON(!nb); + if (!nb) + goto out1; } attr = mi_insert_attr(&ni->mi, ATTR_LIST, NULL, 0, lsize + SIZEOF_RESIDENT, SIZEOF_RESIDENT); - WARN_ON(!attr); + if (!attr) + goto out1; attr->non_res = 0; attr->flags = 0; @@ -897,9 +902,10 @@ int ni_create_attr_list(struct ntfs_inode *ni) kfree(ni->attr_list.le); ni->attr_list.le = NULL; ni->attr_list.size = 0; + return err; out: - return err; + return 0; } /* From 3a00ec562f8cb4c926583f1a18386c830ce3381f Mon Sep 17 00:00:00 2001 From: Jia-Ju Bai Date: Tue, 21 Mar 2023 21:22:11 +0800 Subject: [PATCH 407/513] fs: ntfs3: Fix possible null-pointer dereferences in mi_read() [ Upstream commit 97498cd610c0d030a7bd49a7efad974790661162 ] In a previous commit 2681631c2973 ("fs/ntfs3: Add null pointer check to attr_load_runs_vcn"), ni can be NULL in attr_load_runs_vcn(), and thus it should be checked before being used. However, in the call stack of this commit, mft_ni in mi_read() is aliased with ni in attr_load_runs_vcn(), and it is also used in mi_read() at two places: mi_read() rw_lock = &mft_ni->file.run_lock -> No check attr_load_runs_vcn(mft_ni, ...) ni (namely mft_ni) is checked in the previous commit attr_load_runs_vcn(..., &mft_ni->file.run) -> No check Thus, to avoid possible null-pointer dereferences, the related checks should be added. These bugs are reported by a static analysis tool implemented by myself, and they are found by extending a known bug fixed in the previous commit. Thus, they could be theoretical bugs. Signed-off-by: Jia-Ju Bai Signed-off-by: Konstantin Komarov Signed-off-by: Sasha Levin --- fs/ntfs3/record.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c index 41f6e578966b..3d222b1c8f03 100644 --- a/fs/ntfs3/record.c +++ b/fs/ntfs3/record.c @@ -124,7 +124,7 @@ int mi_read(struct mft_inode *mi, bool is_mft) struct rw_semaphore *rw_lock = NULL; if (is_mounted(sbi)) { - if (!is_mft) { + if (!is_mft && mft_ni) { rw_lock = &mft_ni->file.run_lock; down_read(rw_lock); } @@ -148,7 +148,7 @@ int mi_read(struct mft_inode *mi, bool is_mft) ni_lock(mft_ni); down_write(rw_lock); } - err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, &mft_ni->file.run, + err = attr_load_runs_vcn(mft_ni, ATTR_DATA, NULL, 0, run, vbo >> sbi->cluster_bits); if (rw_lock) { up_write(rw_lock); From c7d8b5f46f06faad683dae416540d1ac6c5964e4 Mon Sep 17 00:00:00 2001 From: Konstantin Komarov Date: Mon, 8 May 2023 11:36:28 +0400 Subject: [PATCH 408/513] fs/ntfs3: Mark ntfs dirty when on-disk struct is corrupted [ Upstream commit e0f363a98830e8d7d70fbaf91c07ae0b7c57aafe ] Signed-off-by: Konstantin Komarov Signed-off-by: Sasha Levin --- fs/ntfs3/fsntfs.c | 2 +- fs/ntfs3/index.c | 6 ++++++ fs/ntfs3/ntfs_fs.h | 2 ++ fs/ntfs3/record.c | 6 ++++++ 4 files changed, 15 insertions(+), 1 deletion(-) diff --git a/fs/ntfs3/fsntfs.c b/fs/ntfs3/fsntfs.c index 3c823613de97..0ae70010b01d 100644 --- a/fs/ntfs3/fsntfs.c +++ b/fs/ntfs3/fsntfs.c @@ -154,7 +154,7 @@ int ntfs_fix_post_read(struct NTFS_RECORD_HEADER *rhdr, size_t bytes, /* Check errors. */ if ((fo & 1) || fo + fn * sizeof(short) > SECTOR_SIZE || !fn-- || fn * SECTOR_SIZE > bytes) { - return -EINVAL; /* Native chkntfs returns ok! */ + return -E_NTFS_CORRUPT; } /* Get fixup pointer. 
*/ diff --git a/fs/ntfs3/index.c b/fs/ntfs3/index.c index 124eba7238fd..7705adc926b8 100644 --- a/fs/ntfs3/index.c +++ b/fs/ntfs3/index.c @@ -1112,6 +1112,12 @@ int indx_read(struct ntfs_index *indx, struct ntfs_inode *ni, CLST vbn, *node = in; out: + if (err == -E_NTFS_CORRUPT) { + ntfs_inode_err(&ni->vfs_inode, "directory corrupted"); + ntfs_set_state(ni->mi.sbi, NTFS_DIRTY_ERROR); + err = -EINVAL; + } + if (ib != in->index) kfree(ib); diff --git a/fs/ntfs3/ntfs_fs.h b/fs/ntfs3/ntfs_fs.h index fc0eb93c76de..510ed2ea1c48 100644 --- a/fs/ntfs3/ntfs_fs.h +++ b/fs/ntfs3/ntfs_fs.h @@ -54,6 +54,8 @@ enum utf16_endian; #define E_NTFS_NONRESIDENT 556 /* NTFS specific error code about punch hole. */ #define E_NTFS_NOTALIGNED 557 +/* NTFS specific error code when on-disk struct is corrupted. */ +#define E_NTFS_CORRUPT 558 /* sbi->flags */ diff --git a/fs/ntfs3/record.c b/fs/ntfs3/record.c index 3d222b1c8f03..938fc286963f 100644 --- a/fs/ntfs3/record.c +++ b/fs/ntfs3/record.c @@ -180,6 +180,12 @@ int mi_read(struct mft_inode *mi, bool is_mft) return 0; out: + if (err == -E_NTFS_CORRUPT) { + ntfs_err(sbi->sb, "mft corrupted"); + ntfs_set_state(sbi, NTFS_DIRTY_ERROR); + err = -EINVAL; + } + return err; } From 5ed4dbc137267ec55f6e51a19b66de2bbaa993d9 Mon Sep 17 00:00:00 2001 From: dengxiang Date: Mon, 3 Jul 2023 10:17:51 +0800 Subject: [PATCH 409/513] ALSA: hda/realtek: Add quirks for Unis H3C Desktop B760 & Q760 [ Upstream commit 73f1c75d5e6bd8ce2a887ef493a66ad1b16ed704 ] These models use NSIWAY amplifiers for internal speaker, but cannot put sound outside from these amplifiers. So eapd verbs are needed to initialize the amplifiers. They can be added during boot to get working sound out of internal speaker. Signed-off-by: dengxiang Link: https://lore.kernel.org/r/20230703021751.2945750-1-dengxiang@nfschina.com Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin --- sound/pci/hda/patch_realtek.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 965720b1d1b1..e335f3b5338f 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -10809,6 +10809,7 @@ enum { ALC897_FIXUP_HP_HSMIC_VERB, ALC897_FIXUP_LENOVO_HEADSET_MODE, ALC897_FIXUP_HEADSET_MIC_PIN2, + ALC897_FIXUP_UNIS_H3C_X500S, }; static const struct hda_fixup alc662_fixups[] = { @@ -11248,6 +11249,13 @@ static const struct hda_fixup alc662_fixups[] = { .chained = true, .chain_id = ALC897_FIXUP_LENOVO_HEADSET_MODE }, + [ALC897_FIXUP_UNIS_H3C_X500S] = { + .type = HDA_FIXUP_VERBS, + .v.verbs = (const struct hda_verb[]) { + { 0x14, AC_VERB_SET_EAPD_BTLENABLE, 0 }, + {} + }, + }, }; static const struct snd_pci_quirk alc662_fixup_tbl[] = { @@ -11409,6 +11417,7 @@ static const struct hda_model_fixup alc662_fixup_models[] = { {.id = ALC662_FIXUP_USI_HEADSET_MODE, .name = "usi-headset"}, {.id = ALC662_FIXUP_LENOVO_MULTI_CODECS, .name = "dual-codecs"}, {.id = ALC669_FIXUP_ACER_ASPIRE_ETHOS, .name = "aspire-ethos"}, + {.id = ALC897_FIXUP_UNIS_H3C_X500S, .name = "unis-h3c-x500s"}, {} }; From 8703b26387e1fa4f8749db98d24c67617b873acb Mon Sep 17 00:00:00 2001 From: Tuo Li Date: Mon, 3 Jul 2023 11:10:16 +0800 Subject: [PATCH 410/513] ALSA: hda: fix a possible null-pointer dereference due to data race in snd_hdac_regmap_sync() [ Upstream commit 1f4a08fed450db87fbb5ff5105354158bdbe1a22 ] The variable codec->regmap is often protected by the lock codec->regmap_lock when is accessed. 
However, it is accessed without holding the lock when is accessed in snd_hdac_regmap_sync(): if (codec->regmap) In my opinion, this may be a harmful race, because if codec->regmap is set to NULL right after the condition is checked, a null-pointer dereference can occur in the called function regcache_sync(): map->lock(map->lock_arg); --> Line 360 in drivers/base/regmap/regcache.c To fix this possible null-pointer dereference caused by data race, the mutex_lock coverage is extended to protect the if statement as well as the function call to regcache_sync(). [ Note: the lack of the regmap_lock itself is harmless for the current codec driver implementations, as snd_hdac_regmap_sync() is only for PM runtime resume that is prohibited during the codec probe. But the change makes the whole code more consistent, so it's merged as is -- tiwai ] Reported-by: BassCheck Signed-off-by: Tuo Li Link: https://lore.kernel.org/r/20230703031016.1184711-1-islituo@gmail.com Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin --- sound/hda/hdac_regmap.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/sound/hda/hdac_regmap.c b/sound/hda/hdac_regmap.c index fe3587547cfe..39610a15bcc9 100644 --- a/sound/hda/hdac_regmap.c +++ b/sound/hda/hdac_regmap.c @@ -597,10 +597,9 @@ EXPORT_SYMBOL_GPL(snd_hdac_regmap_update_raw_once); */ void snd_hdac_regmap_sync(struct hdac_device *codec) { - if (codec->regmap) { - mutex_lock(&codec->regmap_lock); + mutex_lock(&codec->regmap_lock); + if (codec->regmap) regcache_sync(codec->regmap); - mutex_unlock(&codec->regmap_lock); - } + mutex_unlock(&codec->regmap_lock); } EXPORT_SYMBOL_GPL(snd_hdac_regmap_sync); From 6657ecbcc39cccf51a843538006b5eefcad927d8 Mon Sep 17 00:00:00 2001 From: Benjamin Gray Date: Mon, 10 Jul 2023 14:41:43 +1000 Subject: [PATCH 411/513] powerpc/kasan: Disable KCOV in KASAN code [ Upstream commit ccb381e1af1ace292153c88eb1fffa5683d16a20 ] As per the generic KASAN code in mm/kasan, disable KCOV with KCOV_INSTRUMENT := n in the makefile. This fixes a ppc64 boot hang when KCOV and KASAN are enabled. kasan_early_init() gets called before a PACA is initialised, but the KCOV hook expects a valid PACA. Suggested-by: Christophe Leroy Signed-off-by: Benjamin Gray Signed-off-by: Michael Ellerman Link: https://msgid.link/20230710044143.146840-1-bgray@linux.ibm.com Signed-off-by: Sasha Levin --- arch/powerpc/mm/kasan/Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/powerpc/mm/kasan/Makefile b/arch/powerpc/mm/kasan/Makefile index bb1a5408b86b..8636b17c6a20 100644 --- a/arch/powerpc/mm/kasan/Makefile +++ b/arch/powerpc/mm/kasan/Makefile @@ -1,6 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 KASAN_SANITIZE := n +KCOV_INSTRUMENT := n obj-$(CONFIG_PPC32) += kasan_init_32.o obj-$(CONFIG_PPC_8xx) += 8xx.o From 49b830d75f03d5dd41146d10e4d3e2a8211c4b94 Mon Sep 17 00:00:00 2001 From: Chen Lin Date: Wed, 19 Jul 2023 15:58:47 +0800 Subject: [PATCH 412/513] ring-buffer: Do not swap cpu_buffer during resize process [ Upstream commit 8a96c0288d0737ad77882024974c075345c72011 ] When ring_buffer_swap_cpu was called during resize process, the cpu buffer was swapped in the middle, resulting in incorrect state. Continuing to run in the wrong state will result in oops. This issue can be easily reproduced using the following two scripts: /tmp # cat test1.sh //#! /bin/sh for i in `seq 0 100000` do echo 2000 > /sys/kernel/debug/tracing/buffer_size_kb sleep 0.5 echo 5000 > /sys/kernel/debug/tracing/buffer_size_kb sleep 0.5 done /tmp # cat test2.sh //#! 
/bin/sh for i in `seq 0 100000` do echo irqsoff > /sys/kernel/debug/tracing/current_tracer sleep 1 echo nop > /sys/kernel/debug/tracing/current_tracer sleep 1 done /tmp # ./test1.sh & /tmp # ./test2.sh & A typical oops log is as follows, sometimes with other different oops logs. [ 231.711293] WARNING: CPU: 0 PID: 9 at kernel/trace/ring_buffer.c:2026 rb_update_pages+0x378/0x3f8 [ 231.713375] Modules linked in: [ 231.714735] CPU: 0 PID: 9 Comm: kworker/0:1 Tainted: G W 6.5.0-rc1-00276-g20edcec23f92 #15 [ 231.716750] Hardware name: linux,dummy-virt (DT) [ 231.718152] Workqueue: events update_pages_handler [ 231.719714] pstate: 60000005 (nZCv daif -PAN -UAO -TCO -DIT -SSBS BTYPE=--) [ 231.721171] pc : rb_update_pages+0x378/0x3f8 [ 231.722212] lr : rb_update_pages+0x25c/0x3f8 [ 231.723248] sp : ffff800082b9bd50 [ 231.724169] x29: ffff800082b9bd50 x28: ffff8000825f7000 x27: 0000000000000000 [ 231.726102] x26: 0000000000000001 x25: fffffffffffff010 x24: 0000000000000ff0 [ 231.728122] x23: ffff0000c3a0b600 x22: ffff0000c3a0b5c0 x21: fffffffffffffe0a [ 231.730203] x20: ffff0000c3a0b600 x19: ffff0000c0102400 x18: 0000000000000000 [ 231.732329] x17: 0000000000000000 x16: 0000000000000000 x15: 0000ffffe7aa8510 [ 231.734212] x14: 0000000000000000 x13: 0000000000000000 x12: 0000000000000002 [ 231.736291] x11: ffff8000826998a8 x10: ffff800082b9baf0 x9 : ffff800081137558 [ 231.738195] x8 : fffffc00030e82c8 x7 : 0000000000000000 x6 : 0000000000000001 [ 231.740192] x5 : ffff0000ffbafe00 x4 : 0000000000000000 x3 : 0000000000000000 [ 231.742118] x2 : 00000000000006aa x1 : 0000000000000001 x0 : ffff0000c0007208 [ 231.744196] Call trace: [ 231.744892] rb_update_pages+0x378/0x3f8 [ 231.745893] update_pages_handler+0x1c/0x38 [ 231.746893] process_one_work+0x1f0/0x468 [ 231.747852] worker_thread+0x54/0x410 [ 231.748737] kthread+0x124/0x138 [ 231.749549] ret_from_fork+0x10/0x20 [ 231.750434] ---[ end trace 0000000000000000 ]--- [ 233.720486] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000 [ 233.721696] Mem abort info: [ 233.721935] ESR = 0x0000000096000004 [ 233.722283] EC = 0x25: DABT (current EL), IL = 32 bits [ 233.722596] SET = 0, FnV = 0 [ 233.722805] EA = 0, S1PTW = 0 [ 233.723026] FSC = 0x04: level 0 translation fault [ 233.723458] Data abort info: [ 233.723734] ISV = 0, ISS = 0x00000004, ISS2 = 0x00000000 [ 233.724176] CM = 0, WnR = 0, TnD = 0, TagAccess = 0 [ 233.724589] GCS = 0, Overlay = 0, DirtyBit = 0, Xs = 0 [ 233.725075] user pgtable: 4k pages, 48-bit VAs, pgdp=0000000104943000 [ 233.725592] [0000000000000000] pgd=0000000000000000, p4d=0000000000000000 [ 233.726231] Internal error: Oops: 0000000096000004 [#1] PREEMPT SMP [ 233.726720] Modules linked in: [ 233.727007] CPU: 0 PID: 9 Comm: kworker/0:1 Tainted: G W 6.5.0-rc1-00276-g20edcec23f92 #15 [ 233.727777] Hardware name: linux,dummy-virt (DT) [ 233.728225] Workqueue: events update_pages_handler [ 233.728655] pstate: 200000c5 (nzCv daIF -PAN -UAO -TCO -DIT -SSBS BTYPE=--) [ 233.729054] pc : rb_update_pages+0x1a8/0x3f8 [ 233.729334] lr : rb_update_pages+0x154/0x3f8 [ 233.729592] sp : ffff800082b9bd50 [ 233.729792] x29: ffff800082b9bd50 x28: ffff8000825f7000 x27: 0000000000000000 [ 233.730220] x26: 0000000000000000 x25: ffff800082a8b840 x24: ffff0000c0102418 [ 233.730653] x23: 0000000000000000 x22: fffffc000304c880 x21: 0000000000000003 [ 233.731105] x20: 00000000000001f4 x19: ffff0000c0102400 x18: ffff800082fcbc58 [ 233.731727] x17: 0000000000000000 x16: 0000000000000001 x15: 0000000000000001 [ 233.732282] x14: 
ffff8000825fe0c8 x13: 0000000000000001 x12: 0000000000000000 [ 233.732709] x11: ffff8000826998a8 x10: 0000000000000ae0 x9 : ffff8000801b760c [ 233.733148] x8 : fefefefefefefeff x7 : 0000000000000018 x6 : ffff0000c03298c0 [ 233.733553] x5 : 0000000000000002 x4 : 0000000000000000 x3 : 0000000000000000 [ 233.733972] x2 : ffff0000c3a0b600 x1 : 0000000000000000 x0 : 0000000000000000 [ 233.734418] Call trace: [ 233.734593] rb_update_pages+0x1a8/0x3f8 [ 233.734853] update_pages_handler+0x1c/0x38 [ 233.735148] process_one_work+0x1f0/0x468 [ 233.735525] worker_thread+0x54/0x410 [ 233.735852] kthread+0x124/0x138 [ 233.736064] ret_from_fork+0x10/0x20 [ 233.736387] Code: 92400000 910006b5 aa000021 aa0303f7 (f9400060) [ 233.736959] ---[ end trace 0000000000000000 ]--- After analysis, the seq of the error is as follows [1-5]: int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu_id) { for_each_buffer_cpu(buffer, cpu) { cpu_buffer = buffer->buffers[cpu]; //1. get cpu_buffer, aka cpu_buffer(A) ... ... schedule_work_on(cpu, &cpu_buffer->update_pages_work); //2. 'update_pages_work' is queue on 'cpu', cpu_buffer(A) is passed to // update_pages_handler, do the update process, set 'update_done' in // complete(&cpu_buffer->update_done) and to wakeup resize process. //----> //3. Just at this moment, ring_buffer_swap_cpu is triggered, //cpu_buffer(A) be swaped to cpu_buffer(B), the max_buffer. //ring_buffer_swap_cpu is called as the 'Call trace' below. Call trace: dump_backtrace+0x0/0x2f8 show_stack+0x18/0x28 dump_stack+0x12c/0x188 ring_buffer_swap_cpu+0x2f8/0x328 update_max_tr_single+0x180/0x210 check_critical_timing+0x2b4/0x2c8 tracer_hardirqs_on+0x1c0/0x200 trace_hardirqs_on+0xec/0x378 el0_svc_common+0x64/0x260 do_el0_svc+0x90/0xf8 el0_svc+0x20/0x30 el0_sync_handler+0xb0/0xb8 el0_sync+0x180/0x1c0 //<---- /* wait for all the updates to complete */ for_each_buffer_cpu(buffer, cpu) { cpu_buffer = buffer->buffers[cpu]; //4. get cpu_buffer, cpu_buffer(B) is used in the following process, //the state of cpu_buffer(A) and cpu_buffer(B) is totally wrong. //for example, cpu_buffer(A)->update_done will leave be set 1, and will //not 'wait_for_completion' at the next resize round. if (!cpu_buffer->nr_pages_to_update) continue; if (cpu_online(cpu)) wait_for_completion(&cpu_buffer->update_done); cpu_buffer->nr_pages_to_update = 0; } ... } //5. the state of cpu_buffer(A) and cpu_buffer(B) is totally wrong, //Continuing to run in the wrong state, then oops occurs. 
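The essence of the fix can be sketched outside the kernel: a resize marks itself in an atomic counter, and the swap path refuses to run while that counter is non-zero. The user-space sketch below uses invented names (demo_buffer, demo_resize, demo_swap) and C11 atomics purely for illustration; only the ordering mirrors the resizing guard the patch adds to ring_buffer_resize() and ring_buffer_swap_cpu().

#include <stdatomic.h>
#include <stdbool.h>

struct demo_buffer {
	atomic_int resizing;	/* counts resizes in flight, like buffer->resizing */
	void *pages;		/* stand-in for the per-cpu page lists */
};

static void demo_resize(struct demo_buffer *buf, void *new_pages)
{
	atomic_fetch_add(&buf->resizing, 1);	/* mirrors atomic_inc(&buffer->resizing) */
	buf->pages = new_pages;			/* the multi-step update happens while the flag is raised */
	atomic_fetch_sub(&buf->resizing, 1);	/* mirrors atomic_dec() on every exit path */
}

static bool demo_swap(struct demo_buffer *a, struct demo_buffer *b)
{
	/* Same idea as the new checks in ring_buffer_swap_cpu(): refuse to
	 * swap while either side is being resized instead of exchanging
	 * buffers out from under the resize worker. */
	if (atomic_load(&a->resizing) || atomic_load(&b->resizing))
		return false;	/* caller keeps the old buffer and reports the failed swap */

	void *tmp = a->pages;
	a->pages = b->pages;
	b->pages = tmp;
	return true;
}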
Link: https://lore.kernel.org/linux-trace-kernel/202307191558478409990@zte.com.cn Signed-off-by: Chen Lin Signed-off-by: Steven Rostedt (Google) Signed-off-by: Sasha Levin --- kernel/trace/ring_buffer.c | 14 +++++++++++++- kernel/trace/trace.c | 3 ++- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c index e1cef097b0df..db7cefd196ce 100644 --- a/kernel/trace/ring_buffer.c +++ b/kernel/trace/ring_buffer.c @@ -535,6 +535,7 @@ struct trace_buffer { unsigned flags; int cpus; atomic_t record_disabled; + atomic_t resizing; cpumask_var_t cpumask; struct lock_class_key *reader_lock_key; @@ -2137,7 +2138,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, /* prevent another thread from changing buffer sizes */ mutex_lock(&buffer->mutex); - + atomic_inc(&buffer->resizing); if (cpu_id == RING_BUFFER_ALL_CPUS) { /* @@ -2276,6 +2277,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, atomic_dec(&buffer->record_disabled); } + atomic_dec(&buffer->resizing); mutex_unlock(&buffer->mutex); return 0; @@ -2296,6 +2298,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, } } out_err_unlock: + atomic_dec(&buffer->resizing); mutex_unlock(&buffer->mutex); return err; } @@ -5497,6 +5500,15 @@ int ring_buffer_swap_cpu(struct trace_buffer *buffer_a, if (local_read(&cpu_buffer_b->committing)) goto out_dec; + /* + * When resize is in progress, we cannot swap it because + * it will mess the state of the cpu buffer. + */ + if (atomic_read(&buffer_a->resizing)) + goto out_dec; + if (atomic_read(&buffer_b->resizing)) + goto out_dec; + buffer_a->buffers[cpu] = cpu_buffer_b; buffer_b->buffers[cpu] = cpu_buffer_a; diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index ae7005af78c3..d4c381f06b7b 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -1872,9 +1872,10 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) * place on this CPU. We fail to record, but we reset * the max trace buffer (no one writes directly to it) * and flag that it failed. + * Another reason is resize is in progress. */ trace_array_printk_buf(tr->max_buffer.buffer, _THIS_IP_, - "Failed to swap buffers due to commit in progress\n"); + "Failed to swap buffers due to commit or resize in progress\n"); } WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY); From 565b96d9a7ab68276d88a265ebe71c05026c7ceb Mon Sep 17 00:00:00 2001 From: Cosmin Tanislav Date: Sun, 5 Dec 2021 13:40:43 +0200 Subject: [PATCH 413/513] iio: add addac subdirectory [ Upstream commit b62e2e1763cda3a6c494ed754317f19be1249297 ] For IIO devices that expose both ADC and DAC functionality. 
Signed-off-by: Cosmin Tanislav Link: https://lore.kernel.org/r/20211205114045.173612-2-cosmin.tanislav@analog.com Signed-off-by: Jonathan Cameron Stable-dep-of: 4f9b80aefb9e ("iio: addac: stx104: Fix race condition when converting analog-to-digital") Signed-off-by: Sasha Levin --- drivers/iio/Kconfig | 1 + drivers/iio/Makefile | 1 + drivers/iio/addac/Kconfig | 8 ++++++++ drivers/iio/addac/Makefile | 6 ++++++ 4 files changed, 16 insertions(+) create mode 100644 drivers/iio/addac/Kconfig create mode 100644 drivers/iio/addac/Makefile diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig index 2334ad249b46..4fb4321a72cb 100644 --- a/drivers/iio/Kconfig +++ b/drivers/iio/Kconfig @@ -70,6 +70,7 @@ config IIO_TRIGGERED_EVENT source "drivers/iio/accel/Kconfig" source "drivers/iio/adc/Kconfig" +source "drivers/iio/addac/Kconfig" source "drivers/iio/afe/Kconfig" source "drivers/iio/amplifiers/Kconfig" source "drivers/iio/cdc/Kconfig" diff --git a/drivers/iio/Makefile b/drivers/iio/Makefile index 65e39bd4f934..8d48c70fee4d 100644 --- a/drivers/iio/Makefile +++ b/drivers/iio/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_IIO_TRIGGERED_EVENT) += industrialio-triggered-event.o obj-y += accel/ obj-y += adc/ +obj-y += addac/ obj-y += afe/ obj-y += amplifiers/ obj-y += buffer/ diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig new file mode 100644 index 000000000000..2e64d7755d5e --- /dev/null +++ b/drivers/iio/addac/Kconfig @@ -0,0 +1,8 @@ +# +# ADC DAC drivers +# +# When adding new entries keep the list in alphabetical order + +menu "Analog to digital and digital to analog converters" + +endmenu diff --git a/drivers/iio/addac/Makefile b/drivers/iio/addac/Makefile new file mode 100644 index 000000000000..b888b9ee12da --- /dev/null +++ b/drivers/iio/addac/Makefile @@ -0,0 +1,6 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# Makefile for industrial I/O ADDAC drivers +# + +# When adding new entries keep the list in alphabetical order From 6089d354346f8e04f4354f09f873ae5d94cd8e05 Mon Sep 17 00:00:00 2001 From: William Breathitt Gray Date: Tue, 10 May 2022 13:30:59 -0400 Subject: [PATCH 414/513] iio: adc: stx104: Utilize iomap interface [ Upstream commit 73b8390cc27e096ab157be261ccc4eaaa6db87af ] This driver doesn't need to access I/O ports directly via inb()/outb() and friends. This patch abstracts such access by calling ioport_map() to enable the use of more typical ioread8()/iowrite8() I/O memory accessor calls. 
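In outline the conversion looks like the sketch below: map the legacy port window once with devm_ioport_map() and go through the generic accessors afterwards. The device name, window size and register offsets are invented for illustration; the real offsets are the STX104's own.

#include <linux/bits.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/io.h>

#define DEMO_EXTENT 16	/* size of the legacy I/O-port window, illustrative only */

struct demo_priv {
	void __iomem *base;	/* mapped cookie replaces the raw port number */
};

static int demo_map_ports(struct device *dev, struct demo_priv *priv,
			  unsigned long port)
{
	/* Map the port range once; from here on the driver uses the generic
	 * ioread8()/iowrite8() accessors instead of inb()/outb(). */
	priv->base = devm_ioport_map(dev, port, DEMO_EXTENT);
	if (!priv->base)
		return -ENOMEM;

	iowrite8(0, priv->base + 9);		/* e.g. a configuration write */
	if (ioread8(priv->base + 8) & BIT(5))	/* e.g. probing a capability bit */
		dev_info(dev, "optional feature present\n");

	return 0;
}

One benefit of going through the mapped cookie is that the same ioreadN()/iowriteN() calls also work for memory-mapped registers, so the driver is no longer tied to the x86-style port instructions.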
Suggested-by: David Laight Signed-off-by: William Breathitt Gray Reviewed-by: Linus Walleij Link: https://lore.kernel.org/r/64673797df382c52fc32fce24348b25a0b05e73a.1652201921.git.william.gray@linaro.org Signed-off-by: Jonathan Cameron Stable-dep-of: 4f9b80aefb9e ("iio: addac: stx104: Fix race condition when converting analog-to-digital") Signed-off-by: Sasha Levin --- drivers/iio/adc/stx104.c | 56 +++++++++++++++++++++------------------- 1 file changed, 29 insertions(+), 27 deletions(-) diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/adc/stx104.c index 55bd2dc514e9..7552351bfed9 100644 --- a/drivers/iio/adc/stx104.c +++ b/drivers/iio/adc/stx104.c @@ -51,7 +51,7 @@ MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses"); */ struct stx104_iio { unsigned int chan_out_states[STX104_NUM_OUT_CHAN]; - unsigned int base; + void __iomem *base; }; /** @@ -64,7 +64,7 @@ struct stx104_iio { struct stx104_gpio { struct gpio_chip chip; spinlock_t lock; - unsigned int base; + void __iomem *base; unsigned int out_state; }; @@ -79,7 +79,7 @@ static int stx104_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_HARDWAREGAIN: /* get gain configuration */ - adc_config = inb(priv->base + 11); + adc_config = ioread8(priv->base + 11); gain = adc_config & 0x3; *val = 1 << gain; @@ -91,24 +91,24 @@ static int stx104_read_raw(struct iio_dev *indio_dev, } /* select ADC channel */ - outb(chan->channel | (chan->channel << 4), priv->base + 2); + iowrite8(chan->channel | (chan->channel << 4), priv->base + 2); /* trigger ADC sample capture and wait for completion */ - outb(0, priv->base); - while (inb(priv->base + 8) & BIT(7)); + iowrite8(0, priv->base); + while (ioread8(priv->base + 8) & BIT(7)); - *val = inw(priv->base); + *val = ioread16(priv->base); return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: /* get ADC bipolar/unipolar configuration */ - adc_config = inb(priv->base + 11); + adc_config = ioread8(priv->base + 11); adbu = !(adc_config & BIT(2)); *val = -32768 * adbu; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: /* get ADC bipolar/unipolar and gain configuration */ - adc_config = inb(priv->base + 11); + adc_config = ioread8(priv->base + 11); adbu = !(adc_config & BIT(2)); gain = adc_config & 0x3; @@ -130,16 +130,16 @@ static int stx104_write_raw(struct iio_dev *indio_dev, /* Only four gain states (x1, x2, x4, x8) */ switch (val) { case 1: - outb(0, priv->base + 11); + iowrite8(0, priv->base + 11); break; case 2: - outb(1, priv->base + 11); + iowrite8(1, priv->base + 11); break; case 4: - outb(2, priv->base + 11); + iowrite8(2, priv->base + 11); break; case 8: - outb(3, priv->base + 11); + iowrite8(3, priv->base + 11); break; default: return -EINVAL; @@ -153,7 +153,7 @@ static int stx104_write_raw(struct iio_dev *indio_dev, return -EINVAL; priv->chan_out_states[chan->channel] = val; - outw(val, priv->base + 4 + 2 * chan->channel); + iowrite16(val, priv->base + 4 + 2 * chan->channel); return 0; } @@ -222,7 +222,7 @@ static int stx104_gpio_get(struct gpio_chip *chip, unsigned int offset) if (offset >= 4) return -EINVAL; - return !!(inb(stx104gpio->base) & BIT(offset)); + return !!(ioread8(stx104gpio->base) & BIT(offset)); } static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask, @@ -230,7 +230,7 @@ static int stx104_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask, { struct stx104_gpio *const stx104gpio = gpiochip_get_data(chip); - *bits = inb(stx104gpio->base); + *bits = ioread8(stx104gpio->base); return 0; } @@ -252,7 +252,7 @@ static void 
stx104_gpio_set(struct gpio_chip *chip, unsigned int offset, else stx104gpio->out_state &= ~mask; - outb(stx104gpio->out_state, stx104gpio->base); + iowrite8(stx104gpio->out_state, stx104gpio->base); spin_unlock_irqrestore(&stx104gpio->lock, flags); } @@ -279,7 +279,7 @@ static void stx104_gpio_set_multiple(struct gpio_chip *chip, stx104gpio->out_state &= ~*mask; stx104gpio->out_state |= *mask & *bits; - outb(stx104gpio->out_state, stx104gpio->base); + iowrite8(stx104gpio->out_state, stx104gpio->base); spin_unlock_irqrestore(&stx104gpio->lock, flags); } @@ -306,11 +306,16 @@ static int stx104_probe(struct device *dev, unsigned int id) return -EBUSY; } + priv = iio_priv(indio_dev); + priv->base = devm_ioport_map(dev, base[id], STX104_EXTENT); + if (!priv->base) + return -ENOMEM; + indio_dev->info = &stx104_info; indio_dev->modes = INDIO_DIRECT_MODE; /* determine if differential inputs */ - if (inb(base[id] + 8) & BIT(5)) { + if (ioread8(priv->base + 8) & BIT(5)) { indio_dev->num_channels = ARRAY_SIZE(stx104_channels_diff); indio_dev->channels = stx104_channels_diff; } else { @@ -320,18 +325,15 @@ static int stx104_probe(struct device *dev, unsigned int id) indio_dev->name = dev_name(dev); - priv = iio_priv(indio_dev); - priv->base = base[id]; - /* configure device for software trigger operation */ - outb(0, base[id] + 9); + iowrite8(0, priv->base + 9); /* initialize gain setting to x1 */ - outb(0, base[id] + 11); + iowrite8(0, priv->base + 11); /* initialize DAC output to 0V */ - outw(0, base[id] + 4); - outw(0, base[id] + 6); + iowrite16(0, priv->base + 4); + iowrite16(0, priv->base + 6); stx104gpio->chip.label = dev_name(dev); stx104gpio->chip.parent = dev; @@ -346,7 +348,7 @@ static int stx104_probe(struct device *dev, unsigned int id) stx104gpio->chip.get_multiple = stx104_gpio_get_multiple; stx104gpio->chip.set = stx104_gpio_set; stx104gpio->chip.set_multiple = stx104_gpio_set_multiple; - stx104gpio->base = base[id] + 3; + stx104gpio->base = priv->base + 3; stx104gpio->out_state = 0x0; spin_lock_init(&stx104gpio->lock); From cadf8f2d7c9d06c3574a23745069fe4a73b62312 Mon Sep 17 00:00:00 2001 From: William Breathitt Gray Date: Thu, 7 Jul 2022 13:21:24 -0400 Subject: [PATCH 415/513] iio: adc: stx104: Implement and utilize register structures [ Upstream commit 6cfd14c54b1f42f29097244c1b6208f8268d7d5b ] Reduce magic numbers and improve code readability by implementing and utilizing named register data structures. 
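The technique amounts to laying a C struct over the mapped I/O window so that accesses name a field instead of adding a magic offset. A reduced sketch follows; the layout is made up rather than the real STX104 map, and the member sizes are chosen so the compiler inserts no padding, which is what makes such an overlay safe.

#include <linux/io.h>
#include <linux/types.h>

/* Offsets fall out of the member layout: data at +0, chan at +2,
 * dio at +3, dac[0]/dac[1] at +4/+6. */
struct demo_reg {
	u16 data;
	u8  chan;
	u8  dio;
	u16 dac[2];
};

static u16 demo_read_sample(struct demo_reg __iomem *reg, unsigned int channel)
{
	iowrite8(channel, &reg->chan);	/* "select channel" instead of base + 2 */
	return ioread16(&reg->data);	/* "read conversion" instead of base + 0 */
}

The price is that the struct must match the hardware exactly, which is why the real stx104_reg documents every register in its kernel-doc block.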
Tested-by: Fred Eckert Signed-off-by: William Breathitt Gray Link: https://lore.kernel.org/r/8cb91d5b53e57b066120e42ea07000d6c7ef5543.1657213745.git.william.gray@linaro.org Signed-off-by: Jonathan Cameron Stable-dep-of: 4f9b80aefb9e ("iio: addac: stx104: Fix race condition when converting analog-to-digital") Signed-off-by: Sasha Levin --- drivers/iio/adc/stx104.c | 74 +++++++++++++++++++++++++++------------- 1 file changed, 50 insertions(+), 24 deletions(-) diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/adc/stx104.c index 7552351bfed9..48a91a95e597 100644 --- a/drivers/iio/adc/stx104.c +++ b/drivers/iio/adc/stx104.c @@ -16,6 +16,7 @@ #include #include #include +#include #define STX104_OUT_CHAN(chan) { \ .type = IIO_VOLTAGE, \ @@ -44,14 +45,36 @@ static unsigned int num_stx104; module_param_hw_array(base, uint, ioport, &num_stx104, 0); MODULE_PARM_DESC(base, "Apex Embedded Systems STX104 base addresses"); +/** + * struct stx104_reg - device register structure + * @ssr_ad: Software Strobe Register and ADC Data + * @achan: ADC Channel + * @dio: Digital I/O + * @dac: DAC Channels + * @cir_asr: Clear Interrupts and ADC Status + * @acr: ADC Control + * @pccr_fsh: Pacer Clock Control and FIFO Status MSB + * @acfg: ADC Configuration + */ +struct stx104_reg { + u16 ssr_ad; + u8 achan; + u8 dio; + u16 dac[2]; + u8 cir_asr; + u8 acr; + u8 pccr_fsh; + u8 acfg; +}; + /** * struct stx104_iio - IIO device private data structure * @chan_out_states: channels' output states - * @base: base port address of the IIO device + * @reg: I/O address offset for the device registers */ struct stx104_iio { unsigned int chan_out_states[STX104_NUM_OUT_CHAN]; - void __iomem *base; + struct stx104_reg __iomem *reg; }; /** @@ -64,7 +87,7 @@ struct stx104_iio { struct stx104_gpio { struct gpio_chip chip; spinlock_t lock; - void __iomem *base; + u8 __iomem *base; unsigned int out_state; }; @@ -72,6 +95,7 @@ static int stx104_read_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan, int *val, int *val2, long mask) { struct stx104_iio *const priv = iio_priv(indio_dev); + struct stx104_reg __iomem *const reg = priv->reg; unsigned int adc_config; int adbu; int gain; @@ -79,7 +103,7 @@ static int stx104_read_raw(struct iio_dev *indio_dev, switch (mask) { case IIO_CHAN_INFO_HARDWAREGAIN: /* get gain configuration */ - adc_config = ioread8(priv->base + 11); + adc_config = ioread8(&reg->acfg); gain = adc_config & 0x3; *val = 1 << gain; @@ -91,24 +115,26 @@ static int stx104_read_raw(struct iio_dev *indio_dev, } /* select ADC channel */ - iowrite8(chan->channel | (chan->channel << 4), priv->base + 2); + iowrite8(chan->channel | (chan->channel << 4), &reg->achan); - /* trigger ADC sample capture and wait for completion */ - iowrite8(0, priv->base); - while (ioread8(priv->base + 8) & BIT(7)); + /* trigger ADC sample capture by writing to the 8-bit + * Software Strobe Register and wait for completion + */ + iowrite8(0, &reg->ssr_ad); + while (ioread8(&reg->cir_asr) & BIT(7)); - *val = ioread16(priv->base); + *val = ioread16(&reg->ssr_ad); return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: /* get ADC bipolar/unipolar configuration */ - adc_config = ioread8(priv->base + 11); + adc_config = ioread8(&reg->acfg); adbu = !(adc_config & BIT(2)); *val = -32768 * adbu; return IIO_VAL_INT; case IIO_CHAN_INFO_SCALE: /* get ADC bipolar/unipolar and gain configuration */ - adc_config = ioread8(priv->base + 11); + adc_config = ioread8(&reg->acfg); adbu = !(adc_config & BIT(2)); gain = adc_config & 0x3; @@ -130,16 +156,16 @@ static int stx104_write_raw(struct
iio_dev *indio_dev, /* Only four gain states (x1, x2, x4, x8) */ switch (val) { case 1: - iowrite8(0, priv->base + 11); + iowrite8(0, &priv->reg->acfg); break; case 2: - iowrite8(1, priv->base + 11); + iowrite8(1, &priv->reg->acfg); break; case 4: - iowrite8(2, priv->base + 11); + iowrite8(2, &priv->reg->acfg); break; case 8: - iowrite8(3, priv->base + 11); + iowrite8(3, &priv->reg->acfg); break; default: return -EINVAL; @@ -153,7 +179,7 @@ static int stx104_write_raw(struct iio_dev *indio_dev, return -EINVAL; priv->chan_out_states[chan->channel] = val; - iowrite16(val, priv->base + 4 + 2 * chan->channel); + iowrite16(val, &priv->reg->dac[chan->channel]); return 0; } @@ -307,15 +333,15 @@ static int stx104_probe(struct device *dev, unsigned int id) } priv = iio_priv(indio_dev); - priv->base = devm_ioport_map(dev, base[id], STX104_EXTENT); - if (!priv->base) + priv->reg = devm_ioport_map(dev, base[id], STX104_EXTENT); + if (!priv->reg) return -ENOMEM; indio_dev->info = &stx104_info; indio_dev->modes = INDIO_DIRECT_MODE; /* determine if differential inputs */ - if (ioread8(priv->base + 8) & BIT(5)) { + if (ioread8(&priv->reg->cir_asr) & BIT(5)) { indio_dev->num_channels = ARRAY_SIZE(stx104_channels_diff); indio_dev->channels = stx104_channels_diff; } else { @@ -326,14 +352,14 @@ static int stx104_probe(struct device *dev, unsigned int id) indio_dev->name = dev_name(dev); /* configure device for software trigger operation */ - iowrite8(0, priv->base + 9); + iowrite8(0, &priv->reg->acr); /* initialize gain setting to x1 */ - iowrite8(0, priv->base + 11); + iowrite8(0, &priv->reg->acfg); /* initialize DAC output to 0V */ - iowrite16(0, priv->base + 4); - iowrite16(0, priv->base + 6); + iowrite16(0, &priv->reg->dac[0]); + iowrite16(0, &priv->reg->dac[1]); stx104gpio->chip.label = dev_name(dev); stx104gpio->chip.parent = dev; @@ -348,7 +374,7 @@ static int stx104_probe(struct device *dev, unsigned int id) stx104gpio->chip.get_multiple = stx104_gpio_get_multiple; stx104gpio->chip.set = stx104_gpio_set; stx104gpio->chip.set_multiple = stx104_gpio_set_multiple; - stx104gpio->base = priv->base + 3; + stx104gpio->base = &priv->reg->dio; stx104gpio->out_state = 0x0; spin_lock_init(&stx104gpio->lock); From 89a007f3a38359332136b48cf45a3d768fe76b1c Mon Sep 17 00:00:00 2001 From: William Breathitt Gray Date: Mon, 15 Aug 2022 18:29:21 -0400 Subject: [PATCH 416/513] iio: stx104: Move to addac subdirectory [ Upstream commit 955c2aa9cff2dd07ff798ca8c883398731687972 ] The stx104 driver supports both ADC and DAC functionality. 
Signed-off-by: William Breathitt Gray Link: https://lore.kernel.org/r/20220815222921.138945-1-william.gray@linaro.org Signed-off-by: Jonathan Cameron Stable-dep-of: 4f9b80aefb9e ("iio: addac: stx104: Fix race condition when converting analog-to-digital") Signed-off-by: Sasha Levin --- MAINTAINERS | 2 +- drivers/iio/adc/Kconfig | 16 ---------------- drivers/iio/adc/Makefile | 1 - drivers/iio/addac/Kconfig | 16 ++++++++++++++++ drivers/iio/addac/Makefile | 1 + drivers/iio/{adc => addac}/stx104.c | 0 6 files changed, 18 insertions(+), 18 deletions(-) rename drivers/iio/{adc => addac}/stx104.c (100%) diff --git a/MAINTAINERS b/MAINTAINERS index e6b53e76651b..9216b9c85ce9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1250,7 +1250,7 @@ APEX EMBEDDED SYSTEMS STX104 IIO DRIVER M: William Breathitt Gray L: linux-iio@vger.kernel.org S: Maintained -F: drivers/iio/adc/stx104.c +F: drivers/iio/addac/stx104.c APM DRIVER M: Jiri Kosina diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig index af168e1c9fdb..86b83dc7b7d9 100644 --- a/drivers/iio/adc/Kconfig +++ b/drivers/iio/adc/Kconfig @@ -991,22 +991,6 @@ config STMPE_ADC Say yes here to build support for ST Microelectronics STMPE built-in ADC block (stmpe811). -config STX104 - tristate "Apex Embedded Systems STX104 driver" - depends on PC104 && X86 - select ISA_BUS_API - select GPIOLIB - help - Say yes here to build support for the Apex Embedded Systems STX104 - integrated analog PC/104 card. - - This driver supports the 16 channels of single-ended (8 channels of - differential) analog inputs, 2 channels of analog output, 4 digital - inputs, and 4 digital outputs provided by the STX104. - - The base port addresses for the devices may be configured via the base - array module parameter. - config SUN4I_GPADC tristate "Support for the Allwinner SoCs GPADC" depends on IIO diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile index d68550f493e3..13668c4cfbaf 100644 --- a/drivers/iio/adc/Makefile +++ b/drivers/iio/adc/Makefile @@ -85,7 +85,6 @@ obj-$(CONFIG_ROCKCHIP_SARADC) += rockchip_saradc.o obj-$(CONFIG_RZG2L_ADC) += rzg2l_adc.o obj-$(CONFIG_SC27XX_ADC) += sc27xx_adc.o obj-$(CONFIG_SPEAR_ADC) += spear_adc.o -obj-$(CONFIG_STX104) += stx104.o obj-$(CONFIG_SUN4I_GPADC) += sun4i-gpadc-iio.o obj-$(CONFIG_STM32_ADC_CORE) += stm32-adc-core.o obj-$(CONFIG_STM32_ADC) += stm32-adc.o diff --git a/drivers/iio/addac/Kconfig b/drivers/iio/addac/Kconfig index 2e64d7755d5e..1f598670e84f 100644 --- a/drivers/iio/addac/Kconfig +++ b/drivers/iio/addac/Kconfig @@ -5,4 +5,20 @@ menu "Analog to digital and digital to analog converters" +config STX104 + tristate "Apex Embedded Systems STX104 driver" + depends on PC104 && X86 + select ISA_BUS_API + select GPIOLIB + help + Say yes here to build support for the Apex Embedded Systems STX104 + integrated analog PC/104 card. + + This driver supports the 16 channels of single-ended (8 channels of + differential) analog inputs, 2 channels of analog output, 4 digital + inputs, and 4 digital outputs provided by the STX104. + + The base port addresses for the devices may be configured via the base + array module parameter. 
+ endmenu diff --git a/drivers/iio/addac/Makefile b/drivers/iio/addac/Makefile index b888b9ee12da..862914523354 100644 --- a/drivers/iio/addac/Makefile +++ b/drivers/iio/addac/Makefile @@ -4,3 +4,4 @@ # # When adding new entries keep the list in alphabetical order +obj-$(CONFIG_STX104) += stx104.o diff --git a/drivers/iio/adc/stx104.c b/drivers/iio/addac/stx104.c similarity index 100% rename from drivers/iio/adc/stx104.c rename to drivers/iio/addac/stx104.c From d31d04ec404c4464c043ce1a61358d0eaad05eeb Mon Sep 17 00:00:00 2001 From: William Breathitt Gray Date: Thu, 6 Apr 2023 10:40:10 -0400 Subject: [PATCH 417/513] iio: addac: stx104: Fix race condition for stx104_write_raw() [ Upstream commit 9740827468cea80c42db29e7171a50e99acf7328 ] The priv->chan_out_states array and actual DAC value can become mismatched if stx104_write_raw() is called concurrently. Prevent such a race condition by utilizing a mutex. Fixes: 97a445dad37a ("iio: Add IIO support for the DAC on the Apex Embedded Systems STX104") Signed-off-by: William Breathitt Gray Link: https://lore.kernel.org/r/c95c9a77fcef36b2a052282146950f23bbc1ebdc.1680790580.git.william.gray@linaro.org Cc: Signed-off-by: Jonathan Cameron Stable-dep-of: 4f9b80aefb9e ("iio: addac: stx104: Fix race condition when converting analog-to-digital") Signed-off-by: Sasha Levin --- drivers/iio/addac/stx104.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/iio/addac/stx104.c b/drivers/iio/addac/stx104.c index 48a91a95e597..e110a910235f 100644 --- a/drivers/iio/addac/stx104.c +++ b/drivers/iio/addac/stx104.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -69,10 +70,12 @@ struct stx104_reg { /** * struct stx104_iio - IIO device private data structure + * @lock: synchronization lock to prevent I/O race conditions * @chan_out_states: channels' output states * @reg: I/O address offset for the device registers */ struct stx104_iio { + struct mutex lock; unsigned int chan_out_states[STX104_NUM_OUT_CHAN]; struct stx104_reg __iomem *reg; }; @@ -178,9 +181,12 @@ static int stx104_write_raw(struct iio_dev *indio_dev, if ((unsigned int)val > 65535) return -EINVAL; + mutex_lock(&priv->lock); + priv->chan_out_states[chan->channel] = val; iowrite16(val, &priv->reg->dac[chan->channel]); + mutex_unlock(&priv->lock); return 0; } return -EINVAL; @@ -351,6 +357,8 @@ static int stx104_probe(struct device *dev, unsigned int id) indio_dev->name = dev_name(dev); + mutex_init(&priv->lock); + /* configure device for software trigger operation */ iowrite8(0, &priv->reg->acr); From 9fa82f031af8d30431ee0ce92ecae477be66585c Mon Sep 17 00:00:00 2001 From: William Breathitt Gray Date: Thu, 6 Apr 2023 10:40:11 -0400 Subject: [PATCH 418/513] iio: addac: stx104: Fix race condition when converting analog-to-digital [ Upstream commit 4f9b80aefb9e2f542a49d9ec087cf5919730e1dd ] The ADC conversion procedure requires several device I/O operations performed in a particular sequence. If stx104_read_raw() is called concurrently, the ADC conversion procedure could be clobbered. Prevent such a race condition by utilizing a mutex. 
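Reduced to its essentials, the locking added by this pair of fixes looks like the sketch below: one mutex held across the whole select/trigger/poll/read sequence so that two readers cannot interleave their conversions. The structure and register names are placeholders; the real driver reuses the priv->lock it already takes on the DAC write path.

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/types.h>

struct demo_adc {
	struct mutex lock;		/* serializes the whole conversion sequence */
	void __iomem *chan_reg;
	void __iomem *trigger_reg;
	void __iomem *status_reg;
	void __iomem *data_reg;
};

static u16 demo_adc_convert(struct demo_adc *adc, unsigned int channel)
{
	u16 sample;

	mutex_lock(&adc->lock);
	iowrite8(channel, adc->chan_reg);		/* 1. select the channel */
	iowrite8(0, adc->trigger_reg);			/* 2. start the conversion */
	while (ioread8(adc->status_reg) & BIT(7))	/* 3. poll for completion, as the driver does */
		;
	sample = ioread16(adc->data_reg);		/* 4. read the result */
	mutex_unlock(&adc->lock);

	return sample;
}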
Fixes: 4075a283ae83 ("iio: stx104: Add IIO support for the ADC channels") Signed-off-by: William Breathitt Gray Link: https://lore.kernel.org/r/2ae5e40eed5006ca735e4c12181a9ff5ced65547.1680790580.git.william.gray@linaro.org Cc: Signed-off-by: Jonathan Cameron Signed-off-by: Sasha Levin --- drivers/iio/addac/stx104.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/iio/addac/stx104.c b/drivers/iio/addac/stx104.c index e110a910235f..b658a75d4e3a 100644 --- a/drivers/iio/addac/stx104.c +++ b/drivers/iio/addac/stx104.c @@ -117,6 +117,8 @@ static int stx104_read_raw(struct iio_dev *indio_dev, return IIO_VAL_INT; } + mutex_lock(&priv->lock); + /* select ADC channel */ iowrite8(chan->channel | (chan->channel << 4), &reg->achan); @@ -127,6 +129,8 @@ static int stx104_read_raw(struct iio_dev *indio_dev, while (ioread8(&reg->cir_asr) & BIT(7)); *val = ioread16(&reg->ssr_ad); + + mutex_unlock(&priv->lock); return IIO_VAL_INT; case IIO_CHAN_INFO_OFFSET: /* get ADC bipolar/unipolar configuration */ From f84c2ca3490c38b399d523f1f6a6c644eb46504b Mon Sep 17 00:00:00 2001 From: Song Yoong Siang Date: Tue, 2 May 2023 08:48:06 -0700 Subject: [PATCH 419/513] igc: read before write to SRRCTL register [ Upstream commit 3ce29c17dc847bf4245e16aad78a7617afa96297 ] The igc_configure_rx_ring() function will be called as part of XDP program setup. If Rx hardware timestamp is enabled prior to XDP program setup, this timestamp enablement will be overwritten when buffer size is written into SRRCTL register. Thus, this commit reads the register value before writing to the SRRCTL register. This commit is tested using the xdp_hw_metadata bpf selftest tool. The tool enables Rx hardware timestamp and then attaches the XDP program to the igc driver. It will display the hardware timestamp of the UDP packet with port number 9092. Below are details of the test steps and results. Command on DUT: sudo ./xdp_hw_metadata Command on Link Partner: echo -n skb | nc -u -q1 9092 Result before this patch: skb hwtstamp is not found! Result after this patch: found skb hwtstamp = 1677800973.642836757 Optionally, read PHC to confirm the values obtained are almost the same: Command: sudo ./testptp -d /dev/ptp0 -g Result: clock time: 1677800973.913598978 or Fri Mar 3 07:49:33 2023 Fixes: fc9df2a0b520 ("igc: Enable RX via AF_XDP zero-copy") Cc: # 5.14+ Signed-off-by: Song Yoong Siang Reviewed-by: Jacob Keller Reviewed-by: Jesper Dangaard Brouer Tested-by: Jesper Dangaard Brouer Tested-by: Naama Meir Signed-off-by: Tony Nguyen Reviewed-by: Leon Romanovsky Signed-off-by: David S.
Miller Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/igc/igc_base.h | 11 ++++++++--- drivers/net/ethernet/intel/igc/igc_main.c | 7 +++++-- 2 files changed, 13 insertions(+), 5 deletions(-) diff --git a/drivers/net/ethernet/intel/igc/igc_base.h b/drivers/net/ethernet/intel/igc/igc_base.h index ce530f5fd7bd..52849f5e8048 100644 --- a/drivers/net/ethernet/intel/igc/igc_base.h +++ b/drivers/net/ethernet/intel/igc/igc_base.h @@ -85,8 +85,13 @@ union igc_adv_rx_desc { #define IGC_RXDCTL_SWFLUSH 0x04000000 /* Receive Software Flush */ /* SRRCTL bit definitions */ -#define IGC_SRRCTL_BSIZEPKT_SHIFT 10 /* Shift _right_ */ -#define IGC_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* Shift _left_ */ -#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +#define IGC_SRRCTL_BSIZEPKT_MASK GENMASK(6, 0) +#define IGC_SRRCTL_BSIZEPKT(x) FIELD_PREP(IGC_SRRCTL_BSIZEPKT_MASK, \ + (x) / 1024) /* in 1 KB resolution */ +#define IGC_SRRCTL_BSIZEHDR_MASK GENMASK(13, 8) +#define IGC_SRRCTL_BSIZEHDR(x) FIELD_PREP(IGC_SRRCTL_BSIZEHDR_MASK, \ + (x) / 64) /* in 64 bytes resolution */ +#define IGC_SRRCTL_DESCTYPE_MASK GENMASK(27, 25) +#define IGC_SRRCTL_DESCTYPE_ADV_ONEBUF FIELD_PREP(IGC_SRRCTL_DESCTYPE_MASK, 1) #endif /* _IGC_BASE_H */ diff --git a/drivers/net/ethernet/intel/igc/igc_main.c b/drivers/net/ethernet/intel/igc/igc_main.c index a47dce10d3a7..a8c24a1c12b4 100644 --- a/drivers/net/ethernet/intel/igc/igc_main.c +++ b/drivers/net/ethernet/intel/igc/igc_main.c @@ -674,8 +674,11 @@ static void igc_configure_rx_ring(struct igc_adapter *adapter, else buf_size = IGC_RXBUFFER_2048; - srrctl = IGC_RX_HDR_LEN << IGC_SRRCTL_BSIZEHDRSIZE_SHIFT; - srrctl |= buf_size >> IGC_SRRCTL_BSIZEPKT_SHIFT; + srrctl = rd32(IGC_SRRCTL(reg_idx)); + srrctl &= ~(IGC_SRRCTL_BSIZEPKT_MASK | IGC_SRRCTL_BSIZEHDR_MASK | + IGC_SRRCTL_DESCTYPE_MASK); + srrctl |= IGC_SRRCTL_BSIZEHDR(IGC_RX_HDR_LEN); + srrctl |= IGC_SRRCTL_BSIZEPKT(buf_size); srrctl |= IGC_SRRCTL_DESCTYPE_ADV_ONEBUF; wr32(IGC_SRRCTL(reg_idx), srrctl); From 4f1beb75f6525516d2bc9efe7792c0a48d459b5f Mon Sep 17 00:00:00 2001 From: Zev Weiss Date: Thu, 23 Feb 2023 16:04:00 -0800 Subject: [PATCH 420/513] ARM: dts: aspeed: asrock: Correct firmware flash SPI clocks [ Upstream commit 9dedb724446913ea7b1591b4b3d2e3e909090980 ] While I'm not aware of any problems that have occurred running these at 100 MHz, the official word from ASRock is that 50 MHz is the correct speed to use, so let's be safe and use that instead. 
Signed-off-by: Zev Weiss Cc: stable@vger.kernel.org Fixes: 2b81613ce417 ("ARM: dts: aspeed: Add ASRock E3C246D4I BMC") Fixes: a9a3d60b937a ("ARM: dts: aspeed: Add ASRock ROMED8HM3 BMC") Link: https://lore.kernel.org/r/20230224000400.12226-4-zev@bewilderbeest.net Signed-off-by: Joel Stanley Signed-off-by: Sasha Levin --- arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts index 9b4cf5ebe6d5..c62aff908ab4 100644 --- a/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts +++ b/arch/arm/boot/dts/aspeed-bmc-asrock-e3c246d4i.dts @@ -63,7 +63,7 @@ status = "okay"; m25p,fast-read; label = "bmc"; - spi-max-frequency = <100000000>; /* 100 MHz */ + spi-max-frequency = <50000000>; /* 50 MHz */ #include "openbmc-flash-layout.dtsi" }; }; From 99c444d3c3c43db354b253d9bfac081073dcb484 Mon Sep 17 00:00:00 2001 From: hersen wu Date: Tue, 15 Nov 2022 10:39:55 -0500 Subject: [PATCH 421/513] drm/amd/display: save restore hdcp state when display is unplugged from mst hub [ Upstream commit 82986fd631fa04bcedaefe11a6b3767601cbe84f ] [Why] connector hdcp properties are lost after display is unplgged from mst hub. connector is destroyed with dm_dp_mst_connector_destroy. when display is plugged back, hdcp is not desired and it wouldnt be enabled. [How] save hdcp properties into hdcp_work within amdgpu_dm_atomic_commit_tail. If the same display is plugged back with same display index, its hdcp properties will be retrieved from hdcp_work within dm_dp_mst_get_modes. Acked-by: Aurabindo Pillai Signed-off-by: hersen wu Reviewed-by: Bhawanpreet Lakha Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher Stable-dep-of: cdff36a0217a ("drm/amd/display: fix access hdcp_workqueue assert") Signed-off-by: Sasha Levin --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 24 ++++++++++++++++- .../amd/display/amdgpu_dm/amdgpu_dm_hdcp.h | 14 ++++++++++ .../display/amdgpu_dm/amdgpu_dm_mst_types.c | 26 +++++++++++++++++++ 3 files changed, 63 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7bd38d927b18..41be9606726e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9612,11 +9612,33 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) continue; } - if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) + if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) { + /* when display is unplugged from mst hub, connctor will + * be destroyed within dm_dp_mst_connector_destroy. connector + * hdcp perperties, like type, undesired, desired, enabled, + * will be lost. So, save hdcp properties into hdcp_work within + * amdgpu_dm_atomic_commit_tail. 
if the same display is + * plugged back with same display index, its hdcp properties + * will be retrieved from hdcp_work within dm_dp_mst_get_modes + */ + + if (aconnector->dc_link && aconnector->dc_sink && + aconnector->dc_link->type == dc_connection_mst_branch) { + struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; + struct hdcp_workqueue *hdcp_w = + &hdcp_work[aconnector->dc_link->link_index]; + + hdcp_w->hdcp_content_type[connector->index] = + new_con_state->hdcp_content_type; + hdcp_w->content_protection[connector->index] = + new_con_state->content_protection; + } + hdcp_update_display( adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, new_con_state->hdcp_content_type, new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED); + } } #endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h index 09294ff122fe..bbbf7d0eff82 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -52,6 +52,20 @@ struct hdcp_workqueue { struct mod_hdcp_link link; enum mod_hdcp_encryption_status encryption_status; + + /* when display is unplugged from mst hub, connctor will be + * destroyed within dm_dp_mst_connector_destroy. connector + * hdcp perperties, like type, undesired, desired, enabled, + * will be lost. So, save hdcp properties into hdcp_work within + * amdgpu_dm_atomic_commit_tail. if the same display is + * plugged back with same display index, its hdcp properties + * will be retrieved from hdcp_work within dm_dp_mst_get_modes + */ + /* un-desired, desired, enabled */ + unsigned int content_protection[AMDGPU_DM_MAX_DISPLAY_INDEX]; + /* hdcp1.x, hdcp2.x */ + unsigned int hdcp_content_type[AMDGPU_DM_MAX_DISPLAY_INDEX]; + uint8_t max_link; uint8_t *srm; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 7a3fee71a867..e1e0be6dd22c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -32,6 +32,10 @@ #include "amdgpu_dm.h" #include "amdgpu_dm_mst_types.h" +#ifdef CONFIG_DRM_AMD_DC_HDCP +#include "amdgpu_dm_hdcp.h" +#endif + #include "dc.h" #include "dm_helpers.h" @@ -315,6 +319,28 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) /* dc_link_add_remote_sink returns a new reference */ aconnector->dc_sink = dc_sink; + /* when display is unplugged from mst hub, connctor will be + * destroyed within dm_dp_mst_connector_destroy. connector + * hdcp perperties, like type, undesired, desired, enabled, + * will be lost. So, save hdcp properties into hdcp_work within + * amdgpu_dm_atomic_commit_tail. 
if the same display is + * plugged back with same display index, its hdcp properties + * will be retrieved from hdcp_work within dm_dp_mst_get_modes + */ +#ifdef CONFIG_DRM_AMD_DC_HDCP + if (aconnector->dc_sink && connector->state) { + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; + struct hdcp_workqueue *hdcp_w = &hdcp_work[aconnector->dc_link->link_index]; + + connector->state->hdcp_content_type = + hdcp_w->hdcp_content_type[connector->index]; + connector->state->content_protection = + hdcp_w->content_protection[connector->index]; + } +#endif + if (aconnector->dc_sink) { amdgpu_dm_update_freesync_caps( connector, aconnector->edid); From 0ba6c7ba081e4ee55f326d46e56c3f6e3419fea1 Mon Sep 17 00:00:00 2001 From: hersen wu Date: Tue, 15 Nov 2022 14:20:56 -0500 Subject: [PATCH 422/513] drm/amd/display: phase3 mst hdcp for multiple displays [ Upstream commit e8fd3eeb5e8711af39b00642da06474e52f4780c ] [Why] multiple display hdcp are enabled within event_property_validate, event_property_update by looping all displays on mst hub. when one of display on mst hub in unplugged or disabled, hdcp are disabled for all displays on mst hub within hdcp_reset_display by looping all displays of mst link. for displays still active, their encryption status are off. kernel driver will not run hdcp authentication again. therefore, hdcp are not enabled automatically. [How] within is_content_protection_different, check drm_crtc_state changes of all displays on mst hub, if need, triger hdcp_update_display to re-run hdcp authentication. Acked-by: Aurabindo Pillai Signed-off-by: hersen wu Reviewed-by: Bhawanpreet Lakha Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher Stable-dep-of: cdff36a0217a ("drm/amd/display: fix access hdcp_workqueue assert") Signed-off-by: Sasha Levin --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 183 ++++++++++++++---- 1 file changed, 141 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 41be9606726e..65f9e7012f6c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -8566,27 +8566,55 @@ is_scaling_state_different(const struct dm_connector_state *dm_state, } #ifdef CONFIG_DRM_AMD_DC_HDCP -static bool is_content_protection_different(struct drm_connector_state *state, - const struct drm_connector_state *old_state, - const struct drm_connector *connector, struct hdcp_workqueue *hdcp_w) +static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, + struct drm_crtc_state *old_crtc_state, + struct drm_connector_state *new_conn_state, + struct drm_connector_state *old_conn_state, + const struct drm_connector *connector, + struct hdcp_workqueue *hdcp_w) { struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); - /* Handle: Type0/1 change */ - if (old_state->hdcp_content_type != state->hdcp_content_type && - state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { - state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", + connector->index, connector->status, connector->dpms); + pr_debug("[HDCP_DM] state protection old: %x new: %x\n", + old_conn_state->content_protection, 
new_conn_state->content_protection); + + if (old_crtc_state) + pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + old_crtc_state->enable, + old_crtc_state->active, + old_crtc_state->mode_changed, + old_crtc_state->active_changed, + old_crtc_state->connectors_changed); + + if (new_crtc_state) + pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + new_crtc_state->enable, + new_crtc_state->active, + new_crtc_state->mode_changed, + new_crtc_state->active_changed, + new_crtc_state->connectors_changed); + + /* hdcp content type change */ + if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && + new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__); return true; } - /* CP is being re enabled, ignore this - * - * Handles: ENABLED -> DESIRED - */ - if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && - state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { - state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; + /* CP is being re enabled, ignore this */ + if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { + if (new_crtc_state && new_crtc_state->mode_changed) { + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__); + return true; + }; + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; + pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); return false; } @@ -8594,9 +8622,9 @@ static bool is_content_protection_different(struct drm_connector_state *state, * * Handles: UNDESIRED -> ENABLED */ - if (old_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && - state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) - state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; /* Stream removed and re-enabled * @@ -8606,10 +8634,12 @@ static bool is_content_protection_different(struct drm_connector_state *state, * * Handles: DESIRED -> DESIRED (Special case) */ - if (!(old_state->crtc && old_state->crtc->enabled) && - state->crtc && state->crtc->enabled && + if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && + new_conn_state->crtc && new_conn_state->crtc->enabled && connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { dm_con_state->update_hdcp = false; + pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", + __func__); return true; } @@ -8621,35 +8651,42 @@ static bool is_content_protection_different(struct drm_connector_state *state, * * Handles: DESIRED -> DESIRED (Special case) */ - if (dm_con_state->update_hdcp && state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && - connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { + if (dm_con_state->update_hdcp && + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && + connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { dm_con_state->update_hdcp = 
false; + pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", + __func__); return true; } - /* - * Handles: UNDESIRED -> UNDESIRED - * DESIRED -> DESIRED - * ENABLED -> ENABLED - */ - if (old_state->content_protection == state->content_protection) + if (old_conn_state->content_protection == new_conn_state->content_protection) { + if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { + if (new_crtc_state && new_crtc_state->mode_changed) { + pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", + __func__); + return true; + }; + pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", + __func__); + return false; + }; + + pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); return false; + } - /* - * Handles: UNDESIRED -> DESIRED - * DESIRED -> UNDESIRED - * ENABLED -> UNDESIRED - */ - if (state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) + if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { + pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", + __func__); return true; + } - /* - * Handles: DESIRED -> ENABLED - */ + pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__); return false; } - #endif + static void remove_stream(struct amdgpu_device *adev, struct amdgpu_crtc *acrtc, struct dc_stream_state *stream) @@ -9592,15 +9629,66 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) } } #ifdef CONFIG_DRM_AMD_DC_HDCP + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); + + if (!connector) + continue; + + pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", + connector->index, connector->status, connector->dpms); + pr_debug("[HDCP_DM] state protection old: %x new: %x\n", + old_con_state->content_protection, new_con_state->content_protection); + + if (aconnector->dc_sink) { + if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && + aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { + pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n", + aconnector->dc_sink->edid_caps.display_name); + } + } + + new_crtc_state = NULL; + old_crtc_state = NULL; + + if (acrtc) { + new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); + } + + if (old_crtc_state) + pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + old_crtc_state->enable, + old_crtc_state->active, + old_crtc_state->mode_changed, + old_crtc_state->active_changed, + old_crtc_state->connectors_changed); + + if (new_crtc_state) + pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + new_crtc_state->enable, + new_crtc_state->active, + new_crtc_state->mode_changed, + new_crtc_state->active_changed, + new_crtc_state->connectors_changed); + } + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); new_crtc_state = 
NULL; + old_crtc_state = NULL; - if (acrtc) + if (acrtc) { new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); + } dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); @@ -9612,7 +9700,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) continue; } - if (is_content_protection_different(new_con_state, old_con_state, connector, adev->dm.hdcp_workqueue)) { + if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state, + old_con_state, connector, adev->dm.hdcp_workqueue)) { /* when display is unplugged from mst hub, connctor will * be destroyed within dm_dp_mst_connector_destroy. connector * hdcp perperties, like type, undesired, desired, enabled, @@ -9622,6 +9711,11 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) * will be retrieved from hdcp_work within dm_dp_mst_get_modes */ + bool enable_encryption = false; + + if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) + enable_encryption = true; + if (aconnector->dc_link && aconnector->dc_sink && aconnector->dc_link->type == dc_connection_mst_branch) { struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; @@ -9634,11 +9728,16 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) new_con_state->content_protection; } + if (new_crtc_state && new_crtc_state->mode_changed && + new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) + enable_encryption = true; + + DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption); + hdcp_update_display( adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector, - new_con_state->hdcp_content_type, - new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED); - } + new_con_state->hdcp_content_type, enable_encryption); + } } #endif From d3c82f24ee695e4e181ee420e7e7b77760faeb3c Mon Sep 17 00:00:00 2001 From: Hersen Wu Date: Tue, 28 Mar 2023 10:45:24 -0400 Subject: [PATCH 423/513] drm/amd/display: fix access hdcp_workqueue assert [ Upstream commit cdff36a0217aadf5cbc167893ad1c0da869619cb ] [Why] hdcp are enabled for asics from raven. for old asics which hdcp are not enabled, hdcp_workqueue are null. some access to hdcp work queue are not guarded with pointer check. [How] add hdcp_workqueue pointer check before access workqueue. 
Reviewed-by: Bhawanpreet Lakha Acked-by: Qingqing Zhuo Signed-off-by: Hersen Wu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher Signed-off-by: Sasha Levin --- .../gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 ++++++ .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 16 ++++++++++------ 2 files changed, 16 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 65f9e7012f6c..4cf33abfb7cc 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -9634,6 +9634,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + if (!adev->dm.hdcp_workqueue) + continue; + pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); if (!connector) @@ -9682,6 +9685,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + if (!adev->dm.hdcp_workqueue) + continue; + new_crtc_state = NULL; old_crtc_state = NULL; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index e1e0be6dd22c..0b58a9386449 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -331,13 +331,17 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (aconnector->dc_sink && connector->state) { struct drm_device *dev = connector->dev; struct amdgpu_device *adev = drm_to_adev(dev); - struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; - struct hdcp_workqueue *hdcp_w = &hdcp_work[aconnector->dc_link->link_index]; - connector->state->hdcp_content_type = - hdcp_w->hdcp_content_type[connector->index]; - connector->state->content_protection = - hdcp_w->content_protection[connector->index]; + if (adev->dm.hdcp_workqueue) { + struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue; + struct hdcp_workqueue *hdcp_w = + &hdcp_work[aconnector->dc_link->link_index]; + + connector->state->hdcp_content_type = + hdcp_w->hdcp_content_type[connector->index]; + connector->state->content_protection = + hdcp_w->content_protection[connector->index]; + } } #endif From 7dfb384e76ee72e57c0df198baf0056533238741 Mon Sep 17 00:00:00 2001 From: Wesley Cheng Date: Wed, 17 Aug 2022 11:23:53 -0700 Subject: [PATCH 424/513] usb: dwc3: gadget: Synchronize IRQ between soft connect/disconnect [ Upstream commit 9711c67de7482c81e1daca3548fbc5c9603600e3 ] Ensure that there are no pending events being handled in between soft connect/disconnect transitions. As we are keeping interrupts enabled, and EP0 events are still being serviced, this avoids any stale events from being serviced. 
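The ordering being enforced can be reduced to the sketch below; the structure and helper are placeholders rather than the dwc3 API, and only the position of synchronize_irq() relative to the state change is the point.

#include <linux/interrupt.h>
#include <linux/types.h>

struct demo_udc {
	unsigned int irq;
	bool pullups_connected;
};

static void demo_set_pullup(struct demo_udc *udc, bool on)
{
	/* Let any event handler already running on another CPU finish
	 * before the connection state changes, so it cannot act on
	 * events that belong to the previous state. */
	synchronize_irq(udc->irq);

	udc->pullups_connected = on;
	/* ...program the controller's run/stop bit for the new state... */
}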
Reviewed-by: Thinh Nguyen Signed-off-by: Wesley Cheng Link: https://lore.kernel.org/r/20220817182359.13550-4-quic_wcheng@quicinc.com Signed-off-by: Greg Kroah-Hartman Stable-dep-of: c8540870af4c ("usb: dwc3: gadget: Improve dwc3_gadget_suspend() and dwc3_gadget_resume()") Signed-off-by: Sasha Levin --- drivers/usb/dwc3/gadget.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index d76a4837615d..b2ffc98c9e74 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2540,6 +2540,8 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) return 0; } + synchronize_irq(dwc->irq_gadget); + if (!is_on) { ret = dwc3_gadget_soft_disconnect(dwc); } else { From 2fa487a9466760a4fb6f147aed6219379dabfc2e Mon Sep 17 00:00:00 2001 From: Wesley Cheng Date: Thu, 1 Sep 2022 12:36:22 -0700 Subject: [PATCH 425/513] usb: dwc3: Remove DWC3 locking during gadget suspend/resume [ Upstream commit 5265397f94424eaea596026fd34dc7acf474dcec ] Remove the need for making dwc3_gadget_suspend() and dwc3_gadget_resume() to be called in a spinlock, as dwc3_gadget_run_stop() could potentially take some time to complete. Signed-off-by: Wesley Cheng Link: https://lore.kernel.org/r/20220901193625.8727-3-quic_wcheng@quicinc.com Signed-off-by: Greg Kroah-Hartman Stable-dep-of: c8540870af4c ("usb: dwc3: gadget: Improve dwc3_gadget_suspend() and dwc3_gadget_resume()") Signed-off-by: Sasha Levin --- drivers/usb/dwc3/core.c | 4 ---- drivers/usb/dwc3/gadget.c | 5 +++++ 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index f2e841bc05c7..ac119a88e6c7 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1766,9 +1766,7 @@ static int dwc3_suspend_common(struct dwc3 *dwc, pm_message_t msg) case DWC3_GCTL_PRTCAP_DEVICE: if (pm_runtime_suspended(dwc->dev)) break; - spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_suspend(dwc); - spin_unlock_irqrestore(&dwc->lock, flags); synchronize_irq(dwc->irq_gadget); dwc3_core_exit(dwc); break; @@ -1829,9 +1827,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg) return ret; dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_DEVICE); - spin_lock_irqsave(&dwc->lock, flags); dwc3_gadget_resume(dwc); - spin_unlock_irqrestore(&dwc->lock, flags); break; case DWC3_GCTL_PRTCAP_HOST: if (!PMSG_IS_AUTO(msg)) { diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index b2ffc98c9e74..e0c67a256c21 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -4439,12 +4439,17 @@ void dwc3_gadget_exit(struct dwc3 *dwc) int dwc3_gadget_suspend(struct dwc3 *dwc) { + unsigned long flags; + if (!dwc->gadget_driver) return 0; dwc3_gadget_run_stop(dwc, false, false); + + spin_lock_irqsave(&dwc->lock, flags); dwc3_disconnect_gadget(dwc); __dwc3_gadget_stop(dwc); + spin_unlock_irqrestore(&dwc->lock, flags); return 0; } From d9e004104e45c8c46715b2740725535baf8dfa7c Mon Sep 17 00:00:00 2001 From: Kushagra Verma Date: Tue, 13 Sep 2022 19:56:49 +0530 Subject: [PATCH 426/513] usb: dwc3: Fix typos in gadget.c [ Upstream commit af870d93c706c302a8742d7c751a60a832f7bc64 ] Fixes the following two typos: 1. reinitate -> reinitiate 2. 
revison -> revision Signed-off-by: Kushagra Verma Link: https://lore.kernel.org/r/HK0PR01MB280110FAB74B4B2ACE32EA5FF8479@HK0PR01MB2801.apcprd01.prod.exchangelabs.com Signed-off-by: Greg Kroah-Hartman Stable-dep-of: c8540870af4c ("usb: dwc3: gadget: Improve dwc3_gadget_suspend() and dwc3_gadget_resume()") Signed-off-by: Sasha Levin --- drivers/usb/dwc3/gadget.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index e0c67a256c21..2d5a4d0e63c6 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -3531,7 +3531,7 @@ static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep, * streams are updated, and the device controller will not be * triggered to generate ERDY to move the next stream data. To * workaround this and maintain compatibility with various - * hosts, force to reinitate the stream until the host is ready + * hosts, force to reinitiate the stream until the host is ready * instead of waiting for the host to prime the endpoint. */ if (DWC3_VER_IS_WITHIN(DWC32, 100A, ANY)) { @@ -4059,7 +4059,7 @@ static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc, unsigned int is_ss = evtinfo & BIT(4); /* - * WORKAROUND: DWC3 revison 2.20a with hibernation support + * WORKAROUND: DWC3 revision 2.20a with hibernation support * have a known issue which can cause USB CV TD.9.23 to fail * randomly. * From 31a0e60fe16bc0c923aca5bd90f437046544be23 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 4 Apr 2023 09:25:17 +0200 Subject: [PATCH 427/513] USB: dwc3: gadget: drop dead hibernation code [ Upstream commit bdb19d01026a5cccfa437be8adcf2df472c5889e ] The hibernation code is broken and has never been enabled in mainline and should thus be dropped. Remove the hibernation bits from the gadget code, which effectively reverts commits e1dadd3b0f27 ("usb: dwc3: workaround: bogus hibernation events") and 7b2a0368bbc9 ("usb: dwc3: gadget: set KEEP_CONNECT in case of hibernation") except for the spurious interrupt warning. Acked-by: Thinh Nguyen Signed-off-by: Johan Hovold Link: https://lore.kernel.org/r/20230404072524.19014-5-johan+linaro@kernel.org Signed-off-by: Greg Kroah-Hartman Stable-dep-of: c8540870af4c ("usb: dwc3: gadget: Improve dwc3_gadget_suspend() and dwc3_gadget_resume()") Signed-off-by: Sasha Levin --- drivers/usb/dwc3/gadget.c | 46 +++++---------------------------------- 1 file changed, 6 insertions(+), 40 deletions(-) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index 2d5a4d0e63c6..a1be110f7ced 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2412,7 +2412,7 @@ static void __dwc3_gadget_set_speed(struct dwc3 *dwc) dwc3_writel(dwc->regs, DWC3_DCFG, reg); } -static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) +static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on) { u32 reg; u32 timeout = 500; @@ -2431,17 +2431,11 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) reg &= ~DWC3_DCTL_KEEP_CONNECT; reg |= DWC3_DCTL_RUN_STOP; - if (dwc->has_hibernation) - reg |= DWC3_DCTL_KEEP_CONNECT; - __dwc3_gadget_set_speed(dwc); dwc->pullups_connected = true; } else { reg &= ~DWC3_DCTL_RUN_STOP; - if (dwc->has_hibernation && !suspend) - reg &= ~DWC3_DCTL_KEEP_CONNECT; - dwc->pullups_connected = false; } @@ -2487,7 +2481,7 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc) * remaining event generated by the controller while polling for * DSTS.DEVCTLHLT. 
*/ - return dwc3_gadget_run_stop(dwc, false, false); + return dwc3_gadget_run_stop(dwc, false); } static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) @@ -2555,7 +2549,7 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) dwc3_event_buffers_setup(dwc); __dwc3_gadget_start(dwc); - ret = dwc3_gadget_run_stop(dwc, true, false); + ret = dwc3_gadget_run_stop(dwc, true); } pm_runtime_put(dwc->dev); @@ -4053,30 +4047,6 @@ static void dwc3_gadget_suspend_interrupt(struct dwc3 *dwc, dwc->link_state = next; } -static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc, - unsigned int evtinfo) -{ - unsigned int is_ss = evtinfo & BIT(4); - - /* - * WORKAROUND: DWC3 revision 2.20a with hibernation support - * have a known issue which can cause USB CV TD.9.23 to fail - * randomly. - * - * Because of this issue, core could generate bogus hibernation - * events which SW needs to ignore. - * - * Refers to: - * - * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0 - * Device Fallback from SuperSpeed - */ - if (is_ss ^ (dwc->speed == USB_SPEED_SUPER)) - return; - - /* enter hibernation here */ -} - static void dwc3_gadget_interrupt(struct dwc3 *dwc, const struct dwc3_event_devt *event) { @@ -4094,11 +4064,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc, dwc3_gadget_wakeup_interrupt(dwc); break; case DWC3_DEVICE_EVENT_HIBER_REQ: - if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation, - "unexpected hibernation event\n")) - break; - - dwc3_gadget_hibernation_interrupt(dwc, event->event_info); + dev_WARN_ONCE(dwc->dev, true, "unexpected hibernation event\n"); break; case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE: dwc3_gadget_linksts_change_interrupt(dwc, event->event_info); @@ -4444,7 +4410,7 @@ int dwc3_gadget_suspend(struct dwc3 *dwc) if (!dwc->gadget_driver) return 0; - dwc3_gadget_run_stop(dwc, false, false); + dwc3_gadget_run_stop(dwc, false); spin_lock_irqsave(&dwc->lock, flags); dwc3_disconnect_gadget(dwc); @@ -4465,7 +4431,7 @@ int dwc3_gadget_resume(struct dwc3 *dwc) if (ret < 0) goto err0; - ret = dwc3_gadget_run_stop(dwc, true, false); + ret = dwc3_gadget_run_stop(dwc, true); if (ret < 0) goto err1; From 20351ddb1f41cfb3ae20e105425ef43a28393d76 Mon Sep 17 00:00:00 2001 From: Roger Quadros Date: Wed, 3 May 2023 14:00:48 +0300 Subject: [PATCH 428/513] usb: dwc3: gadget: Improve dwc3_gadget_suspend() and dwc3_gadget_resume() [ Upstream commit c8540870af4ce6ddeb27a7bb5498b75fb29b643c ] Prevent -ETIMEDOUT error on .suspend(). e.g. If gadget driver is loaded and we are connected to a USB host, all transfers must be stopped before stopping the controller else we will not get a clean stop i.e. dwc3_gadget_run_stop() will take several seconds to complete and will return -ETIMEDOUT. Handle error cases properly in dwc3_gadget_suspend(). Simplify dwc3_gadget_resume() by using the introduced helper function. 
Fixes: 9f8a67b65a49 ("usb: dwc3: gadget: fix gadget suspend/resume") Cc: stable@vger.kernel.org Suggested-by: Thinh Nguyen Signed-off-by: Roger Quadros Acked-by: Thinh Nguyen Link: https://lore.kernel.org/r/20230503110048.30617-1-rogerq@kernel.org Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/usb/dwc3/gadget.c | 67 ++++++++++++++++++++------------------- 1 file changed, 34 insertions(+), 33 deletions(-) diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index a1be110f7ced..8ada601901cf 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -2484,6 +2484,21 @@ static int dwc3_gadget_soft_disconnect(struct dwc3 *dwc) return dwc3_gadget_run_stop(dwc, false); } +static int dwc3_gadget_soft_connect(struct dwc3 *dwc) +{ + /* + * In the Synopsys DWC_usb31 1.90a programming guide section + * 4.1.9, it specifies that for a reconnect after a + * device-initiated disconnect requires a core soft reset + * (DCTL.CSftRst) before enabling the run/stop bit. + */ + dwc3_core_soft_reset(dwc); + + dwc3_event_buffers_setup(dwc); + __dwc3_gadget_start(dwc); + return dwc3_gadget_run_stop(dwc, true); +} + static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) { struct dwc3 *dwc = gadget_to_dwc(g); @@ -2536,21 +2551,10 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) synchronize_irq(dwc->irq_gadget); - if (!is_on) { + if (!is_on) ret = dwc3_gadget_soft_disconnect(dwc); - } else { - /* - * In the Synopsys DWC_usb31 1.90a programming guide section - * 4.1.9, it specifies that for a reconnect after a - * device-initiated disconnect requires a core soft reset - * (DCTL.CSftRst) before enabling the run/stop bit. - */ - dwc3_core_soft_reset(dwc); - - dwc3_event_buffers_setup(dwc); - __dwc3_gadget_start(dwc); - ret = dwc3_gadget_run_stop(dwc, true); - } + else + ret = dwc3_gadget_soft_connect(dwc); pm_runtime_put(dwc->dev); @@ -4406,42 +4410,39 @@ void dwc3_gadget_exit(struct dwc3 *dwc) int dwc3_gadget_suspend(struct dwc3 *dwc) { unsigned long flags; + int ret; if (!dwc->gadget_driver) return 0; - dwc3_gadget_run_stop(dwc, false); + ret = dwc3_gadget_soft_disconnect(dwc); + if (ret) + goto err; spin_lock_irqsave(&dwc->lock, flags); dwc3_disconnect_gadget(dwc); - __dwc3_gadget_stop(dwc); spin_unlock_irqrestore(&dwc->lock, flags); return 0; + +err: + /* + * Attempt to reset the controller's state. Likely no + * communication can be established until the host + * performs a port reset. + */ + if (dwc->softconnect) + dwc3_gadget_soft_connect(dwc); + + return ret; } int dwc3_gadget_resume(struct dwc3 *dwc) { - int ret; - if (!dwc->gadget_driver || !dwc->softconnect) return 0; - ret = __dwc3_gadget_start(dwc); - if (ret < 0) - goto err0; - - ret = dwc3_gadget_run_stop(dwc, true); - if (ret < 0) - goto err1; - - return 0; - -err1: - __dwc3_gadget_stop(dwc); - -err0: - return ret; + return dwc3_gadget_soft_connect(dwc); } void dwc3_gadget_process_pending_events(struct dwc3 *dwc) From fa254ab7d02d52271187eccfea82f8b17771a78a Mon Sep 17 00:00:00 2001 From: Jesse Taube Date: Wed, 15 Dec 2021 17:05:36 -0500 Subject: [PATCH 429/513] tty: serial: fsl_lpuart: Add i.MXRT1050 support [ Upstream commit 443df57b31d14a920f23eaa265f4cb0dc3f94823 ] Add support for i.MXRT1050's uart. 
Cc: Giulio Benetti Signed-off-by: Jesse Taube Link: https://lore.kernel.org/r/20211215220538.4180616-8-Mr.Bossman075@gmail.com Signed-off-by: Greg Kroah-Hartman Stable-dep-of: a82c3df955f8 ("tty: serial: fsl_lpuart: reduce RX watermark to 0 on LS1028A") Signed-off-by: Sasha Levin --- drivers/tty/serial/fsl_lpuart.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index b0b27808c7c3..bf709ea93ec9 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -246,6 +246,7 @@ enum lpuart_type { LS1028A_LPUART, IMX7ULP_LPUART, IMX8QXP_LPUART, + IMXRT1050_LPUART, }; struct lpuart_port { @@ -308,6 +309,11 @@ static struct lpuart_soc_data imx8qxp_data = { .iotype = UPIO_MEM32, .reg_off = IMX_REG_OFF, }; +static struct lpuart_soc_data imxrt1050_data = { + .devtype = IMXRT1050_LPUART, + .iotype = UPIO_MEM32, + .reg_off = IMX_REG_OFF, +}; static const struct of_device_id lpuart_dt_ids[] = { { .compatible = "fsl,vf610-lpuart", .data = &vf_data, }, @@ -315,6 +321,7 @@ static const struct of_device_id lpuart_dt_ids[] = { { .compatible = "fsl,ls1028a-lpuart", .data = &ls1028a_data, }, { .compatible = "fsl,imx7ulp-lpuart", .data = &imx7ulp_data, }, { .compatible = "fsl,imx8qxp-lpuart", .data = &imx8qxp_data, }, + { .compatible = "fsl,imxrt1050-lpuart", .data = &imxrt1050_data}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, lpuart_dt_ids); @@ -2634,6 +2641,7 @@ OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1028a-lpuart", ls1028a_early_console_setup) OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup); OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8ulp-lpuart", lpuart32_imx_early_console_setup); OF_EARLYCON_DECLARE(lpuart32, "fsl,imx8qxp-lpuart", lpuart32_imx_early_console_setup); +OF_EARLYCON_DECLARE(lpuart32, "fsl,imxrt1050-lpuart", lpuart32_imx_early_console_setup); EARLYCON_DECLARE(lpuart, lpuart_early_console_setup); EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup); From 701bb5fee7c3e1a22793ce872d81fbf56440756c Mon Sep 17 00:00:00 2001 From: Sherry Sun Date: Mon, 30 Jan 2023 14:44:44 +0800 Subject: [PATCH 430/513] tty: serial: fsl_lpuart: make rx_watermark configurable for different platforms [ Upstream commit 34ebb26f12a84b744f43c5c4869516f122a2dfaa ] Add rx_watermark parameter for struct lpuart_port to make the receive watermark configurable for different platforms. No function changed. 
Signed-off-by: Sherry Sun Link: https://lore.kernel.org/r/20230130064449.9564-2-sherry.sun@nxp.com Signed-off-by: Greg Kroah-Hartman Stable-dep-of: a82c3df955f8 ("tty: serial: fsl_lpuart: reduce RX watermark to 0 on LS1028A") Signed-off-by: Sasha Levin --- drivers/tty/serial/fsl_lpuart.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index bf709ea93ec9..380d9237989b 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -257,6 +257,7 @@ struct lpuart_port { unsigned int txfifo_size; unsigned int rxfifo_size; + u8 rx_watermark; bool lpuart_dma_tx_use; bool lpuart_dma_rx_use; struct dma_chan *dma_tx_chan; @@ -281,38 +282,45 @@ struct lpuart_soc_data { enum lpuart_type devtype; char iotype; u8 reg_off; + u8 rx_watermark; }; static const struct lpuart_soc_data vf_data = { .devtype = VF610_LPUART, .iotype = UPIO_MEM, + .rx_watermark = 1, }; static const struct lpuart_soc_data ls1021a_data = { .devtype = LS1021A_LPUART, .iotype = UPIO_MEM32BE, + .rx_watermark = 1, }; static const struct lpuart_soc_data ls1028a_data = { .devtype = LS1028A_LPUART, .iotype = UPIO_MEM32, + .rx_watermark = 1, }; static struct lpuart_soc_data imx7ulp_data = { .devtype = IMX7ULP_LPUART, .iotype = UPIO_MEM32, .reg_off = IMX_REG_OFF, + .rx_watermark = 1, }; static struct lpuart_soc_data imx8qxp_data = { .devtype = IMX8QXP_LPUART, .iotype = UPIO_MEM32, .reg_off = IMX_REG_OFF, + .rx_watermark = 1, }; static struct lpuart_soc_data imxrt1050_data = { .devtype = IMXRT1050_LPUART, .iotype = UPIO_MEM32, .reg_off = IMX_REG_OFF, + .rx_watermark = 1, }; static const struct of_device_id lpuart_dt_ids[] = { @@ -1556,7 +1564,7 @@ static void lpuart_setup_watermark(struct lpuart_port *sport) } writeb(0, sport->port.membase + UARTTWFIFO); - writeb(1, sport->port.membase + UARTRWFIFO); + writeb(sport->rx_watermark, sport->port.membase + UARTRWFIFO); /* Restore cr2 */ writeb(cr2_saved, sport->port.membase + UARTCR2); @@ -1591,7 +1599,8 @@ static void lpuart32_setup_watermark(struct lpuart_port *sport) lpuart32_write(&sport->port, val, UARTFIFO); /* set the watermark */ - val = (0x1 << UARTWATER_RXWATER_OFF) | (0x0 << UARTWATER_TXWATER_OFF); + val = (sport->rx_watermark << UARTWATER_RXWATER_OFF) | + (0x0 << UARTWATER_TXWATER_OFF); lpuart32_write(&sport->port, val, UARTWATER); /* Restore cr2 */ @@ -2736,6 +2745,7 @@ static int lpuart_probe(struct platform_device *pdev) sport->port.dev = &pdev->dev; sport->port.type = PORT_LPUART; sport->devtype = sdata->devtype; + sport->rx_watermark = sdata->rx_watermark; ret = platform_get_irq(pdev, 0); if (ret < 0) return ret; From 8a6b7534020de17676066aecf53b0b2387e2151e Mon Sep 17 00:00:00 2001 From: Robert Hodaszi Date: Fri, 9 Jun 2023 14:13:34 +0200 Subject: [PATCH 431/513] tty: serial: fsl_lpuart: reduce RX watermark to 0 on LS1028A [ Upstream commit a82c3df955f8c1c726e4976527aa6ae924a67dd9 ] LS1028A is using DMA with LPUART. Having RX watermark set to 1, means DMA transactions are started only after receiving the second character. On other platforms with newer LPUART IP, Receiver Idle Empty function initiates the DMA request after the receiver is idling for 4 characters. But this feature is missing on LS1028A, which is causing a 1-character delay in the RX direction on this platform. Set RX watermark to 0 to initiate RX DMA after each character. 
Link: https://lore.kernel.org/linux-serial/20230607103459.1222426-1-robert.hodaszi@digi.com/ Fixes: 9ad9df844754 ("tty: serial: fsl_lpuart: Fix the wrong RXWATER setting for rx dma case") Cc: stable Signed-off-by: Robert Hodaszi Message-ID: <20230609121334.1878626-1-robert.hodaszi@digi.com> Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/tty/serial/fsl_lpuart.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 380d9237989b..74b445fb065b 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -300,7 +300,7 @@ static const struct lpuart_soc_data ls1021a_data = { static const struct lpuart_soc_data ls1028a_data = { .devtype = LS1028A_LPUART, .iotype = UPIO_MEM32, - .rx_watermark = 1, + .rx_watermark = 0, }; static struct lpuart_soc_data imx7ulp_data = { From a78fe5c9d8d52cb9ad2c88e60a8603c29382fc5b Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 7 Jun 2023 12:05:39 +0200 Subject: [PATCH 432/513] USB: dwc3: qcom: fix NULL-deref on suspend [ Upstream commit d2d69354226de0b333d4405981f3d9c41ba8430a ] The Qualcomm dwc3 glue driver is currently accessing the driver data of the child core device during suspend and on wakeup interrupts. This is clearly a bad idea as the child may not have probed yet or could have been unbound from its driver. The first such layering violation was part of the initial version of the driver, but this was later made worse when the hack that accesses the driver data of the grand child xhci device to configure the wakeup interrupts was added. Fixing this properly is not that easily done, so add a sanity check to make sure that the child driver data is non-NULL before dereferencing it for now. Note that this relies on subtleties like the fact that driver core is making sure that the parent is not suspended while the child is probing. Reported-by: Manivannan Sadhasivam Link: https://lore.kernel.org/all/20230325165217.31069-4-manivannan.sadhasivam@linaro.org/ Fixes: d9152161b4bf ("usb: dwc3: Add Qualcomm DWC3 glue layer driver") Fixes: 6895ea55c385 ("usb: dwc3: qcom: Configure wakeup interrupts during suspend") Cc: stable@vger.kernel.org # 3.18: a872ab303d5d: "usb: dwc3: qcom: fix use-after-free on runtime-PM wakeup" Cc: Sandeep Maheswaram Cc: Krishna Kurapati Signed-off-by: Johan Hovold Acked-by: Thinh Nguyen Reviewed-by: Manivannan Sadhasivam Message-ID: <20230607100540.31045-2-johan+linaro@kernel.org> Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/usb/dwc3/dwc3-qcom.c | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index 73c0c381e5d0..0180350a2c95 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -306,7 +306,16 @@ static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom) /* Only usable in contexts where the role can not change. */ static bool dwc3_qcom_is_host(struct dwc3_qcom *qcom) { - struct dwc3 *dwc = platform_get_drvdata(qcom->dwc3); + struct dwc3 *dwc; + + /* + * FIXME: Fix this layering violation. + */ + dwc = platform_get_drvdata(qcom->dwc3); + + /* Core driver may not have probed yet. 
*/ + if (!dwc) + return false; return dwc->xhci; } From 4eeba5d476fe224785838e99b20a72f0c4caa6c8 Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Wed, 7 Jun 2023 12:05:40 +0200 Subject: [PATCH 433/513] USB: dwc3: fix use-after-free on core driver unbind [ Upstream commit e3dbb657571509044be15184a13134fa7c1fdca1 ] Some dwc3 glue drivers are currently accessing the driver data of the child core device directly, which is clearly a bad idea as the child may not have probed yet or may have been unbound from its driver. As a workaround until the glue drivers have been fixed, clear the driver data pointer before allowing the glue parent device to runtime suspend to prevent its driver from accessing data that has been freed during unbind. Fixes: 6dd2565989b4 ("usb: dwc3: add imx8mp dwc3 glue layer driver") Fixes: 6895ea55c385 ("usb: dwc3: qcom: Configure wakeup interrupts during suspend") Cc: stable@vger.kernel.org # 5.12 Cc: Li Jun Cc: Sandeep Maheswaram Cc: Krishna Kurapati Signed-off-by: Johan Hovold Acked-by: Thinh Nguyen Reviewed-by: Manivannan Sadhasivam Message-ID: <20230607100540.31045-3-johan+linaro@kernel.org> Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/usb/dwc3/core.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index ac119a88e6c7..6377b9cf81a5 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -1719,6 +1719,11 @@ static int dwc3_remove(struct platform_device *pdev) pm_runtime_allow(&pdev->dev); pm_runtime_disable(&pdev->dev); pm_runtime_put_noidle(&pdev->dev); + /* + * HACK: Clear the driver data, which is currently accessed by parent + * glue drivers, before allowing the parent to suspend. + */ + platform_set_drvdata(pdev, NULL); pm_runtime_set_suspended(&pdev->dev); dwc3_free_event_buffers(dwc); From f93e8c5a7bd9fa4df7f6e71d2624b914f8e234a4 Mon Sep 17 00:00:00 2001 From: Sergey Shtylyov Date: Sat, 17 Jun 2023 23:36:11 +0300 Subject: [PATCH 434/513] mmc: bcm2835: fix deferred probing [ Upstream commit 71150ac12558bcd9d75e6e24cf7c872c2efd80f3 ] The driver overrides the error codes and IRQ0 returned by platform_get_irq() to -EINVAL, so if it returns -EPROBE_DEFER, the driver will fail the probe permanently instead of the deferred probing. Switch to propagating the error codes upstream. Since commit ce753ad1549c ("platform: finally disallow IRQ0 in platform_get_irq() and its ilk") IRQ0 is no longer returned by those APIs, so we now can safely ignore it... 
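For illustration, a minimal sketch of the intended idiom (placeholder names, not this driver's actual probe code):

	int irq;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;	/* may be -EPROBE_DEFER; propagate it unchanged */
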
Fixes: 660fc733bd74 ("mmc: bcm2835: Add new driver for the sdhost controller.") Cc: stable@vger.kernel.org # v5.19+ Signed-off-by: Sergey Shtylyov Link: https://lore.kernel.org/r/20230617203622.6812-2-s.shtylyov@omp.ru Signed-off-by: Ulf Hansson Signed-off-by: Sasha Levin --- drivers/mmc/host/bcm2835.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mmc/host/bcm2835.c b/drivers/mmc/host/bcm2835.c index 8c2361e66277..985079943be7 100644 --- a/drivers/mmc/host/bcm2835.c +++ b/drivers/mmc/host/bcm2835.c @@ -1413,8 +1413,8 @@ static int bcm2835_probe(struct platform_device *pdev) host->max_clk = clk_get_rate(clk); host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) { - ret = -EINVAL; + if (host->irq < 0) { + ret = host->irq; goto err; } From e2ff5cf681a8e28a71c11ca7a0c7b9b150b441b2 Mon Sep 17 00:00:00 2001 From: Sergey Shtylyov Date: Sat, 17 Jun 2023 23:36:21 +0300 Subject: [PATCH 435/513] mmc: sunxi: fix deferred probing [ Upstream commit c2df53c5806cfd746dae08e07bc8c4ad247c3b70 ] The driver overrides the error codes and IRQ0 returned by platform_get_irq() to -EINVAL, so if it returns -EPROBE_DEFER, the driver will fail the probe permanently instead of the deferred probing. Switch to propagating the error codes upstream. Since commit ce753ad1549c ("platform: finally disallow IRQ0 in platform_get_irq() and its ilk") IRQ0 is no longer returned by those APIs, so we now can safely ignore it... Fixes: 2408a08583d2 ("mmc: sunxi-mmc: Handle return value of platform_get_irq") Cc: stable@vger.kernel.org # v5.19+ Signed-off-by: Sergey Shtylyov Reviewed-by: Jernej Skrabec Link: https://lore.kernel.org/r/20230617203622.6812-12-s.shtylyov@omp.ru Signed-off-by: Ulf Hansson Signed-off-by: Sasha Levin --- drivers/mmc/host/sunxi-mmc.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c index 032f2c03e8fb..3c213816db78 100644 --- a/drivers/mmc/host/sunxi-mmc.c +++ b/drivers/mmc/host/sunxi-mmc.c @@ -1341,8 +1341,8 @@ static int sunxi_mmc_resource_request(struct sunxi_mmc_host *host, return ret; host->irq = platform_get_irq(pdev, 0); - if (host->irq <= 0) { - ret = -EINVAL; + if (host->irq < 0) { + ret = host->irq; goto error_disable_mmc; } From 4ffbfe1c980f097ce5be97b0def096c24c6d14dd Mon Sep 17 00:00:00 2001 From: Andreas Kemnade Date: Fri, 24 Sep 2021 11:14:37 +0200 Subject: [PATCH 436/513] ARM: dts: imx6sll: fixup of operating points [ Upstream commit 1875903019ea6e32e6e544c1631b119e4fd60b20 ] Make operating point definitions comply with binding specifications. 
Signed-off-by: Andreas Kemnade Reviewed-by: Krzysztof Kozlowski Signed-off-by: Shawn Guo Stable-dep-of: ee70b908f77a ("ARM: dts: nxp/imx6sll: fix wrong property name in usbphy node") Signed-off-by: Sasha Levin --- arch/arm/boot/dts/imx6sll.dtsi | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi index eecb2f68a1c3..2873369a57c0 100644 --- a/arch/arm/boot/dts/imx6sll.dtsi +++ b/arch/arm/boot/dts/imx6sll.dtsi @@ -51,20 +51,18 @@ device_type = "cpu"; reg = <0>; next-level-cache = <&L2>; - operating-points = < + operating-points = /* kHz uV */ - 996000 1275000 - 792000 1175000 - 396000 1075000 - 198000 975000 - >; - fsl,soc-operating-points = < + <996000 1275000>, + <792000 1175000>, + <396000 1075000>, + <198000 975000>; + fsl,soc-operating-points = /* ARM kHz SOC-PU uV */ - 996000 1175000 - 792000 1175000 - 396000 1175000 - 198000 1175000 - >; + <996000 1175000>, + <792000 1175000>, + <396000 1175000>, + <198000 1175000>; clock-latency = <61036>; /* two CLK32 periods */ #cooling-cells = <2>; clocks = <&clks IMX6SLL_CLK_ARM>, From 170773563ef6c02acd0048ff1290b9f40d349e62 Mon Sep 17 00:00:00 2001 From: Xu Yang Date: Mon, 17 Jul 2023 10:28:33 +0800 Subject: [PATCH 437/513] ARM: dts: nxp/imx6sll: fix wrong property name in usbphy node [ Upstream commit ee70b908f77a9d8f689dea986f09e6d7dc481934 ] Property name "phy-3p0-supply" is used instead of "phy-reg_3p0-supply". Fixes: 9f30b6b1a957 ("ARM: dts: imx: Add basic dtsi file for imx6sll") cc: Signed-off-by: Xu Yang Signed-off-by: Shawn Guo Signed-off-by: Sasha Levin --- arch/arm/boot/dts/imx6sll.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/imx6sll.dtsi b/arch/arm/boot/dts/imx6sll.dtsi index 2873369a57c0..3659fd5ecfa6 100644 --- a/arch/arm/boot/dts/imx6sll.dtsi +++ b/arch/arm/boot/dts/imx6sll.dtsi @@ -552,7 +552,7 @@ reg = <0x020ca000 0x1000>; interrupts = ; clocks = <&clks IMX6SLL_CLK_USBPHY2>; - phy-reg_3p0-supply = <®_3p0>; + phy-3p0-supply = <®_3p0>; fsl,anatop = <&anatop>; }; From edf3b5aadb2515c808200b904baa5b70a727f0ac Mon Sep 17 00:00:00 2001 From: Naohiro Aota Date: Tue, 6 Jun 2023 14:36:34 +0900 Subject: [PATCH 438/513] btrfs: move out now unused BG from the reclaim list [ Upstream commit a9f189716cf15913c453299d72f69c51a9b0f86b ] An unused block group is easy to remove to free up space and should be reclaimed fast. Such block group can often already be a target of the reclaim process. As we check list_empty(&bg->bg_list), we keep it in the reclaim list. That block group is never reclaimed until the file system is filled e.g. up to 75%. Instead, we can move unused block group to the unused list and delete it fast. 
Fixes: 18bb8bbf13c1 ("btrfs: zoned: automatically reclaim zones") CC: stable@vger.kernel.org # 5.15+ Reviewed-by: Filipe Manana Reviewed-by: Johannes Thumshirn Signed-off-by: Naohiro Aota Signed-off-by: David Sterba Signed-off-by: Sasha Levin --- fs/btrfs/block-group.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index d24cef671c1a..4ca6828586af 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -1475,11 +1475,14 @@ void btrfs_mark_bg_unused(struct btrfs_block_group *bg) { struct btrfs_fs_info *fs_info = bg->fs_info; + trace_btrfs_add_unused_block_group(bg); spin_lock(&fs_info->unused_bgs_lock); if (list_empty(&bg->bg_list)) { btrfs_get_block_group(bg); - trace_btrfs_add_unused_block_group(bg); list_add_tail(&bg->bg_list, &fs_info->unused_bgs); + } else { + /* Pull out the block group from the reclaim_bgs list. */ + list_move_tail(&bg->bg_list, &fs_info->unused_bgs); } spin_unlock(&fs_info->unused_bgs_lock); } From 5b7d5c2dd664eb8b9a06ecbc06e28d39359c422e Mon Sep 17 00:00:00 2001 From: Wolfram Sang Date: Thu, 29 Jun 2023 14:05:26 +0200 Subject: [PATCH 439/513] virtio-mmio: don't break lifecycle of vm_dev [ Upstream commit 55c91fedd03d7b9cf0c5199b2eb12b9b8e95281a ] vm_dev has a separate lifecycle because it has a 'struct device' embedded. Thus, having a release callback for it is correct. Allocating the vm_dev struct with devres totally breaks this protection, though. Instead of waiting for the vm_dev release callback, the memory is freed when the platform_device is removed. Resulting in a use-after-free when finally the callback is to be called. To easily see the problem, compile the kernel with CONFIG_DEBUG_KOBJECT_RELEASE and unbind with sysfs. The fix is easy, don't use devres in this case. Found during my research about object lifetime problems. Fixes: 7eb781b1bbb7 ("virtio_mmio: add cleanup for virtio_mmio_probe") Signed-off-by: Wolfram Sang Message-Id: <20230629120526.7184-1-wsa+renesas@sang-engineering.com> Signed-off-by: Michael S. Tsirkin Signed-off-by: Sasha Levin --- drivers/virtio/virtio_mmio.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c index fe696aafaed8..f4d43d60d710 100644 --- a/drivers/virtio/virtio_mmio.c +++ b/drivers/virtio/virtio_mmio.c @@ -572,9 +572,8 @@ static void virtio_mmio_release_dev(struct device *_d) struct virtio_device *vdev = container_of(_d, struct virtio_device, dev); struct virtio_mmio_device *vm_dev = to_virtio_mmio_device(vdev); - struct platform_device *pdev = vm_dev->pdev; - devm_kfree(&pdev->dev, vm_dev); + kfree(vm_dev); } /* Platform device */ @@ -585,7 +584,7 @@ static int virtio_mmio_probe(struct platform_device *pdev) unsigned long magic; int rc; - vm_dev = devm_kzalloc(&pdev->dev, sizeof(*vm_dev), GFP_KERNEL); + vm_dev = kzalloc(sizeof(*vm_dev), GFP_KERNEL); if (!vm_dev) return -ENOMEM; From a5ae5a81bc191d82d27bafff554560f7fd8d8c4d Mon Sep 17 00:00:00 2001 From: Maxime Coquelin Date: Wed, 5 Jul 2023 13:45:05 +0200 Subject: [PATCH 440/513] vduse: Use proper spinlock for IRQ injection [ Upstream commit 7ca26efb09a1543fddb29308ea3b63b66cb5d3ee ] The IRQ injection work used spin_lock_irq() to protect the scheduling of the softirq, but spin_lock_bh() should be used. With spin_lock_irq(), we noticed delay of more than 6 seconds between the time a NAPI polling work is scheduled and the time it is executed. 
Fixes: c8a6153b6c59 ("vduse: Introduce VDUSE - vDPA Device in Userspace") Cc: xieyongji@bytedance.com Suggested-by: Jason Wang Signed-off-by: Maxime Coquelin Message-Id: <20230705114505.63274-1-maxime.coquelin@redhat.com> Signed-off-by: Michael S. Tsirkin Acked-by: Jason Wang Reviewed-by: Xie Yongji Signed-off-by: Sasha Levin --- drivers/vdpa/vdpa_user/vduse_dev.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/vdpa/vdpa_user/vduse_dev.c b/drivers/vdpa/vdpa_user/vduse_dev.c index 30ae4237f3dd..564864f039d2 100644 --- a/drivers/vdpa/vdpa_user/vduse_dev.c +++ b/drivers/vdpa/vdpa_user/vduse_dev.c @@ -879,10 +879,10 @@ static void vduse_dev_irq_inject(struct work_struct *work) { struct vduse_dev *dev = container_of(work, struct vduse_dev, inject); - spin_lock_irq(&dev->irq_lock); + spin_lock_bh(&dev->irq_lock); if (dev->config_cb.callback) dev->config_cb.callback(dev->config_cb.private); - spin_unlock_irq(&dev->irq_lock); + spin_unlock_bh(&dev->irq_lock); } static void vduse_vq_irq_inject(struct work_struct *work) @@ -890,10 +890,10 @@ static void vduse_vq_irq_inject(struct work_struct *work) struct vduse_virtqueue *vq = container_of(work, struct vduse_virtqueue, inject); - spin_lock_irq(&vq->irq_lock); + spin_lock_bh(&vq->irq_lock); if (vq->ready && vq->cb.callback) vq->cb.callback(vq->cb.private); - spin_unlock_irq(&vq->irq_lock); + spin_unlock_bh(&vq->irq_lock); } static int vduse_dev_queue_irq_work(struct vduse_dev *dev, From b99f490ea87ebcca3a429fd8837067feb56a4c7c Mon Sep 17 00:00:00 2001 From: Steve French Date: Thu, 10 Aug 2023 15:34:21 -0500 Subject: [PATCH 441/513] cifs: fix potential oops in cifs_oplock_break [ Upstream commit e8f5f849ffce24490eb9449e98312b66c0dba76f ] With deferred close we can have closes that race with lease breaks, and so with the current checks for whether to send the lease response, oplock_response(), this can mean that an unmount (kill_sb) can occur just before we were checking if the tcon->ses is valid. See below: [Fri Aug 4 04:12:50 2023] RIP: 0010:cifs_oplock_break+0x1f7/0x5b0 [cifs] [Fri Aug 4 04:12:50 2023] Code: 7d a8 48 8b 7d c0 c0 e9 02 48 89 45 b8 41 89 cf e8 3e f5 ff ff 4c 89 f7 41 83 e7 01 e8 82 b3 03 f2 49 8b 45 50 48 85 c0 74 5e <48> 83 78 60 00 74 57 45 84 ff 75 52 48 8b 43 98 48 83 eb 68 48 39 [Fri Aug 4 04:12:50 2023] RSP: 0018:ffffb30607ddbdf8 EFLAGS: 00010206 [Fri Aug 4 04:12:50 2023] RAX: 632d223d32612022 RBX: ffff97136944b1e0 RCX: 0000000080100009 [Fri Aug 4 04:12:50 2023] RDX: 0000000000000001 RSI: 0000000080100009 RDI: ffff97136944b188 [Fri Aug 4 04:12:50 2023] RBP: ffffb30607ddbe58 R08: 0000000000000001 R09: ffffffffc08e0900 [Fri Aug 4 04:12:50 2023] R10: 0000000000000001 R11: 000000000000000f R12: ffff97136944b138 [Fri Aug 4 04:12:50 2023] R13: ffff97149147c000 R14: ffff97136944b188 R15: 0000000000000000 [Fri Aug 4 04:12:50 2023] FS: 0000000000000000(0000) GS:ffff9714f7c00000(0000) knlGS:0000000000000000 [Fri Aug 4 04:12:50 2023] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [Fri Aug 4 04:12:50 2023] CR2: 00007fd8de9c7590 CR3: 000000011228e000 CR4: 0000000000350ef0 [Fri Aug 4 04:12:50 2023] Call Trace: [Fri Aug 4 04:12:50 2023] [Fri Aug 4 04:12:50 2023] process_one_work+0x225/0x3d0 [Fri Aug 4 04:12:50 2023] worker_thread+0x4d/0x3e0 [Fri Aug 4 04:12:50 2023] ? process_one_work+0x3d0/0x3d0 [Fri Aug 4 04:12:50 2023] kthread+0x12a/0x150 [Fri Aug 4 04:12:50 2023] ? 
set_kthread_struct+0x50/0x50 [Fri Aug 4 04:12:50 2023] ret_from_fork+0x22/0x30 [Fri Aug 4 04:12:50 2023] To fix this change the ordering of the checks before sending the oplock_response to first check if the openFileList is empty. Fixes: da787d5b7498 ("SMB3: Do not send lease break acknowledgment if all file handles have been closed") Suggested-by: Bharath SM Reviewed-by: Bharath SM Reviewed-by: Shyam Prasad N Signed-off-by: Paulo Alcantara (SUSE) Signed-off-by: Steve French Signed-off-by: Sasha Levin --- fs/cifs/file.c | 23 ++++++++++++++++------- 1 file changed, 16 insertions(+), 7 deletions(-) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index e65fbae9e804..369620e82b84 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4865,9 +4865,11 @@ void cifs_oplock_break(struct work_struct *work) struct cifsFileInfo *cfile = container_of(work, struct cifsFileInfo, oplock_break); struct inode *inode = d_inode(cfile->dentry); + struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct cifsInodeInfo *cinode = CIFS_I(inode); - struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); - struct TCP_Server_Info *server = tcon->ses->server; + struct cifs_tcon *tcon; + struct TCP_Server_Info *server; + struct tcon_link *tlink; int rc = 0; bool purge_cache = false, oplock_break_cancelled; __u64 persistent_fid, volatile_fid; @@ -4876,6 +4878,12 @@ void cifs_oplock_break(struct work_struct *work) wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS, TASK_UNINTERRUPTIBLE); + tlink = cifs_sb_tlink(cifs_sb); + if (IS_ERR(tlink)) + goto out; + tcon = tlink_tcon(tlink); + server = tcon->ses->server; + server->ops->downgrade_oplock(server, cinode, cfile->oplock_level, cfile->oplock_epoch, &purge_cache); @@ -4925,18 +4933,19 @@ void cifs_oplock_break(struct work_struct *work) /* * MS-SMB2 3.2.5.19.1 and 3.2.5.19.2 (and MS-CIFS 3.2.5.42) do not require * an acknowledgment to be sent when the file has already been closed. - * check for server null, since can race with kill_sb calling tree disconnect. */ spin_lock(&cinode->open_file_lock); - if (tcon->ses && tcon->ses->server && !oplock_break_cancelled && - !list_empty(&cinode->openFileList)) { + /* check list empty since can race with kill_sb calling tree disconnect */ + if (!oplock_break_cancelled && !list_empty(&cinode->openFileList)) { spin_unlock(&cinode->open_file_lock); - rc = tcon->ses->server->ops->oplock_response(tcon, persistent_fid, - volatile_fid, net_fid, cinode); + rc = server->ops->oplock_response(tcon, persistent_fid, + volatile_fid, net_fid, cinode); cifs_dbg(FYI, "Oplock release rc = %d\n", rc); } else spin_unlock(&cinode->open_file_lock); + cifs_put_tlink(tlink); +out: cifs_done_oplock_break(cinode); } From c4e671dae50e04e08571b58d818825d8c93df040 Mon Sep 17 00:00:00 2001 From: Chengfeng Ye Date: Fri, 7 Jul 2023 08:49:41 +0000 Subject: [PATCH 442/513] i2c: bcm-iproc: Fix bcm_iproc_i2c_isr deadlock issue commit 4caf4cb1eaed469742ef719f2cc024b1ec3fa9e6 upstream. iproc_i2c_rd_reg() and iproc_i2c_wr_reg() are called from both interrupt context (e.g. bcm_iproc_i2c_isr) and process context (e.g. bcm_iproc_i2c_suspend). Therefore, interrupts should be disabled to avoid potential deadlock. To prevent this scenario, use spin_lock_irqsave(). 
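The general pattern, as a minimal sketch with placeholder names (dev->lock and dev->base are illustrative, not the driver's symbols):

	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);	/* disables local IRQs, safe in any context */
	writel(val, dev->base + offset);
	spin_unlock_irqrestore(&dev->lock, flags);
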
Fixes: 9a1038728037 ("i2c: iproc: add NIC I2C support") Signed-off-by: Chengfeng Ye Acked-by: Ray Jui Reviewed-by: Andi Shyti Signed-off-by: Wolfram Sang Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-bcm-iproc.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/drivers/i2c/busses/i2c-bcm-iproc.c b/drivers/i2c/busses/i2c-bcm-iproc.c index 6304d1dd2dd6..ec6571b82fff 100644 --- a/drivers/i2c/busses/i2c-bcm-iproc.c +++ b/drivers/i2c/busses/i2c-bcm-iproc.c @@ -243,13 +243,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c, u32 offset) { u32 val; + unsigned long flags; if (iproc_i2c->idm_base) { - spin_lock(&iproc_i2c->idm_lock); + spin_lock_irqsave(&iproc_i2c->idm_lock, flags); writel(iproc_i2c->ape_addr_mask, iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET); val = readl(iproc_i2c->base + offset); - spin_unlock(&iproc_i2c->idm_lock); + spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags); } else { val = readl(iproc_i2c->base + offset); } @@ -260,12 +261,14 @@ static inline u32 iproc_i2c_rd_reg(struct bcm_iproc_i2c_dev *iproc_i2c, static inline void iproc_i2c_wr_reg(struct bcm_iproc_i2c_dev *iproc_i2c, u32 offset, u32 val) { + unsigned long flags; + if (iproc_i2c->idm_base) { - spin_lock(&iproc_i2c->idm_lock); + spin_lock_irqsave(&iproc_i2c->idm_lock, flags); writel(iproc_i2c->ape_addr_mask, iproc_i2c->idm_base + IDM_CTRL_DIRECT_OFFSET); writel(val, iproc_i2c->base + offset); - spin_unlock(&iproc_i2c->idm_lock); + spin_unlock_irqrestore(&iproc_i2c->idm_lock, flags); } else { writel(val, iproc_i2c->base + offset); } From 184f1b68bb99b5927ab63aefb4a58ec2f9cc6ca9 Mon Sep 17 00:00:00 2001 From: Yicong Yang Date: Tue, 1 Aug 2023 20:46:25 +0800 Subject: [PATCH 443/513] i2c: hisi: Only handle the interrupt of the driver's transfer commit fff67c1b17ee093947bdcbac6f64d072e644159a upstream. The controller may be shared with other port, for example the firmware. Handle the interrupt from other sources will cause crash since some data are not initialized. So only handle the interrupt of the driver's transfer and discard others. Fixes: d62fbdb99a85 ("i2c: add support for HiSilicon I2C controller") Signed-off-by: Yicong Yang Reviewed-by: Andi Shyti Link: https://lore.kernel.org/r/20230801124625.63587-1-yangyicong@huawei.com Signed-off-by: Wolfram Sang Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-hisi.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/i2c/busses/i2c-hisi.c b/drivers/i2c/busses/i2c-hisi.c index 1f406e6f4ece..6bdebe51ea11 100644 --- a/drivers/i2c/busses/i2c-hisi.c +++ b/drivers/i2c/busses/i2c-hisi.c @@ -329,6 +329,14 @@ static irqreturn_t hisi_i2c_irq(int irq, void *context) struct hisi_i2c_controller *ctlr = context; u32 int_stat; + /* + * Don't handle the interrupt if cltr->completion is NULL. We may + * reach here because the interrupt is spurious or the transfer is + * started by another port (e.g. firmware) rather than us. + */ + if (!ctlr->completion) + return IRQ_NONE; + int_stat = readl(ctlr->iobase + HISI_I2C_INT_MSTAT); hisi_i2c_clear_int(ctlr, int_stat); if (!(int_stat & HISI_I2C_INT_ALL)) From bb70e2b70f8d917dd30d3b575faffc707976ef28 Mon Sep 17 00:00:00 2001 From: Yuanjun Gong Date: Fri, 28 Jul 2023 01:03:18 +0800 Subject: [PATCH 444/513] fbdev: mmp: fix value check in mmphw_probe() commit 0872b2c0abc0e84ac82472959c8e14e35277549c upstream. in mmphw_probe(), check the return value of clk_prepare_enable() and return the error code if clk_prepare_enable() returns an unexpected value. 
Fixes: d63028c38905 ("video: mmp display controller support") Signed-off-by: Yuanjun Gong Signed-off-by: Helge Deller Signed-off-by: Greg Kroah-Hartman --- drivers/video/fbdev/mmp/hw/mmp_ctrl.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c index 061a105afb86..27c3ee5df8de 100644 --- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c +++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c @@ -518,7 +518,9 @@ static int mmphw_probe(struct platform_device *pdev) ret = -ENOENT; goto failed; } - clk_prepare_enable(ctrl->clk); + ret = clk_prepare_enable(ctrl->clk); + if (ret) + goto failed; /* init global regs */ ctrl_set_default(ctrl); From 8ef25fb13494e35c6dbe15445c7875fa92bc3e8b Mon Sep 17 00:00:00 2001 From: Nathan Lynch Date: Thu, 10 Aug 2023 22:37:55 -0500 Subject: [PATCH 445/513] powerpc/rtas_flash: allow user copy to flash block cache objects commit 4f3175979e62de3b929bfa54a0db4b87d36257a7 upstream. With hardened usercopy enabled (CONFIG_HARDENED_USERCOPY=y), using the /proc/powerpc/rtas/firmware_update interface to prepare a system firmware update yields a BUG(): kernel BUG at mm/usercopy.c:102! Oops: Exception in kernel mode, sig: 5 [#1] LE PAGE_SIZE=64K MMU=Hash SMP NR_CPUS=2048 NUMA pSeries Modules linked in: CPU: 0 PID: 2232 Comm: dd Not tainted 6.5.0-rc3+ #2 Hardware name: IBM,8408-E8E POWER8E (raw) 0x4b0201 0xf000004 of:IBM,FW860.50 (SV860_146) hv:phyp pSeries NIP: c0000000005991d0 LR: c0000000005991cc CTR: 0000000000000000 REGS: c0000000148c76a0 TRAP: 0700 Not tainted (6.5.0-rc3+) MSR: 8000000000029033 CR: 24002242 XER: 0000000c CFAR: c0000000001fbd34 IRQMASK: 0 [ ... GPRs omitted ... ] NIP usercopy_abort+0xa0/0xb0 LR usercopy_abort+0x9c/0xb0 Call Trace: usercopy_abort+0x9c/0xb0 (unreliable) __check_heap_object+0x1b4/0x1d0 __check_object_size+0x2d0/0x380 rtas_flash_write+0xe4/0x250 proc_reg_write+0xfc/0x160 vfs_write+0xfc/0x4e0 ksys_write+0x90/0x160 system_call_exception+0x178/0x320 system_call_common+0x160/0x2c4 The blocks of the firmware image are copied directly from user memory to objects allocated from flash_block_cache, so flash_block_cache must be created using kmem_cache_create_usercopy() to mark it safe for user access. 
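A minimal sketch of whitelisting an entire object for user copies (generic names; passing useroffset 0 and usersize equal to the object size marks the whole buffer as safe):

	cache = kmem_cache_create_usercopy("example_cache", BUF_SIZE, BUF_SIZE,
					   0, 0, BUF_SIZE, NULL);
	/* args: name, size, align, flags, useroffset, usersize, ctor */
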
Fixes: 6d07d1cd300f ("usercopy: Restrict non-usercopy caches to size 0") Signed-off-by: Nathan Lynch Reviewed-by: Kees Cook [mpe: Trim and indent oops] Signed-off-by: Michael Ellerman Link: https://msgid.link/20230810-rtas-flash-vs-hardened-usercopy-v2-1-dcf63793a938@linux.ibm.com Signed-off-by: Greg Kroah-Hartman --- arch/powerpc/kernel/rtas_flash.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index a99179d83538..56bd0aa30f93 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c @@ -710,9 +710,9 @@ static int __init rtas_flash_init(void) if (!rtas_validate_flash_data.buf) return -ENOMEM; - flash_block_cache = kmem_cache_create("rtas_flash_cache", - RTAS_BLK_SIZE, RTAS_BLK_SIZE, 0, - NULL); + flash_block_cache = kmem_cache_create_usercopy("rtas_flash_cache", + RTAS_BLK_SIZE, RTAS_BLK_SIZE, + 0, 0, RTAS_BLK_SIZE, NULL); if (!flash_block_cache) { printk(KERN_ERR "%s: failed to create block cache\n", __func__); From 2a523446438376bb7c224f3169ae9b98ce0fb893 Mon Sep 17 00:00:00 2001 From: Yi Yang Date: Fri, 11 Aug 2023 11:11:21 +0800 Subject: [PATCH 446/513] tty: n_gsm: fix the UAF caused by race condition in gsm_cleanup_mux commit 3c4f8333b582487a2d1e02171f1465531cde53e3 upstream. In commit 9b9c8195f3f0 ("tty: n_gsm: fix UAF in gsm_cleanup_mux"), the UAF problem is not completely fixed. There is a race condition in gsm_cleanup_mux(), which caused this UAF. The UAF problem is triggered by the following race: task[5046] task[5054] ----------------------- ----------------------- gsm_cleanup_mux(); dlci = gsm->dlci[0]; mutex_lock(&gsm->mutex); gsm_cleanup_mux(); dlci = gsm->dlci[0]; //Didn't take the lock gsm_dlci_release(gsm->dlci[i]); gsm->dlci[i] = NULL; mutex_unlock(&gsm->mutex); mutex_lock(&gsm->mutex); dlci->dead = true; //UAF Fix it by assigning values after mutex_lock(). Link: https://syzkaller.appspot.com/text?tag=CrashReport&x=176188b5a80000 Cc: stable Fixes: 9b9c8195f3f0 ("tty: n_gsm: fix UAF in gsm_cleanup_mux") Fixes: aa371e96f05d ("tty: n_gsm: fix restart handling via CLD command") Signed-off-by: Yi Yang Co-developed-by: Qiumiao Zhang Signed-off-by: Qiumiao Zhang Link: https://lore.kernel.org/r/20230811031121.153237-1-yiyang13@huawei.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/n_gsm.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c index 54173c23263c..67889c014414 100644 --- a/drivers/tty/n_gsm.c +++ b/drivers/tty/n_gsm.c @@ -2387,12 +2387,13 @@ static void gsm_error(struct gsm_mux *gsm, static void gsm_cleanup_mux(struct gsm_mux *gsm, bool disc) { int i; - struct gsm_dlci *dlci = gsm->dlci[0]; + struct gsm_dlci *dlci; struct gsm_msg *txq, *ntxq; gsm->dead = true; mutex_lock(&gsm->mutex); + dlci = gsm->dlci[0]; if (dlci) { if (disc && dlci->state != DLCI_CLOSED) { gsm_dlci_begin_close(dlci); From 77698e6ff6f04c5f866b1832a183ca3acc073916 Mon Sep 17 00:00:00 2001 From: Sherry Sun Date: Tue, 1 Aug 2023 10:23:04 +0800 Subject: [PATCH 447/513] tty: serial: fsl_lpuart: Clear the error flags by writing 1 for lpuart32 platforms commit 282069845af388b08d622ad192b831dcd0549c62 upstream. Do not read the data register to clear the error flags for lpuart32 platforms, the additional read may cause the receive FIFO underflow since the DMA has already read the data register. Actually all lpuart32 platforms support write 1 to clear those error bits, let's use this method to better clear the error flags. 
Fixes: 42b68768e51b ("serial: fsl_lpuart: DMA support for 32-bit variant") Cc: stable Signed-off-by: Sherry Sun Link: https://lore.kernel.org/r/20230801022304.24251-1-sherry.sun@nxp.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/serial/fsl_lpuart.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c index 74b445fb065b..e0d576b88d7d 100644 --- a/drivers/tty/serial/fsl_lpuart.c +++ b/drivers/tty/serial/fsl_lpuart.c @@ -1107,8 +1107,8 @@ static void lpuart_copy_rx_to_tty(struct lpuart_port *sport) unsigned long sr = lpuart32_read(&sport->port, UARTSTAT); if (sr & (UARTSTAT_PE | UARTSTAT_FE)) { - /* Read DR to clear the error flags */ - lpuart32_read(&sport->port, UARTDATA); + /* Clear the error flags */ + lpuart32_write(&sport->port, sr, UARTSTAT); if (sr & UARTSTAT_PE) sport->port.icount.parity++; From b966e9e1e250dfdb41a7f41775faea4a37af923c Mon Sep 17 00:00:00 2001 From: xiaoshoukui Date: Tue, 15 Aug 2023 02:55:59 -0400 Subject: [PATCH 448/513] btrfs: fix BUG_ON condition in btrfs_cancel_balance commit 29eefa6d0d07e185f7bfe9576f91e6dba98189c2 upstream. Pausing and canceling balance can race to interrupt balance lead to BUG_ON panic in btrfs_cancel_balance. The BUG_ON condition in btrfs_cancel_balance does not take this race scenario into account. However, the race condition has no other side effects. We can fix that. Reproducing it with panic trace like this: kernel BUG at fs/btrfs/volumes.c:4618! RIP: 0010:btrfs_cancel_balance+0x5cf/0x6a0 Call Trace: ? do_nanosleep+0x60/0x120 ? hrtimer_nanosleep+0xb7/0x1a0 ? sched_core_clone_cookie+0x70/0x70 btrfs_ioctl_balance_ctl+0x55/0x70 btrfs_ioctl+0xa46/0xd20 __x64_sys_ioctl+0x7d/0xa0 do_syscall_64+0x38/0x80 entry_SYSCALL_64_after_hwframe+0x63/0xcd Race scenario as follows: > mutex_unlock(&fs_info->balance_mutex); > -------------------- > .......issue pause and cancel req in another thread > -------------------- > ret = __btrfs_balance(fs_info); > > mutex_lock(&fs_info->balance_mutex); > if (ret == -ECANCELED && atomic_read(&fs_info->balance_pause_req)) { > btrfs_info(fs_info, "balance: paused"); > btrfs_exclop_balance(fs_info, BTRFS_EXCLOP_BALANCE_PAUSED); > } CC: stable@vger.kernel.org # 4.19+ Signed-off-by: xiaoshoukui Reviewed-by: David Sterba Signed-off-by: David Sterba Signed-off-by: Greg Kroah-Hartman --- fs/btrfs/volumes.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 5e191860e8a8..0e9236a745b8 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -4636,8 +4636,7 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info) } } - BUG_ON(fs_info->balance_ctl || - test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); + ASSERT(!test_bit(BTRFS_FS_BALANCE_RUNNING, &fs_info->flags)); atomic_dec(&fs_info->balance_cancel_req); mutex_unlock(&fs_info->balance_mutex); return 0; From 416c538684bd62f6f08291f25294470356c87da0 Mon Sep 17 00:00:00 2001 From: Quan Nguyen Date: Wed, 26 Jul 2023 15:00:00 +0700 Subject: [PATCH 449/513] i2c: designware: Correct length byte validation logic commit 49d4db3953cb9004ff94efc0c176e026c820af5a upstream. Commit 0daede80f870 ("i2c: designware: Convert driver to using regmap API") changes the logic to validate the whole 32-bit return value of DW_IC_DATA_CMD register instead of 8-bit LSB without reason. 
Later, commit f53f15ba5a85 ("i2c: designware: Get right data length"), introduced partial fix but not enough because the "tmp > 0" still test tmp as 32-bit value and is wrong in case the IC_DATA_CMD[11] is set. Revert the logic to just before commit 0daede80f870 ("i2c: designware: Convert driver to using regmap API"). Fixes: f53f15ba5a85 ("i2c: designware: Get right data length") Fixes: 0daede80f870 ("i2c: designware: Convert driver to using regmap API") Cc: stable@vger.kernel.org Signed-off-by: Tam Nguyen Signed-off-by: Quan Nguyen Acked-by: Jarkko Nikula Link: https://lore.kernel.org/r/20230726080001.337353-2-tamnguyenchi@os.amperecomputing.com Reviewed-by: Andi Shyti Signed-off-by: Wolfram Sang Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-designware-master.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index e0559eff8928..ca5cdec680d5 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -525,9 +525,10 @@ i2c_dw_read(struct dw_i2c_dev *dev) u32 flags = msgs[dev->msg_read_idx].flags; regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); + tmp &= DW_IC_DATA_CMD_DAT; /* Ensure length byte is a valid value */ if (flags & I2C_M_RECV_LEN && - (tmp & DW_IC_DATA_CMD_DAT) <= I2C_SMBUS_BLOCK_MAX && tmp > 0) { + tmp <= I2C_SMBUS_BLOCK_MAX && tmp > 0) { len = i2c_dw_recv_len(dev, tmp); } *buf++ = tmp; From 080dedb1cad81c5f3d9a0aa1451622c60616f95a Mon Sep 17 00:00:00 2001 From: Tam Nguyen Date: Wed, 26 Jul 2023 15:00:01 +0700 Subject: [PATCH 450/513] i2c: designware: Handle invalid SMBus block data response length value commit 69f035c480d76f12bf061148ccfd578e1099e5fc upstream. In the I2C_FUNC_SMBUS_BLOCK_DATA case, the invalid length byte value (outside of 1-32) of the SMBus block data response from the Slave device is not correctly handled by the I2C Designware driver. In case IC_EMPTYFIFO_HOLD_MASTER_EN==1, which cannot be detected from the registers, the Master can be disabled only if the STOP bit is set. Without STOP bit set, the Master remains active, holding the bus until receiving a block data response length. This hangs the bus and is unrecoverable. Avoid this by issuing another dump read to reach the stop condition when an invalid length byte is received. Cc: stable@vger.kernel.org Signed-off-by: Tam Nguyen Acked-by: Jarkko Nikula Link: https://lore.kernel.org/r/20230726080001.337353-3-tamnguyenchi@os.amperecomputing.com Reviewed-by: Andi Shyti Signed-off-by: Wolfram Sang Signed-off-by: Greg Kroah-Hartman --- drivers/i2c/busses/i2c-designware-master.c | 15 +++++++++++++-- 1 file changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/i2c/busses/i2c-designware-master.c b/drivers/i2c/busses/i2c-designware-master.c index ca5cdec680d5..b79e1380ff68 100644 --- a/drivers/i2c/busses/i2c-designware-master.c +++ b/drivers/i2c/busses/i2c-designware-master.c @@ -527,8 +527,19 @@ i2c_dw_read(struct dw_i2c_dev *dev) regmap_read(dev->map, DW_IC_DATA_CMD, &tmp); tmp &= DW_IC_DATA_CMD_DAT; /* Ensure length byte is a valid value */ - if (flags & I2C_M_RECV_LEN && - tmp <= I2C_SMBUS_BLOCK_MAX && tmp > 0) { + if (flags & I2C_M_RECV_LEN) { + /* + * if IC_EMPTYFIFO_HOLD_MASTER_EN is set, which cannot be + * detected from the registers, the controller can be + * disabled if the STOP bit is set. But it is only set + * after receiving block data response length in + * I2C_FUNC_SMBUS_BLOCK_DATA case. 
That needs to read + * another byte with STOP bit set when the block data + * response length is invalid to complete the transaction. + */ + if (!tmp || tmp > I2C_SMBUS_BLOCK_MAX) + tmp = 1; + len = i2c_dw_recv_len(dev, tmp); } *buf++ = tmp; From 1960f468078b3471d1ee9aafa0cf06c8c34a505f Mon Sep 17 00:00:00 2001 From: Lin Ma Date: Tue, 27 Jun 2023 11:31:38 +0800 Subject: [PATCH 451/513] net: xfrm: Fix xfrm_address_filter OOB read [ Upstream commit dfa73c17d55b921e1d4e154976de35317e43a93a ] We found below OOB crash: [ 44.211730] ================================================================== [ 44.212045] BUG: KASAN: slab-out-of-bounds in memcmp+0x8b/0xb0 [ 44.212045] Read of size 8 at addr ffff88800870f320 by task poc.xfrm/97 [ 44.212045] [ 44.212045] CPU: 0 PID: 97 Comm: poc.xfrm Not tainted 6.4.0-rc7-00072-gdad9774deaf1-dirty #4 [ 44.212045] Call Trace: [ 44.212045] [ 44.212045] dump_stack_lvl+0x37/0x50 [ 44.212045] print_report+0xcc/0x620 [ 44.212045] ? __virt_addr_valid+0xf3/0x170 [ 44.212045] ? memcmp+0x8b/0xb0 [ 44.212045] kasan_report+0xb2/0xe0 [ 44.212045] ? memcmp+0x8b/0xb0 [ 44.212045] kasan_check_range+0x39/0x1c0 [ 44.212045] memcmp+0x8b/0xb0 [ 44.212045] xfrm_state_walk+0x21c/0x420 [ 44.212045] ? __pfx_dump_one_state+0x10/0x10 [ 44.212045] xfrm_dump_sa+0x1e2/0x290 [ 44.212045] ? __pfx_xfrm_dump_sa+0x10/0x10 [ 44.212045] ? __kernel_text_address+0xd/0x40 [ 44.212045] ? kasan_unpoison+0x27/0x60 [ 44.212045] ? mutex_lock+0x60/0xe0 [ 44.212045] ? __pfx_mutex_lock+0x10/0x10 [ 44.212045] ? kasan_save_stack+0x22/0x50 [ 44.212045] netlink_dump+0x322/0x6c0 [ 44.212045] ? __pfx_netlink_dump+0x10/0x10 [ 44.212045] ? mutex_unlock+0x7f/0xd0 [ 44.212045] ? __pfx_mutex_unlock+0x10/0x10 [ 44.212045] __netlink_dump_start+0x353/0x430 [ 44.212045] xfrm_user_rcv_msg+0x3a4/0x410 [ 44.212045] ? __pfx__raw_spin_lock_irqsave+0x10/0x10 [ 44.212045] ? __pfx_xfrm_user_rcv_msg+0x10/0x10 [ 44.212045] ? __pfx_xfrm_dump_sa+0x10/0x10 [ 44.212045] ? __pfx_xfrm_dump_sa_done+0x10/0x10 [ 44.212045] ? __stack_depot_save+0x382/0x4e0 [ 44.212045] ? filter_irq_stacks+0x1c/0x70 [ 44.212045] ? kasan_save_stack+0x32/0x50 [ 44.212045] ? kasan_save_stack+0x22/0x50 [ 44.212045] ? kasan_set_track+0x25/0x30 [ 44.212045] ? __kasan_slab_alloc+0x59/0x70 [ 44.212045] ? kmem_cache_alloc_node+0xf7/0x260 [ 44.212045] ? kmalloc_reserve+0xab/0x120 [ 44.212045] ? __alloc_skb+0xcf/0x210 [ 44.212045] ? netlink_sendmsg+0x509/0x700 [ 44.212045] ? sock_sendmsg+0xde/0xe0 [ 44.212045] ? __sys_sendto+0x18d/0x230 [ 44.212045] ? __x64_sys_sendto+0x71/0x90 [ 44.212045] ? do_syscall_64+0x3f/0x90 [ 44.212045] ? entry_SYSCALL_64_after_hwframe+0x72/0xdc [ 44.212045] ? netlink_sendmsg+0x509/0x700 [ 44.212045] ? sock_sendmsg+0xde/0xe0 [ 44.212045] ? __sys_sendto+0x18d/0x230 [ 44.212045] ? __x64_sys_sendto+0x71/0x90 [ 44.212045] ? do_syscall_64+0x3f/0x90 [ 44.212045] ? entry_SYSCALL_64_after_hwframe+0x72/0xdc [ 44.212045] ? kasan_save_stack+0x22/0x50 [ 44.212045] ? kasan_set_track+0x25/0x30 [ 44.212045] ? kasan_save_free_info+0x2e/0x50 [ 44.212045] ? __kasan_slab_free+0x10a/0x190 [ 44.212045] ? kmem_cache_free+0x9c/0x340 [ 44.212045] ? netlink_recvmsg+0x23c/0x660 [ 44.212045] ? sock_recvmsg+0xeb/0xf0 [ 44.212045] ? __sys_recvfrom+0x13c/0x1f0 [ 44.212045] ? __x64_sys_recvfrom+0x71/0x90 [ 44.212045] ? do_syscall_64+0x3f/0x90 [ 44.212045] ? entry_SYSCALL_64_after_hwframe+0x72/0xdc [ 44.212045] ? copyout+0x3e/0x50 [ 44.212045] netlink_rcv_skb+0xd6/0x210 [ 44.212045] ? __pfx_xfrm_user_rcv_msg+0x10/0x10 [ 44.212045] ? 
__pfx_netlink_rcv_skb+0x10/0x10 [ 44.212045] ? __pfx_sock_has_perm+0x10/0x10 [ 44.212045] ? mutex_lock+0x8d/0xe0 [ 44.212045] ? __pfx_mutex_lock+0x10/0x10 [ 44.212045] xfrm_netlink_rcv+0x44/0x50 [ 44.212045] netlink_unicast+0x36f/0x4c0 [ 44.212045] ? __pfx_netlink_unicast+0x10/0x10 [ 44.212045] ? netlink_recvmsg+0x500/0x660 [ 44.212045] netlink_sendmsg+0x3b7/0x700 [ 44.212045] ? __pfx_netlink_sendmsg+0x10/0x10 [ 44.212045] ? __pfx_netlink_sendmsg+0x10/0x10 [ 44.212045] sock_sendmsg+0xde/0xe0 [ 44.212045] __sys_sendto+0x18d/0x230 [ 44.212045] ? __pfx___sys_sendto+0x10/0x10 [ 44.212045] ? rcu_core+0x44a/0xe10 [ 44.212045] ? __rseq_handle_notify_resume+0x45b/0x740 [ 44.212045] ? _raw_spin_lock_irq+0x81/0xe0 [ 44.212045] ? __pfx___rseq_handle_notify_resume+0x10/0x10 [ 44.212045] ? __pfx_restore_fpregs_from_fpstate+0x10/0x10 [ 44.212045] ? __pfx_blkcg_maybe_throttle_current+0x10/0x10 [ 44.212045] ? __pfx_task_work_run+0x10/0x10 [ 44.212045] __x64_sys_sendto+0x71/0x90 [ 44.212045] do_syscall_64+0x3f/0x90 [ 44.212045] entry_SYSCALL_64_after_hwframe+0x72/0xdc [ 44.212045] RIP: 0033:0x44b7da [ 44.212045] RSP: 002b:00007ffdc8838548 EFLAGS: 00000246 ORIG_RAX: 000000000000002c [ 44.212045] RAX: ffffffffffffffda RBX: 00007ffdc8839978 RCX: 000000000044b7da [ 44.212045] RDX: 0000000000000038 RSI: 00007ffdc8838770 RDI: 0000000000000003 [ 44.212045] RBP: 00007ffdc88385b0 R08: 00007ffdc883858c R09: 000000000000000c [ 44.212045] R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000001 [ 44.212045] R13: 00007ffdc8839968 R14: 00000000004c37d0 R15: 0000000000000001 [ 44.212045] [ 44.212045] [ 44.212045] Allocated by task 97: [ 44.212045] kasan_save_stack+0x22/0x50 [ 44.212045] kasan_set_track+0x25/0x30 [ 44.212045] __kasan_kmalloc+0x7f/0x90 [ 44.212045] __kmalloc_node_track_caller+0x5b/0x140 [ 44.212045] kmemdup+0x21/0x50 [ 44.212045] xfrm_dump_sa+0x17d/0x290 [ 44.212045] netlink_dump+0x322/0x6c0 [ 44.212045] __netlink_dump_start+0x353/0x430 [ 44.212045] xfrm_user_rcv_msg+0x3a4/0x410 [ 44.212045] netlink_rcv_skb+0xd6/0x210 [ 44.212045] xfrm_netlink_rcv+0x44/0x50 [ 44.212045] netlink_unicast+0x36f/0x4c0 [ 44.212045] netlink_sendmsg+0x3b7/0x700 [ 44.212045] sock_sendmsg+0xde/0xe0 [ 44.212045] __sys_sendto+0x18d/0x230 [ 44.212045] __x64_sys_sendto+0x71/0x90 [ 44.212045] do_syscall_64+0x3f/0x90 [ 44.212045] entry_SYSCALL_64_after_hwframe+0x72/0xdc [ 44.212045] [ 44.212045] The buggy address belongs to the object at ffff88800870f300 [ 44.212045] which belongs to the cache kmalloc-64 of size 64 [ 44.212045] The buggy address is located 32 bytes inside of [ 44.212045] allocated 36-byte region [ffff88800870f300, ffff88800870f324) [ 44.212045] [ 44.212045] The buggy address belongs to the physical page: [ 44.212045] page:00000000e4de16ee refcount:1 mapcount:0 mapping:000000000 ... 
[ 44.212045] flags: 0x100000000000200(slab|node=0|zone=1) [ 44.212045] page_type: 0xffffffff() [ 44.212045] raw: 0100000000000200 ffff888004c41640 dead000000000122 0000000000000000 [ 44.212045] raw: 0000000000000000 0000000080200020 00000001ffffffff 0000000000000000 [ 44.212045] page dumped because: kasan: bad access detected [ 44.212045] [ 44.212045] Memory state around the buggy address: [ 44.212045] ffff88800870f200: fa fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc [ 44.212045] ffff88800870f280: 00 00 00 00 00 fc fc fc fc fc fc fc fc fc fc fc [ 44.212045] >ffff88800870f300: 00 00 00 00 04 fc fc fc fc fc fc fc fc fc fc fc [ 44.212045] ^ [ 44.212045] ffff88800870f380: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc [ 44.212045] ffff88800870f400: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc [ 44.212045] ================================================================== By investigating the code, we find the root cause of this OOB is the lack of checks in xfrm_dump_sa(). The buggy code allows a malicious user to pass arbitrary value of filter->splen/dplen. Hence, with crafted xfrm states, the attacker can achieve 8 bytes heap OOB read, which causes info leak. if (attrs[XFRMA_ADDRESS_FILTER]) { filter = kmemdup(nla_data(attrs[XFRMA_ADDRESS_FILTER]), sizeof(*filter), GFP_KERNEL); if (filter == NULL) return -ENOMEM; // NO MORE CHECKS HERE !!! } This patch fixes the OOB by adding necessary boundary checks, just like the code in pfkey_dump() function. Fixes: d3623099d350 ("ipsec: add support of limited SA dump") Signed-off-by: Lin Ma Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin --- net/xfrm/xfrm_user.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index eb0952dbf423..b2065f69c3d2 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1159,6 +1159,15 @@ static int xfrm_dump_sa(struct sk_buff *skb, struct netlink_callback *cb) sizeof(*filter), GFP_KERNEL); if (filter == NULL) return -ENOMEM; + + /* see addr_match(), (prefix length >> 5) << 2 + * will be used to compare xfrm_address_t + */ + if (filter->splen > (sizeof(xfrm_address_t) << 3) || + filter->dplen > (sizeof(xfrm_address_t) << 3)) { + kfree(filter); + return -EINVAL; + } } if (attrs[XFRMA_PROTO]) From fed1cd2cd3aa95dedb75d12d430c90d6203d6c32 Mon Sep 17 00:00:00 2001 From: Lin Ma Date: Tue, 27 Jun 2023 11:39:54 +0800 Subject: [PATCH 452/513] net: af_key: fix sadb_x_filter validation [ Upstream commit 75065a8929069bc93181848818e23f147a73f83a ] When running xfrm_state_walk_init(), the xfrm_address_filter being used is okay to have a splen/dplen that equals to sizeof(xfrm_address_t)<<3. This commit replaces >= to > to make sure the boundary checking is correct. 
Fixes: 37bd22420f85 ("af_key: pfkey_dump needs parameter validation") Signed-off-by: Lin Ma Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin --- net/key/af_key.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/key/af_key.c b/net/key/af_key.c index d34fed1a484a..258fa046f440 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1848,9 +1848,9 @@ static int pfkey_dump(struct sock *sk, struct sk_buff *skb, const struct sadb_ms if (ext_hdrs[SADB_X_EXT_FILTER - 1]) { struct sadb_x_filter *xfilter = ext_hdrs[SADB_X_EXT_FILTER - 1]; - if ((xfilter->sadb_x_filter_splen >= + if ((xfilter->sadb_x_filter_splen > (sizeof(xfrm_address_t) << 3)) || - (xfilter->sadb_x_filter_dplen >= + (xfilter->sadb_x_filter_dplen > (sizeof(xfrm_address_t) << 3))) { mutex_unlock(&pfk->dump_lock); return -EINVAL; From a465ace883ac600fef37165b653b0ca5cc9c1a53 Mon Sep 17 00:00:00 2001 From: Lin Ma Date: Fri, 30 Jun 2023 16:19:11 +0800 Subject: [PATCH 453/513] net: xfrm: Amend XFRMA_SEC_CTX nla_policy structure [ Upstream commit d1e0e61d617ba17aa516db707aa871387566bbf7 ] According to all consumers code of attrs[XFRMA_SEC_CTX], like * verify_sec_ctx_len(), convert to xfrm_user_sec_ctx* * xfrm_state_construct(), call security_xfrm_state_alloc whose prototype is int security_xfrm_state_alloc(.., struct xfrm_user_sec_ctx *sec_ctx); * copy_from_user_sec_ctx(), convert to xfrm_user_sec_ctx * ... It seems that the expected parsing result for XFRMA_SEC_CTX should be structure xfrm_user_sec_ctx, and the current xfrm_sec_ctx is confusing and misleading (Luckily, they happen to have same size 8 bytes). This commit amend the policy structure to xfrm_user_sec_ctx to avoid ambiguity. Fixes: cf5cb79f6946 ("[XFRM] netlink: Establish an attribute policy") Signed-off-by: Lin Ma Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin --- net/xfrm/xfrm_compat.c | 2 +- net/xfrm/xfrm_user.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/net/xfrm/xfrm_compat.c b/net/xfrm/xfrm_compat.c index 8cbf45a8bcdc..655fe4ff8621 100644 --- a/net/xfrm/xfrm_compat.c +++ b/net/xfrm/xfrm_compat.c @@ -108,7 +108,7 @@ static const struct nla_policy compat_policy[XFRMA_MAX+1] = { [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, - [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) }, + [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) }, [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index b2065f69c3d2..f36fd1379eff 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -2834,7 +2834,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_ALG_COMP] = { .len = sizeof(struct xfrm_algo) }, [XFRMA_ENCAP] = { .len = sizeof(struct xfrm_encap_tmpl) }, [XFRMA_TMPL] = { .len = sizeof(struct xfrm_user_tmpl) }, - [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_sec_ctx) }, + [XFRMA_SEC_CTX] = { .len = sizeof(struct xfrm_user_sec_ctx) }, [XFRMA_LTIME_VAL] = { .len = sizeof(struct xfrm_lifetime_cur) }, [XFRMA_REPLAY_VAL] = { .len = sizeof(struct xfrm_replay_state) }, [XFRMA_REPLAY_THRESH] = { .type = NLA_U32 }, From 44b3d40967009304617a7a6486490c1d6c12f899 Mon Sep 17 00:00:00 2001 From: Zhengchao Shao Date: Mon, 10 Jul 2023 17:40:51 +0800 Subject: [PATCH 454/513] xfrm: fix 
slab-use-after-free in decode_session6 [ Upstream commit 53223f2ed1ef5c90dad814daaaefea4e68a933c8 ] When the xfrm device is set to the qdisc of the sfb type, the cb field of the sent skb may be modified during enqueuing. Then, slab-use-after-free may occur when the xfrm device sends IPv6 packets. The stack information is as follows: BUG: KASAN: slab-use-after-free in decode_session6+0x103f/0x1890 Read of size 1 at addr ffff8881111458ef by task swapper/3/0 CPU: 3 PID: 0 Comm: swapper/3 Not tainted 6.4.0-next-20230707 #409 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-1.fc33 04/01/2014 Call Trace: dump_stack_lvl+0xd9/0x150 print_address_description.constprop.0+0x2c/0x3c0 kasan_report+0x11d/0x130 decode_session6+0x103f/0x1890 __xfrm_decode_session+0x54/0xb0 xfrmi_xmit+0x173/0x1ca0 dev_hard_start_xmit+0x187/0x700 sch_direct_xmit+0x1a3/0xc30 __qdisc_run+0x510/0x17a0 __dev_queue_xmit+0x2215/0x3b10 neigh_connected_output+0x3c2/0x550 ip6_finish_output2+0x55a/0x1550 ip6_finish_output+0x6b9/0x1270 ip6_output+0x1f1/0x540 ndisc_send_skb+0xa63/0x1890 ndisc_send_rs+0x132/0x6f0 addrconf_rs_timer+0x3f1/0x870 call_timer_fn+0x1a0/0x580 expire_timers+0x29b/0x4b0 run_timer_softirq+0x326/0x910 __do_softirq+0x1d4/0x905 irq_exit_rcu+0xb7/0x120 sysvec_apic_timer_interrupt+0x97/0xc0 asm_sysvec_apic_timer_interrupt+0x1a/0x20 RIP: 0010:intel_idle_hlt+0x23/0x30 Code: 1f 84 00 00 00 00 00 f3 0f 1e fa 41 54 41 89 d4 0f 1f 44 00 00 66 90 0f 1f 44 00 00 0f 00 2d c4 9f ab 00 0f 1f 44 00 00 fb f4 44 89 e0 41 5c c3 66 0f 1f 44 00 00 f3 0f 1e fa 41 54 41 89 d4 RSP: 0018:ffffc90000197d78 EFLAGS: 00000246 RAX: 00000000000a83c3 RBX: ffffe8ffffd09c50 RCX: ffffffff8a22d8e5 RDX: 0000000000000001 RSI: ffffffff8d3f8080 RDI: ffffe8ffffd09c50 RBP: ffffffff8d3f8080 R08: 0000000000000001 R09: ffffed1026ba6d9d R10: ffff888135d36ceb R11: 0000000000000001 R12: 0000000000000001 R13: ffffffff8d3f8100 R14: 0000000000000001 R15: 0000000000000000 cpuidle_enter_state+0xd3/0x6f0 cpuidle_enter+0x4e/0xa0 do_idle+0x2fe/0x3c0 cpu_startup_entry+0x18/0x20 start_secondary+0x200/0x290 secondary_startup_64_no_verify+0x167/0x16b Allocated by task 939: kasan_save_stack+0x22/0x40 kasan_set_track+0x25/0x30 __kasan_slab_alloc+0x7f/0x90 kmem_cache_alloc_node+0x1cd/0x410 kmalloc_reserve+0x165/0x270 __alloc_skb+0x129/0x330 inet6_ifa_notify+0x118/0x230 __ipv6_ifa_notify+0x177/0xbe0 addrconf_dad_completed+0x133/0xe00 addrconf_dad_work+0x764/0x1390 process_one_work+0xa32/0x16f0 worker_thread+0x67d/0x10c0 kthread+0x344/0x440 ret_from_fork+0x1f/0x30 The buggy address belongs to the object at ffff888111145800 which belongs to the cache skbuff_small_head of size 640 The buggy address is located 239 bytes inside of freed 640-byte region [ffff888111145800, ffff888111145a80) As commit f855691975bb ("xfrm6: Fix the nexthdr offset in _decode_session6.") showed, xfrm_decode_session was originally intended only for the receive path. IP6CB(skb)->nhoff is not set during transmission. Therefore, set the cb field in the skb to 0 before sending packets. 
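Since the same clearing pattern recurs in the next two patches as well, a tiny userspace model may help illustrate the point (invented struct and names, not kernel code): skb->cb is scratch space that every layer may scribble on, so a decoder that trusts an offset stored there must only run after the control block has been zeroed.

#include <stdio.h>
#include <string.h>

/* Invented types, userspace only: a stand-in for struct sk_buff and its cb[]. */
struct fake_skb {
        unsigned char cb[48];     /* per-layer scratch space, reused on every hop */
        unsigned char data[64];
};

/* Stand-in for decode_session6(): it blindly trusts cb[0] as a header offset. */
static unsigned int header_offset(const struct fake_skb *skb)
{
        return skb->cb[0];
}

int main(void)
{
        struct fake_skb skb;

        memset(&skb, 0, sizeof(skb));
        skb.cb[0] = 200;        /* garbage a qdisc left behind while enqueuing */
        printf("stale cb  : decoder would read at offset %u (past %zu data bytes)\n",
               header_offset(&skb), sizeof(skb.data));

        memset(skb.cb, 0, sizeof(skb.cb));      /* what the fix does before decoding */
        printf("cleared cb: decoder reads at offset %u\n", header_offset(&skb));
        return 0;
}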
Fixes: f855691975bb ("xfrm6: Fix the nexthdr offset in _decode_session6.") Signed-off-by: Zhengchao Shao Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin --- net/xfrm/xfrm_interface_core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/xfrm/xfrm_interface_core.c b/net/xfrm/xfrm_interface_core.c index 694eec6ca147..ded752e33dac 100644 --- a/net/xfrm/xfrm_interface_core.c +++ b/net/xfrm/xfrm_interface_core.c @@ -399,8 +399,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) switch (skb->protocol) { case htons(ETH_P_IPV6): - xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + xfrm_decode_session(skb, &fl, AF_INET6); if (!dst) { fl.u.ip6.flowi6_oif = dev->ifindex; fl.u.ip6.flowi6_flags |= FLOWI_FLAG_ANYSRC; @@ -414,8 +414,8 @@ static netdev_tx_t xfrmi_xmit(struct sk_buff *skb, struct net_device *dev) } break; case htons(ETH_P_IP): - xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + xfrm_decode_session(skb, &fl, AF_INET); if (!dst) { struct rtable *rt; From a1639a82ce14af76b6419778d343ccbff86ee626 Mon Sep 17 00:00:00 2001 From: Zhengchao Shao Date: Mon, 10 Jul 2023 17:40:52 +0800 Subject: [PATCH 455/513] ip6_vti: fix slab-use-after-free in decode_session6 [ Upstream commit 9fd41f1ba638938c9a1195d09bc6fa3be2712f25 ] When ipv6_vti device is set to the qdisc of the sfb type, the cb field of the sent skb may be modified during enqueuing. Then, slab-use-after-free may occur when ipv6_vti device sends IPv6 packets. The stack information is as follows: BUG: KASAN: slab-use-after-free in decode_session6+0x103f/0x1890 Read of size 1 at addr ffff88802e08edc2 by task swapper/0/0 CPU: 0 PID: 0 Comm: swapper/0 Not tainted 6.4.0-next-20230707-00001-g84e2cad7f979 #410 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.14.0-1.fc33 04/01/2014 Call Trace: dump_stack_lvl+0xd9/0x150 print_address_description.constprop.0+0x2c/0x3c0 kasan_report+0x11d/0x130 decode_session6+0x103f/0x1890 __xfrm_decode_session+0x54/0xb0 vti6_tnl_xmit+0x3e6/0x1ee0 dev_hard_start_xmit+0x187/0x700 sch_direct_xmit+0x1a3/0xc30 __qdisc_run+0x510/0x17a0 __dev_queue_xmit+0x2215/0x3b10 neigh_connected_output+0x3c2/0x550 ip6_finish_output2+0x55a/0x1550 ip6_finish_output+0x6b9/0x1270 ip6_output+0x1f1/0x540 ndisc_send_skb+0xa63/0x1890 ndisc_send_rs+0x132/0x6f0 addrconf_rs_timer+0x3f1/0x870 call_timer_fn+0x1a0/0x580 expire_timers+0x29b/0x4b0 run_timer_softirq+0x326/0x910 __do_softirq+0x1d4/0x905 irq_exit_rcu+0xb7/0x120 sysvec_apic_timer_interrupt+0x97/0xc0 Allocated by task 9176: kasan_save_stack+0x22/0x40 kasan_set_track+0x25/0x30 __kasan_slab_alloc+0x7f/0x90 kmem_cache_alloc_node+0x1cd/0x410 kmalloc_reserve+0x165/0x270 __alloc_skb+0x129/0x330 netlink_sendmsg+0x9b1/0xe30 sock_sendmsg+0xde/0x190 ____sys_sendmsg+0x739/0x920 ___sys_sendmsg+0x110/0x1b0 __sys_sendmsg+0xf7/0x1c0 do_syscall_64+0x39/0xb0 entry_SYSCALL_64_after_hwframe+0x63/0xcd Freed by task 9176: kasan_save_stack+0x22/0x40 kasan_set_track+0x25/0x30 kasan_save_free_info+0x2b/0x40 ____kasan_slab_free+0x160/0x1c0 slab_free_freelist_hook+0x11b/0x220 kmem_cache_free+0xf0/0x490 skb_free_head+0x17f/0x1b0 skb_release_data+0x59c/0x850 consume_skb+0xd2/0x170 netlink_unicast+0x54f/0x7f0 netlink_sendmsg+0x926/0xe30 sock_sendmsg+0xde/0x190 ____sys_sendmsg+0x739/0x920 ___sys_sendmsg+0x110/0x1b0 __sys_sendmsg+0xf7/0x1c0 do_syscall_64+0x39/0xb0 entry_SYSCALL_64_after_hwframe+0x63/0xcd The buggy address belongs to the object at ffff88802e08ed00 which belongs to 
the cache skbuff_small_head of size 640 The buggy address is located 194 bytes inside of freed 640-byte region [ffff88802e08ed00, ffff88802e08ef80) As commit f855691975bb ("xfrm6: Fix the nexthdr offset in _decode_session6.") showed, xfrm_decode_session was originally intended only for the receive path. IP6CB(skb)->nhoff is not set during transmission. Therefore, set the cb field in the skb to 0 before sending packets. Fixes: f855691975bb ("xfrm6: Fix the nexthdr offset in _decode_session6.") Signed-off-by: Zhengchao Shao Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin --- net/ipv6/ip6_vti.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c index 42c37ec832f1..190aa3b19591 100644 --- a/net/ipv6/ip6_vti.c +++ b/net/ipv6/ip6_vti.c @@ -570,12 +570,12 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev) vti6_addr_conflict(t, ipv6_hdr(skb))) goto tx_err; - xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + xfrm_decode_session(skb, &fl, AF_INET6); break; case htons(ETH_P_IP): - xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + xfrm_decode_session(skb, &fl, AF_INET); break; default: goto tx_err; From e1e04cc2ef2c0c0866c19f5627149a76c2baae32 Mon Sep 17 00:00:00 2001 From: Zhengchao Shao Date: Mon, 10 Jul 2023 17:40:53 +0800 Subject: [PATCH 456/513] ip_vti: fix potential slab-use-after-free in decode_session6 [ Upstream commit 6018a266279b1a75143c7c0804dd08a5fc4c3e0b ] When ip_vti device is set to the qdisc of the sfb type, the cb field of the sent skb may be modified during enqueuing. Then, slab-use-after-free may occur when ip_vti device sends IPv6 packets. As commit f855691975bb ("xfrm6: Fix the nexthdr offset in _decode_session6.") showed, xfrm_decode_session was originally intended only for the receive path. IP6CB(skb)->nhoff is not set during transmission. Therefore, set the cb field in the skb to 0 before sending packets. Fixes: f855691975bb ("xfrm6: Fix the nexthdr offset in _decode_session6.") Signed-off-by: Zhengchao Shao Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin --- net/ipv4/ip_vti.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c index efe25a0172e6..df23319adc80 100644 --- a/net/ipv4/ip_vti.c +++ b/net/ipv4/ip_vti.c @@ -287,12 +287,12 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) switch (skb->protocol) { case htons(ETH_P_IP): - xfrm_decode_session(skb, &fl, AF_INET); memset(IPCB(skb), 0, sizeof(*IPCB(skb))); + xfrm_decode_session(skb, &fl, AF_INET); break; case htons(ETH_P_IPV6): - xfrm_decode_session(skb, &fl, AF_INET6); memset(IP6CB(skb), 0, sizeof(*IP6CB(skb))); + xfrm_decode_session(skb, &fl, AF_INET6); break; default: goto tx_err; From 075448a2eb753f813fe873cfa52853e9fef8eedb Mon Sep 17 00:00:00 2001 From: Lin Ma Date: Fri, 21 Jul 2023 22:51:03 +0800 Subject: [PATCH 457/513] xfrm: add NULL check in xfrm_update_ae_params [ Upstream commit 00374d9b6d9f932802b55181be9831aa948e5b7c ] Normally, x->replay_esn and x->preplay_esn should be allocated at xfrm_alloc_replay_state_esn(...) in xfrm_state_construct(...), hence the xfrm_update_ae_params(...) is okay to update them. However, the current implementation of xfrm_new_ae(...) allows a malicious user to directly dereference a NULL pointer and crash the kernel like below. 
BUG: kernel NULL pointer dereference, address: 0000000000000000 PGD 8253067 P4D 8253067 PUD 8e0e067 PMD 0 Oops: 0002 [#1] PREEMPT SMP KASAN NOPTI CPU: 0 PID: 98 Comm: poc.npd Not tainted 6.4.0-rc7-00072-gdad9774deaf1 #8 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.o4 RIP: 0010:memcpy_orig+0xad/0x140 Code: e8 4c 89 5f e0 48 8d 7f e0 73 d2 83 c2 20 48 29 d6 48 29 d7 83 fa 10 72 34 4c 8b 06 4c 8b 4e 08 c RSP: 0018:ffff888008f57658 EFLAGS: 00000202 RAX: 0000000000000000 RBX: ffff888008bd0000 RCX: ffffffff8238e571 RDX: 0000000000000018 RSI: ffff888007f64844 RDI: 0000000000000000 RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000 R10: 0000000000000000 R11: 0000000000000000 R12: ffff888008f57818 R13: ffff888007f64aa4 R14: 0000000000000000 R15: 0000000000000000 FS: 00000000014013c0(0000) GS:ffff88806d600000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000000000000 CR3: 00000000054d8000 CR4: 00000000000006f0 Call Trace: ? __die+0x1f/0x70 ? page_fault_oops+0x1e8/0x500 ? __pfx_is_prefetch.constprop.0+0x10/0x10 ? __pfx_page_fault_oops+0x10/0x10 ? _raw_spin_unlock_irqrestore+0x11/0x40 ? fixup_exception+0x36/0x460 ? _raw_spin_unlock_irqrestore+0x11/0x40 ? exc_page_fault+0x5e/0xc0 ? asm_exc_page_fault+0x26/0x30 ? xfrm_update_ae_params+0xd1/0x260 ? memcpy_orig+0xad/0x140 ? __pfx__raw_spin_lock_bh+0x10/0x10 xfrm_update_ae_params+0xe7/0x260 xfrm_new_ae+0x298/0x4e0 ? __pfx_xfrm_new_ae+0x10/0x10 ? __pfx_xfrm_new_ae+0x10/0x10 xfrm_user_rcv_msg+0x25a/0x410 ? __pfx_xfrm_user_rcv_msg+0x10/0x10 ? __alloc_skb+0xcf/0x210 ? stack_trace_save+0x90/0xd0 ? filter_irq_stacks+0x1c/0x70 ? __stack_depot_save+0x39/0x4e0 ? __kasan_slab_free+0x10a/0x190 ? kmem_cache_free+0x9c/0x340 ? netlink_recvmsg+0x23c/0x660 ? sock_recvmsg+0xeb/0xf0 ? __sys_recvfrom+0x13c/0x1f0 ? __x64_sys_recvfrom+0x71/0x90 ? do_syscall_64+0x3f/0x90 ? entry_SYSCALL_64_after_hwframe+0x72/0xdc ? copyout+0x3e/0x50 netlink_rcv_skb+0xd6/0x210 ? __pfx_xfrm_user_rcv_msg+0x10/0x10 ? __pfx_netlink_rcv_skb+0x10/0x10 ? __pfx_sock_has_perm+0x10/0x10 ? mutex_lock+0x8d/0xe0 ? __pfx_mutex_lock+0x10/0x10 xfrm_netlink_rcv+0x44/0x50 netlink_unicast+0x36f/0x4c0 ? __pfx_netlink_unicast+0x10/0x10 ? netlink_recvmsg+0x500/0x660 netlink_sendmsg+0x3b7/0x700 This Null-ptr-deref bug is assigned CVE-2023-3772. And this commit adds additional NULL check in xfrm_update_ae_params to fix the NPD. 
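The shape of the fix is the classic guarded-update pattern; a minimal standalone sketch (userspace C with invented, simplified types, not the real kernel structures) shows how checking both destination pointers turns the malicious request into a harmless no-op:

#include <stdio.h>
#include <string.h>

/* Simplified, invented structures; only the shape of the check matters here. */
struct replay_esn { unsigned int seq; };
struct state      { struct replay_esn *replay_esn, *preplay_esn; };

static void update_ae(struct state *x, const struct replay_esn *re)
{
        /* Mirrors the fixed condition: the attribute alone is not enough,
         * both optional destinations must actually have been allocated. */
        if (re && x->replay_esn && x->preplay_esn)
                memcpy(x->replay_esn, re, sizeof(*re));
}

int main(void)
{
        struct replay_esn attr = { .seq = 42 };
        struct state x = { 0 };              /* ESN state never allocated */

        update_ae(&x, &attr);                /* silently ignored instead of a NULL deref */
        printf("replay_esn still %p, preplay_esn still %p\n",
               (void *)x.replay_esn, (void *)x.preplay_esn);
        return 0;
}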
Fixes: d8647b79c3b7 ("xfrm: Add user interface for esn and big anti-replay windows") Signed-off-by: Lin Ma Reviewed-by: Leon Romanovsky Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin --- net/xfrm/xfrm_user.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index f36fd1379eff..6ff405c2cd2c 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -527,7 +527,7 @@ static void xfrm_update_ae_params(struct xfrm_state *x, struct nlattr **attrs, struct nlattr *rt = attrs[XFRMA_REPLAY_THRESH]; struct nlattr *mt = attrs[XFRMA_MTIMER_THRESH]; - if (re) { + if (re && x->replay_esn && x->preplay_esn) { struct xfrm_replay_state_esn *replay_esn; replay_esn = nla_data(re); memcpy(x->replay_esn, replay_esn, From 8e5e967348caead2e03f047af28a4bcd79b80b9c Mon Sep 17 00:00:00 2001 From: Lin Ma Date: Sun, 23 Jul 2023 15:41:10 +0800 Subject: [PATCH 458/513] xfrm: add forgotten nla_policy for XFRMA_MTIMER_THRESH [ Upstream commit 5e2424708da7207087934c5c75211e8584d553a0 ] The previous commit 4e484b3e969b ("xfrm: rate limit SA mapping change message to user space") added one additional attribute named XFRMA_MTIMER_THRESH and described its type at compat_policy (net/xfrm/xfrm_compat.c). However, the author forgot to also describe the nla_policy at xfrma_policy (net/xfrm/xfrm_user.c). Hence, this suppose NLA_U32 (4 bytes) value can be faked as empty (0 bytes) by a malicious user, which leads to 4 bytes overflow read and heap information leak when parsing nlattrs. To exploit this, one malicious user can spray the SLUB objects and then leverage this 4 bytes OOB read to leak the heap data into x->mapping_maxage (see xfrm_update_ae_params(...)), and leak it to userspace via copy_to_user_state_extra(...). The above bug is assigned CVE-2023-3773. To fix it, this commit just completes the nla_policy description for XFRMA_MTIMER_THRESH, which enforces the length check and avoids such OOB read. Fixes: 4e484b3e969b ("xfrm: rate limit SA mapping change message to user space") Signed-off-by: Lin Ma Reviewed-by: Simon Horman Reviewed-by: Leon Romanovsky Signed-off-by: Steffen Klassert Signed-off-by: Sasha Levin --- net/xfrm/xfrm_user.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index 6ff405c2cd2c..ff56b6a0162e 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -2854,6 +2854,7 @@ const struct nla_policy xfrma_policy[XFRMA_MAX+1] = { [XFRMA_SET_MARK] = { .type = NLA_U32 }, [XFRMA_SET_MARK_MASK] = { .type = NLA_U32 }, [XFRMA_IF_ID] = { .type = NLA_U32 }, + [XFRMA_MTIMER_THRESH] = { .type = NLA_U32 }, }; EXPORT_SYMBOL_GPL(xfrma_policy); From c2a6ffe3f1a3e53e0572cf050cb09f314e2fca54 Mon Sep 17 00:00:00 2001 From: "Russell King (Oracle)" Date: Fri, 11 Aug 2023 11:26:30 +0100 Subject: [PATCH 459/513] net: phy: fix IRQ-based wake-on-lan over hibernate / power off MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit cc941e548bffc01b5816b4edc5cb432a137a58b3 ] Uwe reports: "Most PHYs signal WoL using an interrupt. So disabling interrupts [at shutdown] breaks WoL at least on PHYs covered by the marvell driver." Discussing with Ioana, the problem which was trying to be solved was: "The board in question is a LS1021ATSN which has two AR8031 PHYs that share an interrupt line. 
In case only one of the PHYs is probed and there are pending interrupts on the PHY#2 an IRQ storm will happen since there is no entity to clear the interrupt from PHY#2's registers. PHY#1's driver will get stuck in .handle_interrupt() indefinitely." Further confirmation that "the two AR8031 PHYs are on the same MDIO bus." With WoL using interrupts to wake the system, in such a case, the system will begin booting with an asserted interrupt. Thus, we need to cope with an interrupt asserted during boot. Solve this instead by disabling interrupts during PHY probe. This will ensure in Ioana's situation that both PHYs of the same type sharing an interrupt line on a common MDIO bus will have their interrupt outputs disabled when the driver probes the device, but before we hook in any interrupt handlers - thus avoiding the interrupt storm. A better fix would be for platform firmware to disable the interrupting devices at source during boot, before control is handed to the kernel. Fixes: e2f016cf7751 ("net: phy: add a shutdown procedure") Link: 20230804071757.383971-1-u.kleine-koenig@pengutronix.de Reported-by: Uwe Kleine-König Signed-off-by: Russell King (Oracle) Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- drivers/net/phy/phy_device.c | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 6085a28cae3d..0429825a7179 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3061,6 +3061,8 @@ static int phy_probe(struct device *dev) goto out; } + phy_disable_interrupts(phydev); + /* Start out supporting everything. Eventually, * a controller will attach, and may modify one * or both of these values @@ -3148,16 +3150,6 @@ static int phy_remove(struct device *dev) return 0; } -static void phy_shutdown(struct device *dev) -{ - struct phy_device *phydev = to_phy_device(dev); - - if (phydev->state == PHY_READY || !phydev->attached_dev) - return; - - phy_disable_interrupts(phydev); -} - /** * phy_driver_register - register a phy_driver with the PHY layer * @new_driver: new phy_driver to register @@ -3181,7 +3173,6 @@ int phy_driver_register(struct phy_driver *new_driver, struct module *owner) new_driver->mdiodrv.driver.bus = &mdio_bus_type; new_driver->mdiodrv.driver.probe = phy_probe; new_driver->mdiodrv.driver.remove = phy_remove; - new_driver->mdiodrv.driver.shutdown = phy_shutdown; new_driver->mdiodrv.driver.owner = owner; new_driver->mdiodrv.driver.probe_type = PROBE_FORCE_SYNCHRONOUS; From 7207ee323afa9c69336689f08fe024bda4dbb8cf Mon Sep 17 00:00:00 2001 From: Petr Machata Date: Fri, 11 Aug 2023 17:59:27 +0200 Subject: [PATCH 460/513] selftests: mirror_gre_changes: Tighten up the TTL test match [ Upstream commit 855067defa36b1f9effad8c219d9a85b655cf500 ] This test verifies whether the encapsulated packets have the correct configured TTL. It does so by sending ICMP packets through the test topology and mirroring them to a gretap netdevice. On a busy host however, more than just the test ICMP packets may end up flowing through the topology, get mirrored, and counted. This leads to potential spurious failures as the test observes much more mirrored packets than the sent test packets, and assumes a bug. Fix this by tightening up the mirror action match. Change it from matchall to a flower classifier matching on ICMP packets specifically. 
Fixes: 45315673e0c5 ("selftests: forwarding: Test changes in mirror-to-gretap") Signed-off-by: Petr Machata Tested-by: Mirsad Todorovac Reviewed-by: Ido Schimmel Reviewed-by: Simon Horman Signed-off-by: David S. Miller Signed-off-by: Sasha Levin --- tools/testing/selftests/net/forwarding/mirror_gre_changes.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh index 472bd023e2a5..b501b366367f 100755 --- a/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh +++ b/tools/testing/selftests/net/forwarding/mirror_gre_changes.sh @@ -72,7 +72,8 @@ test_span_gre_ttl() RET=0 - mirror_install $swp1 ingress $tundev "matchall $tcflags" + mirror_install $swp1 ingress $tundev \ + "prot ip flower $tcflags ip_prot icmp" tc filter add dev $h3 ingress pref 77 prot $prot \ flower ip_ttl 50 action pass From b2f6d73395cbac48977f209cf36de9214152b118 Mon Sep 17 00:00:00 2001 From: Luca Ceresoli Date: Fri, 4 Aug 2023 17:12:39 +0200 Subject: [PATCH 461/513] drm/panel: simple: Fix AUO G121EAN01 panel timings according to the docs [ Upstream commit e8470c0a7bcaa82f78ad34282d662dd7bd9630c2 ] Commit 03e909acd95a ("drm/panel: simple: Add support for AUO G121EAN01.4 panel") added support for this panel model, but the timings it implements are very different from what the datasheet describes. I checked both the G121EAN01.0 datasheet from [0] and the G121EAN01.4 one from [1] and they all have the same timings: for example the LVDS clock typical value is 74.4 MHz, not 66.7 MHz as implemented. Replace the timings with the ones from the documentation. These timings have been tested and the clock frequencies verified with an oscilloscope to ensure they are correct. Also use struct display_timing instead of struct drm_display_mode in order to also specify the minimum and maximum values. 
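As a quick cross-check of the numbers (not driver code, just arithmetic on the typical values this patch programs: 1280x800 active, 50/50/100 horizontal and 10/10/18 vertical porches/sync, 74.4 MHz pixel clock), the implied refresh rate comes out as expected:

#include <stdio.h>

int main(void)
{
        unsigned long pixclk = 74400000;              /* typical pixel clock, Hz */
        unsigned int htotal = 1280 + 50 + 50 + 100;   /* hactive + hfp + hbp + hsync (typ) */
        unsigned int vtotal = 800 + 10 + 10 + 18;     /* vactive + vfp + vbp + vsync (typ) */

        printf("htotal=%u vtotal=%u refresh=%.2f Hz\n", htotal, vtotal,
               (double)pixclk / ((double)htotal * vtotal));
        return 0;
}

It prints roughly 59.99 Hz, consistent with a conventional 60 Hz panel.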
[0] https://embedded.avnet.com/product/g121ean01-0/ [1] https://embedded.avnet.com/product/g121ean01-4/ Fixes: 03e909acd95a ("drm/panel: simple: Add support for AUO G121EAN01.4 panel") Signed-off-by: Luca Ceresoli Reviewed-by: Neil Armstrong Signed-off-by: Neil Armstrong Link: https://patchwork.freedesktop.org/patch/msgid/20230804151239.835216-1-luca.ceresoli@bootlin.com Signed-off-by: Sasha Levin --- drivers/gpu/drm/panel/panel-simple.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c index 391d73d2638a..7cf0af78b7bc 100644 --- a/drivers/gpu/drm/panel/panel-simple.c +++ b/drivers/gpu/drm/panel/panel-simple.c @@ -1258,21 +1258,21 @@ static const struct panel_desc auo_g104sn02 = { .connector_type = DRM_MODE_CONNECTOR_LVDS, }; -static const struct drm_display_mode auo_g121ean01_mode = { - .clock = 66700, - .hdisplay = 1280, - .hsync_start = 1280 + 58, - .hsync_end = 1280 + 58 + 8, - .htotal = 1280 + 58 + 8 + 70, - .vdisplay = 800, - .vsync_start = 800 + 6, - .vsync_end = 800 + 6 + 4, - .vtotal = 800 + 6 + 4 + 10, +static const struct display_timing auo_g121ean01_timing = { + .pixelclock = { 60000000, 74400000, 90000000 }, + .hactive = { 1280, 1280, 1280 }, + .hfront_porch = { 20, 50, 100 }, + .hback_porch = { 20, 50, 100 }, + .hsync_len = { 30, 100, 200 }, + .vactive = { 800, 800, 800 }, + .vfront_porch = { 2, 10, 25 }, + .vback_porch = { 2, 10, 25 }, + .vsync_len = { 4, 18, 50 }, }; static const struct panel_desc auo_g121ean01 = { - .modes = &auo_g121ean01_mode, - .num_modes = 1, + .timings = &auo_g121ean01_timing, + .num_timings = 1, .bpc = 8, .size = { .width = 261, From 82109740d6105c6fc5d354247e4c3cb360b5c3a9 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Tue, 8 Aug 2023 20:40:17 +0200 Subject: [PATCH 462/513] netfilter: nf_tables: fix false-positive lockdep splat [ Upstream commit b9f052dc68f69dac89fe1e24693354c033daa091 ] ->abort invocation may cause splat on debug kernels: WARNING: suspicious RCU usage net/netfilter/nft_set_pipapo.c:1697 suspicious rcu_dereference_check() usage! [..] rcu_scheduler_active = 2, debug_locks = 1 1 lock held by nft/133554: [..] (nft_net->commit_mutex){+.+.}-{3:3}, at: nf_tables_valid_genid [..] lockdep_rcu_suspicious+0x1ad/0x260 nft_pipapo_abort+0x145/0x180 __nf_tables_abort+0x5359/0x63d0 nf_tables_abort+0x24/0x40 nfnetlink_rcv+0x1a0a/0x22c0 netlink_unicast+0x73c/0x900 netlink_sendmsg+0x7f0/0xc20 ____sys_sendmsg+0x48d/0x760 Transaction mutex is held, so parallel updates are not possible. Switch to _protected and check mutex is held for lockdep enabled builds. 
Fixes: 212ed75dc5fb ("netfilter: nf_tables: integrate pipapo into commit protocol") Signed-off-by: Florian Westphal Signed-off-by: Sasha Levin --- net/netfilter/nft_set_pipapo.c | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c index a81829c10fea..32cfd0a84b0e 100644 --- a/net/netfilter/nft_set_pipapo.c +++ b/net/netfilter/nft_set_pipapo.c @@ -1665,6 +1665,17 @@ static void nft_pipapo_commit(const struct nft_set *set) priv->clone = new_clone; } +static bool nft_pipapo_transaction_mutex_held(const struct nft_set *set) +{ +#ifdef CONFIG_PROVE_LOCKING + const struct net *net = read_pnet(&set->net); + + return lockdep_is_held(&nft_pernet(net)->commit_mutex); +#else + return true; +#endif +} + static void nft_pipapo_abort(const struct nft_set *set) { struct nft_pipapo *priv = nft_set_priv(set); @@ -1673,7 +1684,7 @@ static void nft_pipapo_abort(const struct nft_set *set) if (!priv->dirty) return; - m = rcu_dereference(priv->match); + m = rcu_dereference_protected(priv->match, nft_pipapo_transaction_mutex_held(set)); new_clone = pipapo_clone(m); if (IS_ERR(new_clone)) From 1adaec4758d1cefbf348a291ad9b752aaa10f8d3 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sat, 12 Aug 2023 13:05:16 +0200 Subject: [PATCH 463/513] netfilter: nf_tables: deactivate catchall elements in next generation [ Upstream commit 90e5b3462efa37b8bba82d7c4e63683856e188af ] When flushing, individual set elements are disabled in the next generation via the ->flush callback. Catchall elements are not disabled. This is incorrect and may lead to double-deactivations of catchall elements which then results in memory leaks: WARNING: CPU: 1 PID: 3300 at include/net/netfilter/nf_tables.h:1172 nft_map_deactivate+0x549/0x730 CPU: 1 PID: 3300 Comm: nft Not tainted 6.5.0-rc5+ #60 RIP: 0010:nft_map_deactivate+0x549/0x730 [..] ? nft_map_deactivate+0x549/0x730 nf_tables_delset+0xb66/0xeb0 (the warn is due to nft_use_dec() detecting underflow). Fixes: aaa31047a6d2 ("netfilter: nftables: add catch-all set element support") Reported-by: lonial con Signed-off-by: Florian Westphal Signed-off-by: Sasha Levin --- net/netfilter/nf_tables_api.c | 1 + 1 file changed, 1 insertion(+) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 1e84314fe334..1e2d1e4bdb74 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -6719,6 +6719,7 @@ static int nft_set_catchall_flush(const struct nft_ctx *ctx, ret = __nft_set_catchall_flush(ctx, set, &elem); if (ret < 0) break; + nft_set_elem_change_active(ctx->net, set, ext); } return ret; From 9177869b85ddd9184d4f5e09eaa3d2d8547089af Mon Sep 17 00:00:00 2001 From: Sishuai Gong Date: Thu, 10 Aug 2023 15:12:42 -0400 Subject: [PATCH 464/513] ipvs: fix racy memcpy in proc_do_sync_threshold [ Upstream commit 5310760af1d4fbea1452bfc77db5f9a680f7ae47 ] When two threads run proc_do_sync_threshold() in parallel, data races could happen between the two memcpy(): Thread-1 Thread-2 memcpy(val, valp, sizeof(val)); memcpy(valp, val, sizeof(val)); This race might mess up the (struct ctl_table *) table->data, so we add a mutex lock to serialize them. 
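The race and its cure are easy to reproduce outside the kernel; here is a small pthread-based illustration (userspace, invented names, build with -pthread) of the same serialize-the-memcpy pattern the patch applies around table->data:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static int shared[2] = { 3, 50 };                  /* plays the role of table->data */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void *writer(void *arg)
{
        int val[2];

        pthread_mutex_lock(&lock);                 /* the added mutex_lock(&ipvs->sync_mutex) */
        memcpy(val, shared, sizeof(val));          /* snapshot the current values */
        val[0] += (int)(long)arg;                  /* stand-in for parsing new input */
        memcpy(shared, val, sizeof(val));          /* write back without interleaving */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        pthread_create(&a, NULL, writer, (void *)1L);
        pthread_create(&b, NULL, writer, (void *)2L);
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        printf("shared = { %d, %d }\n", shared[0], shared[1]);  /* always { 6, 50 } */
        return 0;
}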
Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2") Link: https://lore.kernel.org/netdev/B6988E90-0A1E-4B85-BF26-2DAF6D482433@gmail.com/ Signed-off-by: Sishuai Gong Acked-by: Simon Horman Acked-by: Julian Anastasov Signed-off-by: Florian Westphal Signed-off-by: Sasha Levin --- net/netfilter/ipvs/ip_vs_ctl.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 29ec3ef63edc..d0b64c36471d 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1802,6 +1802,7 @@ static int proc_do_sync_threshold(struct ctl_table *table, int write, void *buffer, size_t *lenp, loff_t *ppos) { + struct netns_ipvs *ipvs = table->extra2; int *valp = table->data; int val[2]; int rc; @@ -1811,6 +1812,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write, .mode = table->mode, }; + mutex_lock(&ipvs->sync_mutex); memcpy(val, valp, sizeof(val)); rc = proc_dointvec(&tmp, write, buffer, lenp, ppos); if (write) { @@ -1820,6 +1822,7 @@ proc_do_sync_threshold(struct ctl_table *table, int write, else memcpy(valp, val, sizeof(val)); } + mutex_unlock(&ipvs->sync_mutex); return rc; } @@ -4077,6 +4080,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct netns_ipvs *ipvs) ipvs->sysctl_sync_threshold[0] = DEFAULT_SYNC_THRESHOLD; ipvs->sysctl_sync_threshold[1] = DEFAULT_SYNC_PERIOD; tbl[idx].data = &ipvs->sysctl_sync_threshold; + tbl[idx].extra2 = ipvs; tbl[idx++].maxlen = sizeof(ipvs->sysctl_sync_threshold); ipvs->sysctl_sync_refresh_period = DEFAULT_SYNC_REFRESH_PERIOD; tbl[idx++].data = &ipvs->sysctl_sync_refresh_period; From bf221e5e4b19c5b463af94281cb3dc4f8c792741 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Tue, 15 Aug 2023 15:39:02 +0200 Subject: [PATCH 465/513] netfilter: nft_dynset: disallow object maps [ Upstream commit 23185c6aed1ffb8fc44087880ba2767aba493779 ] Do not allow to insert elements from datapath to objects maps. Fixes: 8aeff920dcc9 ("netfilter: nf_tables: add stateful object reference to set elements") Signed-off-by: Pablo Neira Ayuso Signed-off-by: Florian Westphal Signed-off-by: Sasha Levin --- net/netfilter/nft_dynset.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c index 29c7ae8789e9..73e606372b05 100644 --- a/net/netfilter/nft_dynset.c +++ b/net/netfilter/nft_dynset.c @@ -191,6 +191,9 @@ static int nft_dynset_init(const struct nft_ctx *ctx, if (IS_ERR(set)) return PTR_ERR(set); + if (set->flags & NFT_SET_OBJECT) + return -EOPNOTSUPP; + if (set->ops->update == NULL) return -EOPNOTSUPP; From 0ffbc341cfaea151e4d9b0187e65aea51d7011f6 Mon Sep 17 00:00:00 2001 From: Justin Chen Date: Sat, 12 Aug 2023 21:41:47 -0700 Subject: [PATCH 466/513] net: phy: broadcom: stub c45 read/write for 54810 [ Upstream commit 096516d092d54604d590827d05b1022c8f326639 ] The 54810 does not support c45. The mmd_phy_indirect accesses return arbirtary values leading to odd behavior like saying it supports EEE when it doesn't. We also see that reading/writing these non-existent MMD registers leads to phy instability in some cases. 
Fixes: b14995ac2527 ("net: phy: broadcom: Add BCM54810 PHY entry") Signed-off-by: Justin Chen Reviewed-by: Florian Fainelli Link: https://lore.kernel.org/r/1691901708-28650-1-git-send-email-justin.chen@broadcom.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/phy/broadcom.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c index b330efb98209..f3b39af83a27 100644 --- a/drivers/net/phy/broadcom.c +++ b/drivers/net/phy/broadcom.c @@ -412,6 +412,17 @@ static int bcm54xx_resume(struct phy_device *phydev) return bcm54xx_config_init(phydev); } +static int bcm54810_read_mmd(struct phy_device *phydev, int devnum, u16 regnum) +{ + return -EOPNOTSUPP; +} + +static int bcm54810_write_mmd(struct phy_device *phydev, int devnum, u16 regnum, + u16 val) +{ + return -EOPNOTSUPP; +} + static int bcm54811_config_init(struct phy_device *phydev) { int err, reg; @@ -832,6 +843,8 @@ static struct phy_driver broadcom_drivers[] = { .get_strings = bcm_phy_get_strings, .get_stats = bcm54xx_get_stats, .probe = bcm54xx_phy_probe, + .read_mmd = bcm54810_read_mmd, + .write_mmd = bcm54810_write_mmd, .config_init = bcm54xx_config_init, .config_aneg = bcm5481_config_aneg, .config_intr = bcm_phy_config_intr, From ae6834689fd69b6ee068d6f124a3b14714e0fc3b Mon Sep 17 00:00:00 2001 From: Ziyang Xuan Date: Mon, 14 Aug 2023 11:23:01 +0800 Subject: [PATCH 467/513] team: Fix incorrect deletion of ETH_P_8021AD protocol vid from slaves [ Upstream commit dafcbce07136d799edc4c67f04f9fd69ff1eac1f ] Similar to commit 01f4fd270870 ("bonding: Fix incorrect deletion of ETH_P_8021AD protocol vid from slaves"), we can trigger BUG_ON(!vlan_info) in unregister_vlan_dev() with the following testcase: # ip netns add ns1 # ip netns exec ns1 ip link add team1 type team # ip netns exec ns1 ip link add team_slave type veth peer veth2 # ip netns exec ns1 ip link set team_slave master team1 # ip netns exec ns1 ip link add link team_slave name team_slave.10 type vlan id 10 protocol 802.1ad # ip netns exec ns1 ip link add link team1 name team1.10 type vlan id 10 protocol 802.1ad # ip netns exec ns1 ip link set team_slave nomaster # ip netns del ns1 Add S-VLAN tag related features support to team driver. So the team driver will always propagate the VLAN info to its slaves. Fixes: 8ad227ff89a7 ("net: vlan: add 802.1ad support") Suggested-by: Ido Schimmel Signed-off-by: Ziyang Xuan Reviewed-by: Simon Horman Link: https://lore.kernel.org/r/20230814032301.2804971-1-william.xuanziyang@huawei.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/team/team.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 4dfa9c610974..f99df92d211e 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -2195,7 +2195,9 @@ static void team_setup(struct net_device *dev) dev->hw_features = TEAM_VLAN_FEATURES | NETIF_F_HW_VLAN_CTAG_RX | - NETIF_F_HW_VLAN_CTAG_FILTER; + NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_HW_VLAN_STAG_RX | + NETIF_F_HW_VLAN_STAG_FILTER; dev->hw_features |= NETIF_F_GSO_ENCAP_ALL; dev->features |= dev->hw_features; From 5ba2b936f3e1e6325d70e4e8de71ad905477478a Mon Sep 17 00:00:00 2001 From: Piotr Gardocki Date: Mon, 7 Aug 2023 16:46:04 +0200 Subject: [PATCH 468/513] iavf: fix FDIR rule fields masks validation [ Upstream commit 751969e5b1196821ef78f0aa664a8a97c92c9057 ] Return an error if a field's mask is neither full nor empty. 
When a mask is only partial the field is not being used for rule programming but it gives a wrong impression it is used. Fix by returning an error on any partial mask to make it clear they are not supported. The ip_ver assignment is moved earlier in code to allow using it in iavf_validate_fdir_fltr_masks. Fixes: 527691bf0682 ("iavf: Support IPv4 Flow Director filters") Fixes: e90cbc257a6f ("iavf: Support IPv6 Flow Director filters") Signed-off-by: Piotr Gardocki Tested-by: Rafal Romanowski Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- .../net/ethernet/intel/iavf/iavf_ethtool.c | 10 +++ drivers/net/ethernet/intel/iavf/iavf_fdir.c | 77 ++++++++++++++++++- drivers/net/ethernet/intel/iavf/iavf_fdir.h | 2 + 3 files changed, 85 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c index e622b6e6ac2b..a9a7453d969c 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_ethtool.c +++ b/drivers/net/ethernet/intel/iavf/iavf_ethtool.c @@ -1275,6 +1275,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.src_port = fsp->m_u.tcp_ip4_spec.psrc; fltr->ip_mask.dst_port = fsp->m_u.tcp_ip4_spec.pdst; fltr->ip_mask.tos = fsp->m_u.tcp_ip4_spec.tos; + fltr->ip_ver = 4; break; case AH_V4_FLOW: case ESP_V4_FLOW: @@ -1286,6 +1287,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.v4_addrs.dst_ip = fsp->m_u.ah_ip4_spec.ip4dst; fltr->ip_mask.spi = fsp->m_u.ah_ip4_spec.spi; fltr->ip_mask.tos = fsp->m_u.ah_ip4_spec.tos; + fltr->ip_ver = 4; break; case IPV4_USER_FLOW: fltr->ip_data.v4_addrs.src_ip = fsp->h_u.usr_ip4_spec.ip4src; @@ -1298,6 +1300,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.l4_header = fsp->m_u.usr_ip4_spec.l4_4_bytes; fltr->ip_mask.tos = fsp->m_u.usr_ip4_spec.tos; fltr->ip_mask.proto = fsp->m_u.usr_ip4_spec.proto; + fltr->ip_ver = 4; break; case TCP_V6_FLOW: case UDP_V6_FLOW: @@ -1316,6 +1319,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.src_port = fsp->m_u.tcp_ip6_spec.psrc; fltr->ip_mask.dst_port = fsp->m_u.tcp_ip6_spec.pdst; fltr->ip_mask.tclass = fsp->m_u.tcp_ip6_spec.tclass; + fltr->ip_ver = 6; break; case AH_V6_FLOW: case ESP_V6_FLOW: @@ -1331,6 +1335,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe sizeof(struct in6_addr)); fltr->ip_mask.spi = fsp->m_u.ah_ip6_spec.spi; fltr->ip_mask.tclass = fsp->m_u.ah_ip6_spec.tclass; + fltr->ip_ver = 6; break; case IPV6_USER_FLOW: memcpy(&fltr->ip_data.v6_addrs.src_ip, fsp->h_u.usr_ip6_spec.ip6src, @@ -1347,6 +1352,7 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe fltr->ip_mask.l4_header = fsp->m_u.usr_ip6_spec.l4_4_bytes; fltr->ip_mask.tclass = fsp->m_u.usr_ip6_spec.tclass; fltr->ip_mask.proto = fsp->m_u.usr_ip6_spec.l4_proto; + fltr->ip_ver = 6; break; case ETHER_FLOW: fltr->eth_data.etype = fsp->h_u.ether_spec.h_proto; @@ -1357,6 +1363,10 @@ iavf_add_fdir_fltr_info(struct iavf_adapter *adapter, struct ethtool_rx_flow_spe return -EINVAL; } + err = iavf_validate_fdir_fltr_masks(adapter, fltr); + if (err) + return err; + if (iavf_fdir_is_dup_fltr(adapter, fltr)) return -EEXIST; diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.c b/drivers/net/ethernet/intel/iavf/iavf_fdir.c index 505e82ebafe4..03e774bd2a5b 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.c +++ 
b/drivers/net/ethernet/intel/iavf/iavf_fdir.c @@ -18,6 +18,79 @@ static const struct in6_addr ipv6_addr_full_mask = { } }; +static const struct in6_addr ipv6_addr_zero_mask = { + .in6_u = { + .u6_addr8 = { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + } + } +}; + +/** + * iavf_validate_fdir_fltr_masks - validate Flow Director filter fields masks + * @adapter: pointer to the VF adapter structure + * @fltr: Flow Director filter data structure + * + * Returns 0 if all masks of packet fields are either full or empty. Returns + * error on at least one partial mask. + */ +int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter, + struct iavf_fdir_fltr *fltr) +{ + if (fltr->eth_mask.etype && fltr->eth_mask.etype != htons(U16_MAX)) + goto partial_mask; + + if (fltr->ip_ver == 4) { + if (fltr->ip_mask.v4_addrs.src_ip && + fltr->ip_mask.v4_addrs.src_ip != htonl(U32_MAX)) + goto partial_mask; + + if (fltr->ip_mask.v4_addrs.dst_ip && + fltr->ip_mask.v4_addrs.dst_ip != htonl(U32_MAX)) + goto partial_mask; + + if (fltr->ip_mask.tos && fltr->ip_mask.tos != U8_MAX) + goto partial_mask; + } else if (fltr->ip_ver == 6) { + if (memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_zero_mask, + sizeof(struct in6_addr)) && + memcmp(&fltr->ip_mask.v6_addrs.src_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) + goto partial_mask; + + if (memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_zero_mask, + sizeof(struct in6_addr)) && + memcmp(&fltr->ip_mask.v6_addrs.dst_ip, &ipv6_addr_full_mask, + sizeof(struct in6_addr))) + goto partial_mask; + + if (fltr->ip_mask.tclass && fltr->ip_mask.tclass != U8_MAX) + goto partial_mask; + } + + if (fltr->ip_mask.proto && fltr->ip_mask.proto != U8_MAX) + goto partial_mask; + + if (fltr->ip_mask.src_port && fltr->ip_mask.src_port != htons(U16_MAX)) + goto partial_mask; + + if (fltr->ip_mask.dst_port && fltr->ip_mask.dst_port != htons(U16_MAX)) + goto partial_mask; + + if (fltr->ip_mask.spi && fltr->ip_mask.spi != htonl(U32_MAX)) + goto partial_mask; + + if (fltr->ip_mask.l4_header && + fltr->ip_mask.l4_header != htonl(U32_MAX)) + goto partial_mask; + + return 0; + +partial_mask: + dev_err(&adapter->pdev->dev, "Failed to add Flow Director filter, partial masks are not supported\n"); + return -EOPNOTSUPP; +} + /** * iavf_pkt_udp_no_pay_len - the length of UDP packet without payload * @fltr: Flow Director filter data structure @@ -263,8 +336,6 @@ iavf_fill_fdir_ip4_hdr(struct iavf_fdir_fltr *fltr, VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV4, DST); } - fltr->ip_ver = 4; - return 0; } @@ -309,8 +380,6 @@ iavf_fill_fdir_ip6_hdr(struct iavf_fdir_fltr *fltr, VIRTCHNL_ADD_PROTO_HDR_FIELD_BIT(hdr, IPV6, DST); } - fltr->ip_ver = 6; - return 0; } diff --git a/drivers/net/ethernet/intel/iavf/iavf_fdir.h b/drivers/net/ethernet/intel/iavf/iavf_fdir.h index 33c55c366315..9eb9f73f6adf 100644 --- a/drivers/net/ethernet/intel/iavf/iavf_fdir.h +++ b/drivers/net/ethernet/intel/iavf/iavf_fdir.h @@ -110,6 +110,8 @@ struct iavf_fdir_fltr { struct virtchnl_fdir_add vc_add_msg; }; +int iavf_validate_fdir_fltr_masks(struct iavf_adapter *adapter, + struct iavf_fdir_fltr *fltr); int iavf_fill_fdir_add_msg(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); void iavf_print_fdir_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); bool iavf_fdir_is_dup_fltr(struct iavf_adapter *adapter, struct iavf_fdir_fltr *fltr); From f11c2802e143b949e6b4485a494207e5be98cf40 Mon Sep 17 00:00:00 2001 From: Andrii Staikov Date: Wed, 2 Aug 2023 09:47:32 +0200 Subject: [PATCH 469/513] i40e: fix 
misleading debug logs [ Upstream commit 2f2beb8874cb0844e84ad26e990f05f4f13ff63f ] Change "write" into the actual "read" word. Change parameters description. Fixes: 7073f46e443e ("i40e: Add AQ commands for NVM Update for X722") Signed-off-by: Aleksandr Loktionov Signed-off-by: Andrii Staikov Signed-off-by: Tony Nguyen Signed-off-by: Sasha Levin --- drivers/net/ethernet/intel/i40e/i40e_nvm.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_nvm.c b/drivers/net/ethernet/intel/i40e/i40e_nvm.c index 82af180cc5ee..b7556a6c2758 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_nvm.c +++ b/drivers/net/ethernet/intel/i40e/i40e_nvm.c @@ -210,11 +210,11 @@ static int i40e_read_nvm_word_srctl(struct i40e_hw *hw, u16 offset, * @hw: pointer to the HW structure. * @module_pointer: module pointer location in words from the NVM beginning * @offset: offset in words from module start - * @words: number of words to write - * @data: buffer with words to write to the Shadow RAM + * @words: number of words to read + * @data: buffer with words to read to the Shadow RAM * @last_command: tells the AdminQ that this is the last command * - * Writes a 16 bit words buffer to the Shadow RAM using the admin command. + * Reads a 16 bit words buffer to the Shadow RAM using the admin command. **/ static int i40e_read_nvm_aq(struct i40e_hw *hw, u8 module_pointer, u32 offset, @@ -234,18 +234,18 @@ static int i40e_read_nvm_aq(struct i40e_hw *hw, */ if ((offset + words) > hw->nvm.sr_size) i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: offset %d beyond Shadow RAM limit %d\n", + "NVM read error: offset %d beyond Shadow RAM limit %d\n", (offset + words), hw->nvm.sr_size); else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS) - /* We can write only up to 4KB (one sector), in one AQ write */ + /* We can read only up to 4KB (one sector), in one AQ write */ i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write fail error: tried to write %d words, limit is %d.\n", + "NVM read fail error: tried to read %d words, limit is %d.\n", words, I40E_SR_SECTOR_SIZE_IN_WORDS); else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS) != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS)) - /* A single write cannot spread over two sectors */ + /* A single read cannot spread over two sectors */ i40e_debug(hw, I40E_DEBUG_NVM, - "NVM write error: cannot spread over two sectors in a single write offset=%d words=%d\n", + "NVM read error: cannot spread over two sectors in a single read offset=%d words=%d\n", offset, words); else ret_code = i40e_aq_read_nvm(hw, module_pointer, From fc3d82d295d1f65543d322af84a88f898fbade87 Mon Sep 17 00:00:00 2001 From: Alfred Lee Date: Mon, 14 Aug 2023 17:13:23 -0700 Subject: [PATCH 470/513] net: dsa: mv88e6xxx: Wait for EEPROM done before HW reset [ Upstream commit 23d775f12dcd23d052a4927195f15e970e27ab26 ] If the switch is reset during active EEPROM transactions, as in just after an SoC reset after power up, the I2C bus transaction may be cut short leaving the EEPROM internal I2C state machine in the wrong state. When the switch is reset again, the bad state machine state may result in data being read from the wrong memory location causing the switch to enter unexpected mode rendering it inoperational. 
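The waiting itself is just a bounded poll on a status flag; a standalone sketch of that pattern (invented helper names and a fake status bit, not the mv88e6xxx register API) looks like this:

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static int fake_busy_polls = 3;                 /* pretend three polls are needed */

/* Invented stand-in for reading the switch's EEPROM-done status bit. */
static bool eeprom_done(void)
{
        return fake_busy_polls-- <= 0;
}

static int wait_eeprom_done(unsigned int timeout_ms)
{
        while (timeout_ms--) {
                if (eeprom_done())
                        return 0;
                usleep(1000);                   /* 1 ms between polls */
        }
        return -1;                              /* timed out */
}

int main(void)
{
        if (wait_eeprom_done(100) == 0)
                puts("EEPROM idle, safe to assert the reset GPIO");
        else
                puts("timeout waiting for EEPROM, resetting anyway");
        return 0;
}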
Fixes: a3dcb3e7e70c ("net: dsa: mv88e6xxx: Wait for EEPROM done after HW reset") Signed-off-by: Alfred Lee Reviewed-by: Andrew Lunn Link: https://lore.kernel.org/r/20230815001323.24739-1-l00g33k@gmail.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- drivers/net/dsa/mv88e6xxx/chip.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index ac1560fa29e4..7e93b72f9b54 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -2588,6 +2588,14 @@ static void mv88e6xxx_hardware_reset(struct mv88e6xxx_chip *chip) /* If there is a GPIO connected to the reset pin, toggle it */ if (gpiod) { + /* If the switch has just been reset and not yet completed + * loading EEPROM, the reset may interrupt the I2C transaction + * mid-byte, causing the first EEPROM read after the reset + * from the wrong location resulting in the switch booting + * to wrong mode and inoperable. + */ + mv88e6xxx_g1_wait_eeprom_done(chip); + gpiod_set_value_cansleep(gpiod, 1); usleep_range(10000, 20000); gpiod_set_value_cansleep(gpiod, 0); From 5fc4fd3f3eb8056859dd396e7de349b4da560f0e Mon Sep 17 00:00:00 2001 From: Abel Wu Date: Wed, 16 Aug 2023 17:12:22 +0800 Subject: [PATCH 471/513] sock: Fix misuse of sk_under_memory_pressure() [ Upstream commit 2d0c88e84e483982067a82073f6125490ddf3614 ] The status of global socket memory pressure is updated when: a) __sk_mem_raise_allocated(): enter: sk_memory_allocated(sk) > sysctl_mem[1] leave: sk_memory_allocated(sk) <= sysctl_mem[0] b) __sk_mem_reduce_allocated(): leave: sk_under_memory_pressure(sk) && sk_memory_allocated(sk) < sysctl_mem[0] So the conditions of leaving global pressure are inconstant, which may lead to the situation that one pressured net-memcg prevents the global pressure from being cleared when there is indeed no global pressure, thus the global constrains are still in effect unexpectedly on the other sockets. This patch fixes this by ignoring the net-memcg's pressure when deciding whether should leave global memory pressure. 
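A toy userspace model (invented field names, not the real struct sock/proto layout) makes the distinction between the two predicates concrete: a socket can look under pressure because of its memcg while the protocol-wide flag is clear, and only the latter should drive the global leave decision:

#include <stdbool.h>
#include <stdio.h>

/* Invented, heavily simplified stand-ins for struct proto / struct sock. */
struct proto { bool *memory_pressure; };            /* global flag, may be absent */
struct sock  { struct proto *prot; bool memcg_pressure; };

static bool under_memory_pressure(const struct sock *sk)
{
        if (sk->memcg_pressure)                     /* cgroup-local pressure counts here */
                return true;
        return sk->prot->memory_pressure && *sk->prot->memory_pressure;
}

static bool under_global_memory_pressure(const struct sock *sk)
{
        /* only the protocol-wide flag matters for the global leave decision */
        return sk->prot->memory_pressure && *sk->prot->memory_pressure;
}

int main(void)
{
        bool global = false;
        struct proto tcp = { .memory_pressure = &global };
        struct sock sk = { .prot = &tcp, .memcg_pressure = true };

        printf("combined: %d, global-only: %d\n",
               under_memory_pressure(&sk), under_global_memory_pressure(&sk));
        return 0;
}

For a memcg-pressured socket with no global pressure it prints "combined: 1, global-only: 0".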
Fixes: e1aab161e013 ("socket: initial cgroup code.") Signed-off-by: Abel Wu Acked-by: Shakeel Butt Link: https://lore.kernel.org/r/20230816091226.1542-1-wuyun.abel@bytedance.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- include/net/sock.h | 6 ++++++ net/core/sock.c | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/include/net/sock.h b/include/net/sock.h index 93a6717213ae..6b12b62417e0 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1381,6 +1381,12 @@ static inline bool sk_has_memory_pressure(const struct sock *sk) return sk->sk_prot->memory_pressure != NULL; } +static inline bool sk_under_global_memory_pressure(const struct sock *sk) +{ + return sk->sk_prot->memory_pressure && + !!*sk->sk_prot->memory_pressure; +} + static inline bool sk_under_memory_pressure(const struct sock *sk) { if (!sk->sk_prot->memory_pressure) diff --git a/net/core/sock.c b/net/core/sock.c index 1f9401d757cb..ae1e9e2b8255 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2880,7 +2880,7 @@ void __sk_mem_reduce_allocated(struct sock *sk, int amount) if (mem_cgroup_sockets_enabled && sk->sk_memcg) mem_cgroup_uncharge_skmem(sk->sk_memcg, amount); - if (sk_under_memory_pressure(sk) && + if (sk_under_global_memory_pressure(sk) && (sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0))) sk_leave_memory_pressure(sk); } From 578371ce0d7f67ea1e65817c04478aaab0d36b68 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Wed, 16 Aug 2023 14:21:58 +0000 Subject: [PATCH 472/513] net: do not allow gso_size to be set to GSO_BY_FRAGS [ Upstream commit b616be6b97688f2f2bd7c4a47ab32f27f94fb2a9 ] One missing check in virtio_net_hdr_to_skb() allowed syzbot to crash kernels again [1] Do not allow gso_size to be set to GSO_BY_FRAGS (0xffff), because this magic value is used by the kernel. 
[1] general protection fault, probably for non-canonical address 0xdffffc000000000e: 0000 [#1] PREEMPT SMP KASAN KASAN: null-ptr-deref in range [0x0000000000000070-0x0000000000000077] CPU: 0 PID: 5039 Comm: syz-executor401 Not tainted 6.5.0-rc5-next-20230809-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 07/26/2023 RIP: 0010:skb_segment+0x1a52/0x3ef0 net/core/skbuff.c:4500 Code: 00 00 00 e9 ab eb ff ff e8 6b 96 5d f9 48 8b 84 24 00 01 00 00 48 8d 78 70 48 b8 00 00 00 00 00 fc ff df 48 89 fa 48 c1 ea 03 <0f> b6 04 02 84 c0 74 08 3c 03 0f 8e ea 21 00 00 48 8b 84 24 00 01 RSP: 0018:ffffc90003d3f1c8 EFLAGS: 00010202 RAX: dffffc0000000000 RBX: 000000000001fffe RCX: 0000000000000000 RDX: 000000000000000e RSI: ffffffff882a3115 RDI: 0000000000000070 RBP: ffffc90003d3f378 R08: 0000000000000005 R09: 000000000000ffff R10: 000000000000ffff R11: 5ee4a93e456187d6 R12: 000000000001ffc6 R13: dffffc0000000000 R14: 0000000000000008 R15: 000000000000ffff FS: 00005555563f2380(0000) GS:ffff8880b9800000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 0000000020020000 CR3: 000000001626d000 CR4: 00000000003506f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: udp6_ufo_fragment+0x9d2/0xd50 net/ipv6/udp_offload.c:109 ipv6_gso_segment+0x5c4/0x17b0 net/ipv6/ip6_offload.c:120 skb_mac_gso_segment+0x292/0x610 net/core/gso.c:53 __skb_gso_segment+0x339/0x710 net/core/gso.c:124 skb_gso_segment include/net/gso.h:83 [inline] validate_xmit_skb+0x3a5/0xf10 net/core/dev.c:3625 __dev_queue_xmit+0x8f0/0x3d60 net/core/dev.c:4329 dev_queue_xmit include/linux/netdevice.h:3082 [inline] packet_xmit+0x257/0x380 net/packet/af_packet.c:276 packet_snd net/packet/af_packet.c:3087 [inline] packet_sendmsg+0x24c7/0x5570 net/packet/af_packet.c:3119 sock_sendmsg_nosec net/socket.c:727 [inline] sock_sendmsg+0xd9/0x180 net/socket.c:750 ____sys_sendmsg+0x6ac/0x940 net/socket.c:2496 ___sys_sendmsg+0x135/0x1d0 net/socket.c:2550 __sys_sendmsg+0x117/0x1e0 net/socket.c:2579 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x38/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x63/0xcd RIP: 0033:0x7ff27cdb34d9 Fixes: 3953c46c3ac7 ("sk_buff: allow segmenting based on frag sizes") Reported-by: syzbot Signed-off-by: Eric Dumazet Cc: Xin Long Cc: "Michael S. Tsirkin" Cc: Jason Wang Reviewed-by: Willem de Bruijn Reviewed-by: Marcelo Ricardo Leitner Reviewed-by: Xuan Zhuo Link: https://lore.kernel.org/r/20230816142158.1779798-1-edumazet@google.com Signed-off-by: Jakub Kicinski Signed-off-by: Sasha Levin --- include/linux/virtio_net.h | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h index a960de68ac69..6047058d6703 100644 --- a/include/linux/virtio_net.h +++ b/include/linux/virtio_net.h @@ -148,6 +148,10 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb, if (gso_type & SKB_GSO_UDP) nh_off -= thlen; + /* Kernel has a special handling for GSO_BY_FRAGS. */ + if (gso_size == GSO_BY_FRAGS) + return -EINVAL; + /* Too small packets are not really GSO ones. 
*/ if (skb->len - nh_off > gso_size) { shinfo->gso_size = gso_size; From 03522d73a4989a2757c42d7ae5e97d787e35a4c5 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Wed, 14 Jun 2023 10:18:23 +0300 Subject: [PATCH 473/513] bus: ti-sysc: Flush posted write on enable before reset [ Upstream commit 34539b442b3bc7d5bf10164750302b60b91f18a7 ] The am335x devices started producing boot errors for resetting musb module in because of subtle timing changes: Unhandled fault: external abort on non-linefetch (0x1008) ... sysc_poll_reset_sysconfig from sysc_reset+0x109/0x12 sysc_reset from sysc_probe+0xa99/0xeb0 ... The fix is to flush posted write after enable before reset during probe. Note that some devices also need to specify the delay after enable with ti,sysc-delay-us, but this is not needed for musb on am335x based on my tests. Reported-by: kernelci.org bot Closes: https://storage.kernelci.org/next/master/next-20230614/arm/multi_v7_defconfig+CONFIG_THUMB2_KERNEL=y/gcc-10/lab-cip/baseline-beaglebone-black.html Fixes: 596e7955692b ("bus: ti-sysc: Add support for software reset") Signed-off-by: Tony Lindgren Signed-off-by: Sasha Levin --- drivers/bus/ti-sysc.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c index 7d508f905003..71b541538801 100644 --- a/drivers/bus/ti-sysc.c +++ b/drivers/bus/ti-sysc.c @@ -2089,6 +2089,8 @@ static int sysc_reset(struct sysc *ddata) sysc_val = sysc_read_sysconfig(ddata); sysc_val |= sysc_mask; sysc_write(ddata, sysc_offset, sysc_val); + /* Flush posted write */ + sysc_val = sysc_read_sysconfig(ddata); } if (ddata->cfg.srst_udelay) From 430c29a54e4a2c5d4e73a9dc1a818cb8ceebefbc Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Tue, 13 Jun 2023 16:12:24 +0300 Subject: [PATCH 474/513] arm64: dts: qcom: qrb5165-rb5: fix thermal zone conflict [ Upstream commit 798f1df86e5709b7b6aedf493cc04c7fedbf544a ] The commit 3a786086c6f8 ("arm64: dts: qcom: Add missing "-thermal" suffix for thermal zones") renamed the thermal zone in the pm8150l.dtsi file to comply with the schema. However this resulted in a clash with the RB5 board file, which already contained the pm8150l-thermal zone for the on-board sensor. This resulted in the board file definition overriding the thermal zone defined in the PMIC include file (and thus the on-die PMIC temp alarm was not probing at all). Rename the thermal zone in qcom/qrb5165-rb5.dts to remove this override. Fixes: 3a786086c6f8 ("arm64: dts: qcom: Add missing "-thermal" suffix for thermal zones") Signed-off-by: Dmitry Baryshkov Reviewed-by: Konrad Dybcio Link: https://lore.kernel.org/r/20230613131224.666668-1-dmitry.baryshkov@linaro.org Signed-off-by: Bjorn Andersson Signed-off-by: Sasha Levin --- arch/arm64/boot/dts/qcom/qrb5165-rb5.dts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts index 0ce2d36ab257..d3449cb52def 100644 --- a/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts +++ b/arch/arm64/boot/dts/qcom/qrb5165-rb5.dts @@ -113,7 +113,7 @@ }; }; - pm8150l-thermal { + pm8150l-pcb-thermal { polling-delay-passive = <0>; polling-delay = <0>; thermal-sensors = <&pm8150l_adc_tm 1>; From e878458d820cb4d4c4fa5fd709beaa2e124af1f4 Mon Sep 17 00:00:00 2001 From: Xiaolei Wang Date: Wed, 26 Jul 2023 15:57:47 +0800 Subject: [PATCH 475/513] ARM: dts: imx: Set default tuning step for imx6sx usdhc [ Upstream commit 0a2b96e42a0284c4fc03022236f656a085ca714a ] If the tuning step is not set, the tuning step is set to 1. 
For some sd cards, the following Tuning timeout will occur. Tuning failed, falling back to fixed sampling clock So set the default tuning step. This refers to the NXP vendor's commit below: https://github.com/nxp-imx/linux-imx/blob/lf-6.1.y/ arch/arm/boot/dts/imx6sx.dtsi#L1108-L1109 Fixes: 1e336aa0c025 ("mmc: sdhci-esdhc-imx: correct the tuning start tap and step setting") Signed-off-by: Xiaolei Wang Reviewed-by: Fabio Estevam Signed-off-by: Shawn Guo Signed-off-by: Sasha Levin --- arch/arm/boot/dts/imx6sx.dtsi | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi index 8bef5440278b..3e779fd0a396 100644 --- a/arch/arm/boot/dts/imx6sx.dtsi +++ b/arch/arm/boot/dts/imx6sx.dtsi @@ -981,6 +981,8 @@ <&clks IMX6SX_CLK_USDHC1>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; + fsl,tuning-start-tap = <20>; + fsl,tuning-step= <2>; status = "disabled"; }; @@ -993,6 +995,8 @@ <&clks IMX6SX_CLK_USDHC2>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; + fsl,tuning-start-tap = <20>; + fsl,tuning-step= <2>; status = "disabled"; }; @@ -1005,6 +1009,8 @@ <&clks IMX6SX_CLK_USDHC3>; clock-names = "ipg", "ahb", "per"; bus-width = <4>; + fsl,tuning-start-tap = <20>; + fsl,tuning-step= <2>; status = "disabled"; }; From fd346ef1cd2d3ccf576133a406c132597760a66d Mon Sep 17 00:00:00 2001 From: Zhang Shurong Date: Tue, 1 Aug 2023 23:59:11 +0800 Subject: [PATCH 476/513] ASoC: rt5665: add missed regulator_bulk_disable [ Upstream commit c163108e706909570f8aa9aa5bcf6806e2b4c98c ] The driver forgets to call regulator_bulk_disable() Add the missed call to fix it. Fixes: 33ada14a26c8 ("ASoC: add rt5665 codec driver") Signed-off-by: Zhang Shurong Link: https://lore.kernel.org/r/tencent_A560D01E3E0A00A85A12F137E4B5205B3508@qq.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/codecs/rt5665.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/soc/codecs/rt5665.c b/sound/soc/codecs/rt5665.c index e59323fd5bf2..5e00aca0c418 100644 --- a/sound/soc/codecs/rt5665.c +++ b/sound/soc/codecs/rt5665.c @@ -4472,6 +4472,8 @@ static void rt5665_remove(struct snd_soc_component *component) struct rt5665_priv *rt5665 = snd_soc_component_get_drvdata(component); regmap_write(rt5665->regmap, RT5665_RESET, 0); + + regulator_bulk_disable(ARRAY_SIZE(rt5665->supplies), rt5665->supplies); } #ifdef CONFIG_PM From c5ac7522a8db4afd87acaeb6bc2664bfd917161f Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Wed, 9 Aug 2023 19:19:31 +0200 Subject: [PATCH 477/513] ASoC: meson: axg-tdm-formatter: fix channel slot allocation [ Upstream commit c1f848f12103920ca165758aedb1c10904e193e1 ] When the tdm lane mask is computed, the driver currently fills the 1st lane before moving on to the next. If the stream has less channels than the lanes can accommodate, slots will be disabled on the last lanes. Unfortunately, the HW distribute channels in a different way. It distribute channels in pair on each lanes before moving on the next slots. This difference leads to problems if a device has an interface with more than 1 lane and with more than 2 slots per lane. For example: a playback interface with 2 lanes and 4 slots each (total 8 slots - zero based numbering) - Playing a 8ch stream: - All slots activated by the driver - channel #2 will be played on lane #1 - slot #0 following HW placement - Playing a 4ch stream: - Lane #1 disabled by the driver - channel #2 will be played on lane #0 - slot #2 This behaviour is obviously not desirable. 
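As a rough standalone sketch of the placement just described (two lanes of
four slots, all slots enabled, invented names; an illustration of the idea,
not the driver code), the hardware-style pairwise distribution is:

  #include <stdio.h>

  #define LANES 2
  #define SLOTS 4                         /* slots per lane, as in the example */

  int main(void)
  {
          int ch = 4;                     /* the 4ch stream from the example */
          unsigned int mask[LANES] = { 0 };

          /* walk the slots two at a time ... */
          for (int slot = 0; slot < SLOTS && ch; slot += 2)
                  /* ... across all lanes ... */
                  for (int lane = 0; lane < LANES; lane++)
                          /* ... assigning channels in pairs */
                          for (int k = 0; k < 2 && ch; k++) {
                                  mask[lane] |= 1u << (slot + k);
                                  ch--;
                          }

          for (int lane = 0; lane < LANES; lane++)
                  printf("lane %d slot mask: 0x%x\n", lane, mask[lane]);
          /* prints 0x3 / 0x3: channel #2 lands on lane #1, slot #0 */
          return 0;
  }

Running the same loop with ch = 8 fills slots 0-3 on both lanes, so every
channel keeps the same lane/slot regardless of the stream size, which is
what the change below implements.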
Change the way slots are activated on the TDM lanes to follow what the HW does and make sure each channel always get mapped to the same slot/lane. Fixes: 1a11d88f499c ("ASoC: meson: add tdm formatter base driver") Signed-off-by: Jerome Brunet Link: https://lore.kernel.org/r/20230809171931.1244502-1-jbrunet@baylibre.com Signed-off-by: Mark Brown Signed-off-by: Sasha Levin --- sound/soc/meson/axg-tdm-formatter.c | 42 ++++++++++++++++++----------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/sound/soc/meson/axg-tdm-formatter.c b/sound/soc/meson/axg-tdm-formatter.c index cab7fa2851aa..4834cfd163c0 100644 --- a/sound/soc/meson/axg-tdm-formatter.c +++ b/sound/soc/meson/axg-tdm-formatter.c @@ -30,27 +30,32 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map, struct axg_tdm_stream *ts, unsigned int offset) { - unsigned int val, ch = ts->channels; - unsigned long mask; - int i, j; + unsigned int ch = ts->channels; + u32 val[AXG_TDM_NUM_LANES]; + int i, j, k; + + /* + * We need to mimick the slot distribution used by the HW to keep the + * channel placement consistent regardless of the number of channel + * in the stream. This is why the odd algorithm below is used. + */ + memset(val, 0, sizeof(*val) * AXG_TDM_NUM_LANES); /* * Distribute the channels of the stream over the available slots - * of each TDM lane + * of each TDM lane. We need to go over the 32 slots ... */ - for (i = 0; i < AXG_TDM_NUM_LANES; i++) { - val = 0; - mask = ts->mask[i]; - - for (j = find_first_bit(&mask, 32); - (j < 32) && ch; - j = find_next_bit(&mask, 32, j + 1)) { - val |= 1 << j; - ch -= 1; + for (i = 0; (i < 32) && ch; i += 2) { + /* ... of all the lanes ... */ + for (j = 0; j < AXG_TDM_NUM_LANES; j++) { + /* ... then distribute the channels in pairs */ + for (k = 0; k < 2; k++) { + if ((BIT(i + k) & ts->mask[j]) && ch) { + val[j] |= BIT(i + k); + ch -= 1; + } + } } - - regmap_write(map, offset, val); - offset += regmap_get_reg_stride(map); } /* @@ -63,6 +68,11 @@ int axg_tdm_formatter_set_channel_masks(struct regmap *map, return -EINVAL; } + for (i = 0; i < AXG_TDM_NUM_LANES; i++) { + regmap_write(map, offset, val[i]); + offset += regmap_get_reg_stride(map); + } + return 0; } EXPORT_SYMBOL_GPL(axg_tdm_formatter_set_channel_masks); From dfb9676ed25be25ca7cd198d0f0e093b76b7bc7f Mon Sep 17 00:00:00 2001 From: Jiasheng Jiang Date: Thu, 10 Aug 2023 22:01:04 +0930 Subject: [PATCH 478/513] soc: aspeed: socinfo: Add kfree for kstrdup [ Upstream commit 6e6d847a8ce18ab2fbec4f579f682486a82d2c6b ] Add kfree() in the later error handling in order to avoid memory leak. 
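The leak pattern is the usual one for a group of kstrdup()ed strings where
the error path frees only some of them; a minimal standalone illustration
(invented names, not the driver code):

  #include <stdlib.h>
  #include <string.h>

  struct attrs { char *machine, *soc_id, *serial_number; };

  static int register_soc(struct attrs *a) { (void)a; return -1; } /* pretend it fails */

  int main(void)
  {
          struct attrs *a = calloc(1, sizeof(*a));

          if (!a)
                  return 1;
          a->machine = strdup("example-machine");
          a->soc_id = strdup("example-soc");
          a->serial_number = strdup("0123");

          if (register_soc(a)) {
                  free(a->machine);       /* the one the error path was missing */
                  free(a->soc_id);
                  free(a->serial_number);
                  free(a);
                  return 1;
          }
          return 0;
  }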
Fixes: e0218dca5787 ("soc: aspeed: Add soc info driver") Signed-off-by: Jiasheng Jiang Link: https://lore.kernel.org/r/20230707021625.7727-1-jiasheng@iscas.ac.cn Signed-off-by: Joel Stanley Link: https://lore.kernel.org/r/20230810123104.231167-1-joel@jms.id.au Signed-off-by: Arnd Bergmann Signed-off-by: Sasha Levin --- drivers/soc/aspeed/aspeed-socinfo.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/soc/aspeed/aspeed-socinfo.c b/drivers/soc/aspeed/aspeed-socinfo.c index 1ca140356a08..3f759121dc00 100644 --- a/drivers/soc/aspeed/aspeed-socinfo.c +++ b/drivers/soc/aspeed/aspeed-socinfo.c @@ -137,6 +137,7 @@ static int __init aspeed_socinfo_init(void) soc_dev = soc_device_register(attrs); if (IS_ERR(soc_dev)) { + kfree(attrs->machine); kfree(attrs->soc_id); kfree(attrs->serial_number); kfree(attrs); From b2a331abcb03149f1365e6871e76878423e5041b Mon Sep 17 00:00:00 2001 From: Kailang Yang Date: Tue, 15 Aug 2023 15:54:23 +0800 Subject: [PATCH 479/513] ALSA: hda/realtek - Remodified 3k pull low procedure [ Upstream commit 46cdff2369cbdf8d78081a22526e77bd1323f563 ] Set spec->en_3kpull_low default to true. Then fillback ALC236 and ALC257 to false. Additional note: this addresses a regression caused by the previous fix 69ea4c9d02b7 ("ALSA: hda/realtek - remove 3k pull low procedure"). The previous workaround was applied too widely without necessity, which resulted in the pop noise at PM again. This patch corrects the condition and restores the old behavior for the devices that don't suffer from the original problem. Fixes: 69ea4c9d02b7 ("ALSA: hda/realtek - remove 3k pull low procedure") Link: https://bugzilla.kernel.org/show_bug.cgi?id=217732 Link: https://lore.kernel.org/r/01e212a538fc407ca6edd10b81ff7b05@realtek.com Signed-off-by: Kailang Yang Signed-off-by: Takashi Iwai Signed-off-by: Sasha Levin --- sound/pci/hda/patch_realtek.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index e335f3b5338f..59e11a070c20 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -10088,6 +10088,7 @@ static int patch_alc269(struct hda_codec *codec) spec = codec->spec; spec->gen.shared_mic_vref_pin = 0x18; codec->power_save_node = 0; + spec->en_3kpull_low = true; #ifdef CONFIG_PM codec->patch_ops.suspend = alc269_suspend; @@ -10170,14 +10171,16 @@ static int patch_alc269(struct hda_codec *codec) spec->shutup = alc256_shutup; spec->init_hook = alc256_init; spec->gen.mixer_nid = 0; /* ALC256 does not have any loopback mixer path */ - if (codec->bus->pci->vendor == PCI_VENDOR_ID_AMD) - spec->en_3kpull_low = true; + if (codec->core.vendor_id == 0x10ec0236 && + codec->bus->pci->vendor != PCI_VENDOR_ID_AMD) + spec->en_3kpull_low = false; break; case 0x10ec0257: spec->codec_variant = ALC269_TYPE_ALC257; spec->shutup = alc256_shutup; spec->init_hook = alc256_init; spec->gen.mixer_nid = 0; + spec->en_3kpull_low = false; break; case 0x10ec0215: case 0x10ec0245: From e9b8ee715dbcaa1da94d1c0df2c52d587f46dec2 Mon Sep 17 00:00:00 2001 From: Alexandre Ghiti Date: Fri, 11 Aug 2023 17:06:04 +0200 Subject: [PATCH 480/513] riscv: uaccess: Return the number of bytes effectively not copied MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit [ Upstream commit 4b05b993900dd3eba0fc83ef5c5ddc7d65d786c6 ] It was reported that the riscv kernel hangs while executing the test in [1]. Indeed, the test hangs when trying to write a buffer to a file. 
The problem is that the riscv implementation of raw_copy_from_user() does not return the correct number of bytes not written when an exception happens and is fixed up, instead it always returns the initial size to copy, even if some bytes were actually copied. generic_perform_write() pre-faults the user pages and bails out if nothing can be written, otherwise it will access the userspace buffer: here the riscv implementation keeps returning it was not able to copy any byte though the pre-faulting indicates otherwise. So generic_perform_write() keeps retrying to access the user memory and ends up in an infinite loop. Note that before the commit mentioned in [1] that introduced this regression, it worked because generic_perform_write() would bail out if only one byte could not be written. So fix this by returning the number of bytes effectively not written in __asm_copy_[to|from]_user() and __clear_user(), as it is expected. Link: https://lore.kernel.org/linux-riscv/20230309151841.bomov6hq3ybyp42a@debian/ [1] Fixes: ebcbd75e3962 ("riscv: Fix the bug in memory access fixup code") Reported-by: Bo YU Closes: https://lore.kernel.org/linux-riscv/20230309151841.bomov6hq3ybyp42a@debian/#t Reported-by: Aurelien Jarno Closes: https://lore.kernel.org/linux-riscv/ZNOnCakhwIeue3yr@aurel32.net/ Signed-off-by: Alexandre Ghiti Reviewed-by: Björn Töpel Tested-by: Aurelien Jarno Reviewed-by: Aurelien Jarno Link: https://lore.kernel.org/r/20230811150604.1621784-1-alexghiti@rivosinc.com Signed-off-by: Palmer Dabbelt Signed-off-by: Sasha Levin --- arch/riscv/lib/uaccess.S | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S index 2c7c1c5026af..4fe436a0eec2 100644 --- a/arch/riscv/lib/uaccess.S +++ b/arch/riscv/lib/uaccess.S @@ -19,8 +19,11 @@ ENTRY(__asm_copy_from_user) li t6, SR_SUM csrs CSR_STATUS, t6 - /* Save for return value */ - mv t5, a2 + /* + * Save the terminal address which will be used to compute the number + * of bytes copied in case of a fixup exception. + */ + add t5, a0, a2 /* * Register allocation for code below: @@ -178,7 +181,7 @@ ENTRY(__asm_copy_from_user) 10: /* Disable access to user memory */ csrc CSR_STATUS, t6 - mv a0, t5 + sub a0, t5, a0 ret ENDPROC(__asm_copy_to_user) ENDPROC(__asm_copy_from_user) @@ -230,7 +233,7 @@ ENTRY(__clear_user) 11: /* Disable access to user memory */ csrc CSR_STATUS, t6 - mv a0, a1 + sub a0, a3, a0 ret ENDPROC(__clear_user) EXPORT_SYMBOL(__clear_user) From 18e27df4f2b4e257c317ba8076f31a888f6cc64b Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Fri, 4 Aug 2023 16:15:51 +0300 Subject: [PATCH 481/513] serial: 8250: Fix oops for port->pm on uart_change_pm() [ Upstream commit dfe2aeb226fd5e19b0ee795f4f6ed8bc494c1534 ] Unloading a hardware specific 8250 driver can produce error "Unable to handle kernel paging request at virtual address" about ten seconds after unloading the driver. This happens on uart_hangup() calling uart_change_pm(). Turns out commit 04e82793f068 ("serial: 8250: Reinit port->pm on port specific driver unbind") was only a partial fix. If the hardware specific driver has initialized port->pm function, we need to clear port->pm too. Just reinitializing port->ops does not do this. Otherwise serial8250_pm() will call port->pm() instead of serial8250_do_pm(). 
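A standalone model of that dispatch (hypothetical types, not the 8250 code)
shows why a callback left over from the unbound driver is fatal, and why the
change below resets it together with port->ops:

  #include <stdio.h>

  struct port {
          void (*pm)(struct port *port, int state);  /* driver-specific hook */
  };

  static void generic_pm(struct port *port, int state)
  {
          (void)port;
          printf("generic power management, state %d\n", state);
  }

  static void port_pm(struct port *port, int state)
  {
          if (port->pm)
                  port->pm(port, state);  /* stale pointer -> freed module text */
          else
                  generic_pm(port, state);
  }

  int main(void)
  {
          struct port port = { .pm = NULL };  /* reinit on unbind forces this path */

          port_pm(&port, 3);
          return 0;
  }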
Fixes: 04e82793f068 ("serial: 8250: Reinit port->pm on port specific driver unbind") Signed-off-by: Tony Lindgren Link: https://lore.kernel.org/r/20230804131553.52927-1-tony@atomide.com Signed-off-by: Greg Kroah-Hartman Signed-off-by: Sasha Levin --- drivers/tty/serial/8250/8250_port.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c index ad5b742a68cd..74e477016f25 100644 --- a/drivers/tty/serial/8250/8250_port.c +++ b/drivers/tty/serial/8250/8250_port.c @@ -3274,6 +3274,7 @@ void serial8250_init_port(struct uart_8250_port *up) struct uart_port *port = &up->port; spin_lock_init(&port->lock); + port->pm = NULL; port->ops = &serial8250_pops; port->has_sysrq = IS_ENABLED(CONFIG_SERIAL_8250_CONSOLE); From cc3f194f46e72c83b4357b2a50dbd1de676c8727 Mon Sep 17 00:00:00 2001 From: dengxiang Date: Thu, 3 Aug 2023 10:44:37 +0800 Subject: [PATCH 482/513] ALSA: usb-audio: Add support for Mythware XA001AU capture and playback interfaces. commit 788449ae57f4273111b779bbcaad552b67f517d5 upstream. This patch adds a USB quirk for Mythware XA001AU USB interface. Signed-off-by: dengxiang Cc: Link: https://lore.kernel.org/r/20230803024437.370069-1-dengxiang@nfschina.com Signed-off-by: Takashi Iwai Signed-off-by: Greg Kroah-Hartman --- sound/usb/quirks-table.h | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index efe62f19c4d2..6d332c9eb444 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -4431,6 +4431,35 @@ YAMAHA_DEVICE(0x7010, "UB99"), } } }, +{ + /* Advanced modes of the Mythware XA001AU. + * For the standard mode, Mythware XA001AU has ID ffad:a001 + */ + USB_DEVICE_VENDOR_SPEC(0xffad, 0xa001), + .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { + .vendor_name = "Mythware", + .product_name = "XA001AU", + .ifnum = QUIRK_ANY_INTERFACE, + .type = QUIRK_COMPOSITE, + .data = (const struct snd_usb_audio_quirk[]) { + { + .ifnum = 0, + .type = QUIRK_IGNORE_INTERFACE, + }, + { + .ifnum = 1, + .type = QUIRK_AUDIO_STANDARD_INTERFACE, + }, + { + .ifnum = 2, + .type = QUIRK_AUDIO_STANDARD_INTERFACE, + }, + { + .ifnum = -1 + } + } + } +}, #undef USB_DEVICE_VENDOR_SPEC #undef USB_AUDIO_DEVICE From 961f7ce16223aee2db583a43877d84e6d1f2b857 Mon Sep 17 00:00:00 2001 From: Russell Harmon via samba-technical Date: Thu, 10 Aug 2023 00:19:22 -0700 Subject: [PATCH 483/513] cifs: Release folio lock on fscache read hit. commit 69513dd669e243928f7450893190915a88f84a2b upstream. Under the current code, when cifs_readpage_worker is called, the call contract is that the callee should unlock the page. This is documented in the read_folio section of Documentation/filesystems/vfs.rst as: > The filesystem should unlock the folio once the read has completed, > whether it was successful or not. Without this change, when fscache is in use and cache hit occurs during a read, the page lock is leaked, producing the following stack on subsequent reads (via mmap) to the page: $ cat /proc/3890/task/12864/stack [<0>] folio_wait_bit_common+0x124/0x350 [<0>] filemap_read_folio+0xad/0xf0 [<0>] filemap_fault+0x8b1/0xab0 [<0>] __do_fault+0x39/0x150 [<0>] do_fault+0x25c/0x3e0 [<0>] __handle_mm_fault+0x6ca/0xc70 [<0>] handle_mm_fault+0xe9/0x350 [<0>] do_user_addr_fault+0x225/0x6c0 [<0>] exc_page_fault+0x84/0x1b0 [<0>] asm_exc_page_fault+0x27/0x30 This requires a reboot to resolve; it is a deadlock. 
Note however that the call to cifs_readpage_from_fscache does mark the page clean, but does not free the folio lock. This happens in __cifs_readpage_from_fscache on success. Releasing the lock at that point however is not appropriate as cifs_readahead also calls cifs_readpage_from_fscache and *does* unconditionally release the lock after its return. This change therefore effectively makes cifs_readpage_worker work like cifs_readahead. Signed-off-by: Russell Harmon Acked-by: Paulo Alcantara (SUSE) Reviewed-by: David Howells Cc: stable@vger.kernel.org Signed-off-by: Steve French Signed-off-by: Greg Kroah-Hartman --- fs/cifs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 369620e82b84..9e8a69f9421e 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4671,9 +4671,9 @@ static int cifs_readpage_worker(struct file *file, struct page *page, io_error: kunmap(page); - unlock_page(page); read_complete: + unlock_page(page); return rc; } From b803fedb6e509afe50dbe777afd47ae05c6bde8b Mon Sep 17 00:00:00 2001 From: Yang Yingliang Date: Mon, 7 Aug 2023 20:44:42 +0800 Subject: [PATCH 484/513] mmc: wbsd: fix double mmc_free_host() in wbsd_init() commit d83035433701919ac6db15f7737cbf554c36c1a6 upstream. mmc_free_host() has already be called in wbsd_free_mmc(), remove the mmc_free_host() in error path in wbsd_init(). Fixes: dc5b9b50fc9d ("mmc: wbsd: fix return value check of mmc_add_host()") Signed-off-by: Yang Yingliang Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230807124443.3431366-1-yangyingliang@huawei.com Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/wbsd.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/mmc/host/wbsd.c b/drivers/mmc/host/wbsd.c index 7c7ec8d10232..b5b1a42ca25e 100644 --- a/drivers/mmc/host/wbsd.c +++ b/drivers/mmc/host/wbsd.c @@ -1705,8 +1705,6 @@ static int wbsd_init(struct device *dev, int base, int irq, int dma, wbsd_release_resources(host); wbsd_free_mmc(dev); - - mmc_free_host(mmc); return ret; } From ff09f9e671ffba2e3b13627ec2b942a099811236 Mon Sep 17 00:00:00 2001 From: Yibin Ding Date: Wed, 2 Aug 2023 10:30:23 +0800 Subject: [PATCH 485/513] mmc: block: Fix in_flight[issue_type] value error commit 4b430d4ac99750ee2ae2f893f1055c7af1ec3dc5 upstream. For a completed request, after the mmc_blk_mq_complete_rq(mq, req) function is executed, the bitmap_tags corresponding to the request will be cleared, that is, the request will be regarded as idle. If the request is acquired by a different type of process at this time, the issue_type of the request may change. It further caused the value of mq->in_flight[issue_type] to be abnormal, and a large number of requests could not be sent. p1: p2: mmc_blk_mq_complete_rq blk_mq_free_request blk_mq_get_request blk_mq_rq_ctx_init mmc_blk_mq_dec_in_flight mmc_issue_type(mq, req) This strategy can ensure the consistency of issue_type before and after executing mmc_blk_mq_complete_rq. 
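A minimal standalone sketch of that idea (invented types, not the mmc code):
take the snapshot while the request is still guaranteed to be ours, and do
the accounting with the snapshot afterwards, as the change below does.

  enum issue_type { ISSUE_SYNC, ISSUE_ASYNC, ISSUE_MAX };

  struct request { enum issue_type type; };
  struct queue   { int in_flight[ISSUE_MAX]; };

  static void complete_request(struct request *req) { (void)req; } /* req may be reused after this */

  static void post_req(struct queue *q, struct request *req)
  {
          enum issue_type issue_type = req->type;   /* snapshot while req is still ours */

          complete_request(req);                    /* another submitter may now recycle req */

          q->in_flight[issue_type] -= 1;            /* never re-reads req->type */
  }

  int main(void)
  {
          struct queue q = { .in_flight = { 1, 0 } };
          struct request r = { .type = ISSUE_SYNC };

          post_req(&q, &r);
          return q.in_flight[ISSUE_SYNC];           /* 0 on success */
  }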
Fixes: 81196976ed94 ("mmc: block: Add blk-mq support") Cc: stable@vger.kernel.org Signed-off-by: Yibin Ding Acked-by: Adrian Hunter Link: https://lore.kernel.org/r/20230802023023.1318134-1-yunlong.xing@unisoc.com Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/core/block.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c index 0b72096f10e6..965b44a09507 100644 --- a/drivers/mmc/core/block.c +++ b/drivers/mmc/core/block.c @@ -2081,14 +2081,14 @@ static void mmc_blk_mq_poll_completion(struct mmc_queue *mq, mmc_blk_urgent_bkops(mq, mqrq); } -static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) +static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, enum mmc_issue_type issue_type) { unsigned long flags; bool put_card; spin_lock_irqsave(&mq->lock, flags); - mq->in_flight[mmc_issue_type(mq, req)] -= 1; + mq->in_flight[issue_type] -= 1; put_card = (mmc_tot_in_flight(mq) == 0); @@ -2100,6 +2100,7 @@ static void mmc_blk_mq_dec_in_flight(struct mmc_queue *mq, struct request *req) static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) { + enum mmc_issue_type issue_type = mmc_issue_type(mq, req); struct mmc_queue_req *mqrq = req_to_mmc_queue_req(req); struct mmc_request *mrq = &mqrq->brq.mrq; struct mmc_host *host = mq->card->host; @@ -2115,7 +2116,7 @@ static void mmc_blk_mq_post_req(struct mmc_queue *mq, struct request *req) else if (likely(!blk_should_fake_timeout(req->q))) blk_mq_complete_request(req); - mmc_blk_mq_dec_in_flight(mq, req); + mmc_blk_mq_dec_in_flight(mq, issue_type); } void mmc_blk_mq_recovery(struct mmc_queue *mq) From d578c919deb786b4d6ba8c7639255cb658731671 Mon Sep 17 00:00:00 2001 From: Wander Lairson Costa Date: Mon, 14 Aug 2023 13:51:19 -0300 Subject: [PATCH 486/513] drm/qxl: fix UAF on handle creation commit c611589b4259ed63b9b77be6872b1ce07ec0ac16 upstream. qxl_mode_dumb_create() dereferences the qobj returned by qxl_gem_object_create_with_handle(), but the handle is the only one holding a reference to it. A potential attacker could guess the returned handle value and closes it between the return of qxl_gem_object_create_with_handle() and the qobj usage, triggering a use-after-free scenario. 
Reproducer: int dri_fd =-1; struct drm_mode_create_dumb arg = {0}; void gem_close(int handle); void* trigger(void* ptr) { int ret; arg.width = arg.height = 0x20; arg.bpp = 32; ret = ioctl(dri_fd, DRM_IOCTL_MODE_CREATE_DUMB, &arg); if(ret) { perror("[*] DRM_IOCTL_MODE_CREATE_DUMB Failed"); exit(-1); } gem_close(arg.handle); while(1) { struct drm_mode_create_dumb args = {0}; args.width = args.height = 0x20; args.bpp = 32; ret = ioctl(dri_fd, DRM_IOCTL_MODE_CREATE_DUMB, &args); if (ret) { perror("[*] DRM_IOCTL_MODE_CREATE_DUMB Failed"); exit(-1); } printf("[*] DRM_IOCTL_MODE_CREATE_DUMB created, %d\n", args.handle); gem_close(args.handle); } return NULL; } void gem_close(int handle) { struct drm_gem_close args; args.handle = handle; int ret = ioctl(dri_fd, DRM_IOCTL_GEM_CLOSE, &args); // gem close handle if (!ret) printf("gem close handle %d\n", args.handle); } int main(void) { dri_fd= open("/dev/dri/card0", O_RDWR); printf("fd:%d\n", dri_fd); if(dri_fd == -1) return -1; pthread_t tid1; if(pthread_create(&tid1,NULL,trigger,NULL)){ perror("[*] thread_create tid1\n"); return -1; } while (1) { gem_close(arg.handle); } return 0; } This is a KASAN report: ================================================================== BUG: KASAN: slab-use-after-free in qxl_mode_dumb_create+0x3c2/0x400 linux/drivers/gpu/drm/qxl/qxl_dumb.c:69 Write of size 1 at addr ffff88801136c240 by task poc/515 CPU: 1 PID: 515 Comm: poc Not tainted 6.3.0 #3 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.16.0-debian-1.16.0-4 04/01/2014 Call Trace: __dump_stack linux/lib/dump_stack.c:88 dump_stack_lvl+0x48/0x70 linux/lib/dump_stack.c:106 print_address_description linux/mm/kasan/report.c:319 print_report+0xd2/0x660 linux/mm/kasan/report.c:430 kasan_report+0xd2/0x110 linux/mm/kasan/report.c:536 __asan_report_store1_noabort+0x17/0x30 linux/mm/kasan/report_generic.c:383 qxl_mode_dumb_create+0x3c2/0x400 linux/drivers/gpu/drm/qxl/qxl_dumb.c:69 drm_mode_create_dumb linux/drivers/gpu/drm/drm_dumb_buffers.c:96 drm_mode_create_dumb_ioctl+0x1f5/0x2d0 linux/drivers/gpu/drm/drm_dumb_buffers.c:102 drm_ioctl_kernel+0x21d/0x430 linux/drivers/gpu/drm/drm_ioctl.c:788 drm_ioctl+0x56f/0xcc0 linux/drivers/gpu/drm/drm_ioctl.c:891 vfs_ioctl linux/fs/ioctl.c:51 __do_sys_ioctl linux/fs/ioctl.c:870 __se_sys_ioctl linux/fs/ioctl.c:856 __x64_sys_ioctl+0x13d/0x1c0 linux/fs/ioctl.c:856 do_syscall_x64 linux/arch/x86/entry/common.c:50 do_syscall_64+0x5b/0x90 linux/arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x72/0xdc linux/arch/x86/entry/entry_64.S:120 RIP: 0033:0x7ff5004ff5f7 Code: 00 00 00 48 8b 05 99 c8 0d 00 64 c7 00 26 00 00 00 48 c7 c0 ff ff ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 b8 10 00 00 00 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 8b 0d 69 c8 0d 00 f7 d8 64 89 01 48 RSP: 002b:00007ff500408ea8 EFLAGS: 00000286 ORIG_RAX: 0000000000000010 RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007ff5004ff5f7 RDX: 00007ff500408ec0 RSI: 00000000c02064b2 RDI: 0000000000000003 RBP: 00007ff500408ef0 R08: 0000000000000000 R09: 000000000000002a R10: 0000000000000000 R11: 0000000000000286 R12: 00007fff1c6cdafe R13: 00007fff1c6cdaff R14: 00007ff500408fc0 R15: 0000000000802000 Allocated by task 515: kasan_save_stack+0x38/0x70 linux/mm/kasan/common.c:45 kasan_set_track+0x25/0x40 linux/mm/kasan/common.c:52 kasan_save_alloc_info+0x1e/0x40 linux/mm/kasan/generic.c:510 ____kasan_kmalloc linux/mm/kasan/common.c:374 __kasan_kmalloc+0xc3/0xd0 linux/mm/kasan/common.c:383 kasan_kmalloc linux/./include/linux/kasan.h:196 kmalloc_trace+0x48/0xc0 
linux/mm/slab_common.c:1066 kmalloc linux/./include/linux/slab.h:580 kzalloc linux/./include/linux/slab.h:720 qxl_bo_create+0x11a/0x610 linux/drivers/gpu/drm/qxl/qxl_object.c:124 qxl_gem_object_create+0xd9/0x360 linux/drivers/gpu/drm/qxl/qxl_gem.c:58 qxl_gem_object_create_with_handle+0xa1/0x180 linux/drivers/gpu/drm/qxl/qxl_gem.c:89 qxl_mode_dumb_create+0x1cd/0x400 linux/drivers/gpu/drm/qxl/qxl_dumb.c:63 drm_mode_create_dumb linux/drivers/gpu/drm/drm_dumb_buffers.c:96 drm_mode_create_dumb_ioctl+0x1f5/0x2d0 linux/drivers/gpu/drm/drm_dumb_buffers.c:102 drm_ioctl_kernel+0x21d/0x430 linux/drivers/gpu/drm/drm_ioctl.c:788 drm_ioctl+0x56f/0xcc0 linux/drivers/gpu/drm/drm_ioctl.c:891 vfs_ioctl linux/fs/ioctl.c:51 __do_sys_ioctl linux/fs/ioctl.c:870 __se_sys_ioctl linux/fs/ioctl.c:856 __x64_sys_ioctl+0x13d/0x1c0 linux/fs/ioctl.c:856 do_syscall_x64 linux/arch/x86/entry/common.c:50 do_syscall_64+0x5b/0x90 linux/arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x72/0xdc linux/arch/x86/entry/entry_64.S:120 Freed by task 515: kasan_save_stack+0x38/0x70 linux/mm/kasan/common.c:45 kasan_set_track+0x25/0x40 linux/mm/kasan/common.c:52 kasan_save_free_info+0x2e/0x60 linux/mm/kasan/generic.c:521 ____kasan_slab_free linux/mm/kasan/common.c:236 ____kasan_slab_free+0x180/0x1f0 linux/mm/kasan/common.c:200 __kasan_slab_free+0x12/0x30 linux/mm/kasan/common.c:244 kasan_slab_free linux/./include/linux/kasan.h:162 slab_free_hook linux/mm/slub.c:1781 slab_free_freelist_hook+0xd2/0x1a0 linux/mm/slub.c:1807 slab_free linux/mm/slub.c:3787 __kmem_cache_free+0x196/0x2d0 linux/mm/slub.c:3800 kfree+0x78/0x120 linux/mm/slab_common.c:1019 qxl_ttm_bo_destroy+0x140/0x1a0 linux/drivers/gpu/drm/qxl/qxl_object.c:49 ttm_bo_release+0x678/0xa30 linux/drivers/gpu/drm/ttm/ttm_bo.c:381 kref_put linux/./include/linux/kref.h:65 ttm_bo_put+0x50/0x80 linux/drivers/gpu/drm/ttm/ttm_bo.c:393 qxl_gem_object_free+0x3e/0x60 linux/drivers/gpu/drm/qxl/qxl_gem.c:42 drm_gem_object_free+0x5c/0x90 linux/drivers/gpu/drm/drm_gem.c:974 kref_put linux/./include/linux/kref.h:65 __drm_gem_object_put linux/./include/drm/drm_gem.h:431 drm_gem_object_put linux/./include/drm/drm_gem.h:444 qxl_gem_object_create_with_handle+0x151/0x180 linux/drivers/gpu/drm/qxl/qxl_gem.c:100 qxl_mode_dumb_create+0x1cd/0x400 linux/drivers/gpu/drm/qxl/qxl_dumb.c:63 drm_mode_create_dumb linux/drivers/gpu/drm/drm_dumb_buffers.c:96 drm_mode_create_dumb_ioctl+0x1f5/0x2d0 linux/drivers/gpu/drm/drm_dumb_buffers.c:102 drm_ioctl_kernel+0x21d/0x430 linux/drivers/gpu/drm/drm_ioctl.c:788 drm_ioctl+0x56f/0xcc0 linux/drivers/gpu/drm/drm_ioctl.c:891 vfs_ioctl linux/fs/ioctl.c:51 __do_sys_ioctl linux/fs/ioctl.c:870 __se_sys_ioctl linux/fs/ioctl.c:856 __x64_sys_ioctl+0x13d/0x1c0 linux/fs/ioctl.c:856 do_syscall_x64 linux/arch/x86/entry/common.c:50 do_syscall_64+0x5b/0x90 linux/arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x72/0xdc linux/arch/x86/entry/entry_64.S:120 The buggy address belongs to the object at ffff88801136c000 which belongs to the cache kmalloc-1k of size 1024 The buggy address is located 576 bytes inside of freed 1024-byte region [ffff88801136c000, ffff88801136c400) The buggy address belongs to the physical page: page:0000000089fc329b refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x11368 head:0000000089fc329b order:3 entire_mapcount:0 nr_pages_mapped:0 pincount:0 flags: 0xfffffc0010200(slab|head|node=0|zone=1|lastcpupid=0x1fffff) raw: 000fffffc0010200 ffff888007841dc0 dead000000000122 0000000000000000 raw: 0000000000000000 0000000080100010 
00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected Memory state around the buggy address: ffff88801136c100: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff88801136c180: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb >ffff88801136c200: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff88801136c280: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff88801136c300: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ================================================================== Disabling lock debugging due to kernel taint Instead of returning a weak reference to the qxl_bo object, return the created drm_gem_object and let the caller decrement the reference count when it no longer needs it. As a convenience, if the caller is not interested in the gobj object, it can pass NULL to the parameter and the reference counting is descremented internally. The bug and the reproducer were originally found by the Zero Day Initiative project (ZDI-CAN-20940). Link: https://www.zerodayinitiative.com/ Signed-off-by: Wander Lairson Costa Cc: stable@vger.kernel.org Reviewed-by: Dave Airlie Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20230814165119.90847-1-wander@redhat.com Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/qxl/qxl_drv.h | 2 +- drivers/gpu/drm/qxl/qxl_dumb.c | 5 ++++- drivers/gpu/drm/qxl/qxl_gem.c | 25 +++++++++++++++++-------- drivers/gpu/drm/qxl/qxl_ioctl.c | 6 ++---- 4 files changed, 24 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 359266d9e860..f0f512d58497 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -318,7 +318,7 @@ int qxl_gem_object_create_with_handle(struct qxl_device *qdev, u32 domain, size_t size, struct qxl_surface *surf, - struct qxl_bo **qobj, + struct drm_gem_object **gobj, uint32_t *handle); void qxl_gem_object_free(struct drm_gem_object *gobj); int qxl_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv); diff --git a/drivers/gpu/drm/qxl/qxl_dumb.c b/drivers/gpu/drm/qxl/qxl_dumb.c index d636ba685451..17df5c7ccf69 100644 --- a/drivers/gpu/drm/qxl/qxl_dumb.c +++ b/drivers/gpu/drm/qxl/qxl_dumb.c @@ -34,6 +34,7 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, { struct qxl_device *qdev = to_qxl(dev); struct qxl_bo *qobj; + struct drm_gem_object *gobj; uint32_t handle; int r; struct qxl_surface surf; @@ -62,11 +63,13 @@ int qxl_mode_dumb_create(struct drm_file *file_priv, r = qxl_gem_object_create_with_handle(qdev, file_priv, QXL_GEM_DOMAIN_CPU, - args->size, &surf, &qobj, + args->size, &surf, &gobj, &handle); if (r) return r; + qobj = gem_to_qxl_bo(gobj); qobj->is_dumb = true; + drm_gem_object_put(gobj); args->pitch = pitch; args->handle = handle; return 0; diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index a08da0bd9098..fc5e3763c359 100644 --- a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c @@ -72,32 +72,41 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size, return 0; } +/* + * If the caller passed a valid gobj pointer, it is responsible to call + * drm_gem_object_put() when it no longer needs to acess the object. + * + * If gobj is NULL, it is handled internally. 
+ */ int qxl_gem_object_create_with_handle(struct qxl_device *qdev, struct drm_file *file_priv, u32 domain, size_t size, struct qxl_surface *surf, - struct qxl_bo **qobj, + struct drm_gem_object **gobj, uint32_t *handle) { - struct drm_gem_object *gobj; int r; + struct drm_gem_object *local_gobj; - BUG_ON(!qobj); BUG_ON(!handle); r = qxl_gem_object_create(qdev, size, 0, domain, false, false, surf, - &gobj); + &local_gobj); if (r) return -ENOMEM; - r = drm_gem_handle_create(file_priv, gobj, handle); + r = drm_gem_handle_create(file_priv, local_gobj, handle); if (r) return r; - /* drop reference from allocate - handle holds it now */ - *qobj = gem_to_qxl_bo(gobj); - drm_gem_object_put(gobj); + + if (gobj) + *gobj = local_gobj; + else + /* drop reference from allocate - handle holds it now */ + drm_gem_object_put(local_gobj); + return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 38aabcbe2238..4066499ca79e 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -39,7 +39,6 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_alloc *qxl_alloc = data; int ret; - struct qxl_bo *qobj; uint32_t handle; u32 domain = QXL_GEM_DOMAIN_VRAM; @@ -51,7 +50,7 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data, domain, qxl_alloc->size, NULL, - &qobj, &handle); + NULL, &handle); if (ret) { DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); @@ -393,7 +392,6 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, { struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_alloc_surf *param = data; - struct qxl_bo *qobj; int handle; int ret; int size, actual_stride; @@ -413,7 +411,7 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, QXL_GEM_DOMAIN_SURFACE, size, &surf, - &qobj, &handle); + NULL, &handle); if (ret) { DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); From a00c5d2c208b7e40403524bd413f01055b73a9f4 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Thu, 18 May 2023 11:52:51 -0500 Subject: [PATCH 487/513] drm/amd: flush any delayed gfxoff on suspend entry commit a7b7d9e8aee4f71b4c7151702fd74237b8cef989 upstream. DCN 3.1.4 is reported to hang on s2idle entry if graphics activity is happening during entry. This is because GFXOFF was scheduled as delayed but RLC gets disabled in s2idle entry sequence which will hang GFX IP if not already in GFXOFF. To help this problem, flush any delayed work for GFXOFF early in s2idle entry sequence to ensure that it's off when RLC is changed. commit 4b31b92b143f ("drm/amdgpu: complete gfxoff allow signal during suspend without delay") modified power gating flow so that if called in s0ix that it ensured that GFXOFF wasn't put in work queue but instead processed immediately. This is dead code due to commit 10cb67eb8a1b ("drm/amdgpu: skip CG/PG for gfx during S0ix") because GFXOFF will now not be explicitly called as part of the suspend entry code. Remove that dead code. 
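A toy standalone model of that ordering (no real workqueues, names invented)
illustrating why suspend entry flushes the delayed work instead of leaving
it queued:

  #include <stdio.h>
  #include <stdbool.h>

  static bool gfxoff_entered;
  static bool work_pending = true;        /* GFXOFF entry was deferred */

  static void delayed_gfxoff_work(void) { gfxoff_entered = true; }

  static void flush_delayed(void)         /* run any pending work now, then return */
  {
          if (work_pending) {
                  delayed_gfxoff_work();
                  work_pending = false;
          }
  }

  int main(void)
  {
          flush_delayed();                /* suspend entry does this first */
          /* only now is it safe to disable RLC */
          printf("GFX already in GFXOFF: %s\n", gfxoff_entered ? "yes" : "no");
          return 0;
  }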
Signed-off-by: Mario Limonciello Signed-off-by: Tim Huang Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org Signed-off-by: Greg Kroah-Hartman --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 9 +-------- 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 642acfc9f0b1..2b5766d3789b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4066,6 +4066,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) amdgpu_fbdev_set_suspend(adev, 1); cancel_delayed_work_sync(&adev->delayed_init_work); + flush_delayed_work(&adev->gfx.gfx_off_delay_work); amdgpu_ras_suspend(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 5e32906f9819..252712f930f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -579,15 +579,8 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable) if (adev->gfx.gfx_off_req_count == 0 && !adev->gfx.gfx_off_state) { - /* If going to s2idle, no need to wait */ - if (adev->in_s0ix) { - if (!amdgpu_dpm_set_powergating_by_smu(adev, - AMD_IP_BLOCK_TYPE_GFX, true)) - adev->gfx.gfx_off_state = true; - } else { - schedule_delayed_work(&adev->gfx.gfx_off_delay_work, + schedule_delayed_work(&adev->gfx.gfx_off_delay_work, delay); - } } } else { if (adev->gfx.gfx_off_req_count == 0) { From 6065b3017107eb7f7bac44dda46b7ff09165c701 Mon Sep 17 00:00:00 2001 From: Xin Long Date: Tue, 15 Aug 2023 14:08:47 -0400 Subject: [PATCH 488/513] netfilter: set default timeout to 3 secs for sctp shutdown send and recv state commit 9bfab6d23a2865966a4f89a96536fbf23f83bc8c upstream. In SCTP protocol, it is using the same timer (T2 timer) for SHUTDOWN and SHUTDOWN_ACK retransmission. However in sctp conntrack the default timeout value for SCTP_CONNTRACK_SHUTDOWN_ACK_SENT state is 3 secs while it's 300 msecs for SCTP_CONNTRACK_SHUTDOWN_SEND/RECV state. As Paolo Valerio noticed, this might cause unwanted expiration of the ct entry. In my test, with 1s tc netem delay set on the NAT path, after the SHUTDOWN is sent, the sctp ct entry enters SCTP_CONNTRACK_SHUTDOWN_SEND state. However, due to 300ms (too short) delay, when the SHUTDOWN_ACK is sent back from the peer, the sctp ct entry has expired and been deleted, and then the SHUTDOWN_ACK has to be dropped. Also, it is confusing these two sysctl options always show 0 due to all timeout values using sec as unit: net.netfilter.nf_conntrack_sctp_timeout_shutdown_recd = 0 net.netfilter.nf_conntrack_sctp_timeout_shutdown_sent = 0 This patch fixes it by also using 3 secs for sctp shutdown send and recv state in sctp conntrack, which is also RTO.initial value in SCTP protocol. Note that the very short time value for SCTP_CONNTRACK_SHUTDOWN_SEND/RECV was probably used for a rare scenario where SHUTDOWN is sent on 1st path but SHUTDOWN_ACK is replied on 2nd path, then a new connection started immediately on 1st path. So this patch also moves from SHUTDOWN_SEND/RECV to CLOSE when receiving INIT in the ORIGINAL direction. 
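For reference, the "always show 0" oddity above is plain whole-second
rounding; a standalone illustration (the HZ value here is only an example,
and the SECS macro mirrors the style used in the hunk below):

  #include <stdio.h>

  #define HZ   250                /* example tick rate */
  #define SECS * HZ               /* timeouts are kept in jiffies */

  int main(void)
  {
          unsigned int shutdown_sent = 300 SECS / 1000;   /* the old 300 ms value */

          /* the sysctl reports whole seconds, so 0.3 s is displayed as 0 */
          printf("%u\n", shutdown_sent / HZ);
          return 0;
  }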
Fixes: 9fb9cbb1082d ("[NETFILTER]: Add nf_conntrack subsystem.") Reported-by: Paolo Valerio Signed-off-by: Xin Long Reviewed-by: Simon Horman Signed-off-by: Florian Westphal Signed-off-by: Greg Kroah-Hartman --- net/netfilter/nf_conntrack_proto_sctp.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 895e0ca54299..7247af51bdfc 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -49,8 +49,8 @@ static const unsigned int sctp_timeouts[SCTP_CONNTRACK_MAX] = { [SCTP_CONNTRACK_COOKIE_WAIT] = 3 SECS, [SCTP_CONNTRACK_COOKIE_ECHOED] = 3 SECS, [SCTP_CONNTRACK_ESTABLISHED] = 210 SECS, - [SCTP_CONNTRACK_SHUTDOWN_SENT] = 300 SECS / 1000, - [SCTP_CONNTRACK_SHUTDOWN_RECD] = 300 SECS / 1000, + [SCTP_CONNTRACK_SHUTDOWN_SENT] = 3 SECS, + [SCTP_CONNTRACK_SHUTDOWN_RECD] = 3 SECS, [SCTP_CONNTRACK_SHUTDOWN_ACK_SENT] = 3 SECS, [SCTP_CONNTRACK_HEARTBEAT_SENT] = 30 SECS, }; @@ -105,7 +105,7 @@ static const u8 sctp_conntracks[2][11][SCTP_CONNTRACK_MAX] = { { /* ORIGINAL */ /* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA, sHS */ -/* init */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCW}, +/* init */ {sCL, sCL, sCW, sCE, sES, sCL, sCL, sSA, sCW}, /* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA, sCL}, /* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL}, /* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA, sCL}, From 6b64974e02ea82d0bae917f1fa79495a1a59b5bf Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Thu, 13 Jul 2023 21:59:37 +0900 Subject: [PATCH 489/513] exfat: check if filename entries exceeds max filename length commit d42334578eba1390859012ebb91e1e556d51db49 upstream. exfat_extract_uni_name copies characters from a given file name entry into the 'uniname' variable. This variable is actually defined on the stack of the exfat_readdir() function. According to the definition of the 'exfat_uni_name' type, the file name should be limited 255 characters (+ null teminator space), but the exfat_get_uniname_from_ext_entry() function can write more characters because there is no check if filename entries exceeds max filename length. This patch add the check not to copy filename characters when exceeding max filename length. 
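The guard amounts to tracking the accumulated name length and refusing to
consume further entries once the maximum is reached; a simplified standalone
sketch (the constants 15 and 255 are assumptions used for illustration):

  #define ENTRY_NAME_LEN 15u      /* characters carried per file-name entry */
  #define MAX_NAME_LEN   255u     /* limit for the whole name */

  /* returns how many characters were taken from 'nr_entries' name entries */
  static unsigned int collect_name(unsigned int nr_entries)
  {
          unsigned int uni_len = 0;

          for (unsigned int i = 0; i < nr_entries; i++) {
                  unsigned int len = ENTRY_NAME_LEN;  /* copied from this entry */

                  uni_len += len;
                  /* a short final entry, or hitting the limit, ends the name */
                  if (len != ENTRY_NAME_LEN || uni_len >= MAX_NAME_LEN)
                          break;
          }
          return uni_len;
  }

  int main(void)
  {
          /* even 20 crafted entries never spill past the 255-character limit */
          return collect_name(20) > MAX_NAME_LEN ? 1 : 0;
  }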
Cc: stable@vger.kernel.org Cc: Yuezhang Mo Reported-by: Maxim Suhanov Reviewed-by: Sungjong Seo Signed-off-by: Namjae Jeon Signed-off-by: Sasha Levin [Harshit: backport to 5.15.y] Signed-off-by: Harshit Mogalapalli Signed-off-by: Greg Kroah-Hartman --- fs/exfat/dir.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/fs/exfat/dir.c b/fs/exfat/dir.c index 8475a8653c3a..f6dd4fc8eaf4 100644 --- a/fs/exfat/dir.c +++ b/fs/exfat/dir.c @@ -34,6 +34,7 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb, { int i; struct exfat_entry_set_cache *es; + unsigned int uni_len = 0, len; es = exfat_get_dentry_set(sb, p_dir, entry, ES_ALL_ENTRIES); if (!es) @@ -52,7 +53,10 @@ static void exfat_get_uniname_from_ext_entry(struct super_block *sb, if (exfat_get_entry_type(ep) != TYPE_EXTEND) break; - exfat_extract_uni_name(ep, uniname); + len = exfat_extract_uni_name(ep, uniname); + uni_len += len; + if (len != EXFAT_FILE_NAME_LEN || uni_len >= MAX_NAME_LENGTH) + break; uniname += EXFAT_FILE_NAME_LEN; } @@ -1032,7 +1036,8 @@ int exfat_find_dir_entry(struct super_block *sb, struct exfat_inode_info *ei, if (entry_type == TYPE_EXTEND) { unsigned short entry_uniname[16], unichar; - if (step != DIRENT_STEP_NAME) { + if (step != DIRENT_STEP_NAME || + name_len >= MAX_NAME_LENGTH) { step = DIRENT_STEP_FILE; continue; } From 9a4d8dc706c2523ea084e29e47b9a63bb0ca83d9 Mon Sep 17 00:00:00 2001 From: Christopher Obbard Date: Wed, 5 Jul 2023 15:42:54 +0100 Subject: [PATCH 490/513] arm64: dts: rockchip: Disable HS400 for eMMC on ROCK Pi 4 commit cee572756aa2cb46e959e9797ad4b730b78a050b upstream. There is some instablity with some eMMC modules on ROCK Pi 4 SBCs running in HS400 mode. This ends up resulting in some block errors after a while or after a "heavy" operation utilising the eMMC (e.g. resizing a filesystem). An example of these errors is as follows: [ 289.171014] mmc1: running CQE recovery [ 290.048972] mmc1: running CQE recovery [ 290.054834] mmc1: running CQE recovery [ 290.060817] mmc1: running CQE recovery [ 290.061337] blk_update_request: I/O error, dev mmcblk1, sector 1411072 op 0x1:(WRITE) flags 0x800 phys_seg 36 prio class 0 [ 290.061370] EXT4-fs warning (device mmcblk1p1): ext4_end_bio:348: I/O error 10 writing to inode 29547 starting block 176466) [ 290.061484] Buffer I/O error on device mmcblk1p1, logical block 172288 [ 290.061531] Buffer I/O error on device mmcblk1p1, logical block 172289 [ 290.061551] Buffer I/O error on device mmcblk1p1, logical block 172290 [ 290.061574] Buffer I/O error on device mmcblk1p1, logical block 172291 [ 290.061592] Buffer I/O error on device mmcblk1p1, logical block 172292 [ 290.061615] Buffer I/O error on device mmcblk1p1, logical block 172293 [ 290.061632] Buffer I/O error on device mmcblk1p1, logical block 172294 [ 290.061654] Buffer I/O error on device mmcblk1p1, logical block 172295 [ 290.061673] Buffer I/O error on device mmcblk1p1, logical block 172296 [ 290.061695] Buffer I/O error on device mmcblk1p1, logical block 172297 Disabling the Command Queue seems to stop the CQE recovery from running, but doesn't seem to improve the I/O errors. Until this can be investigated further, disable HS400 mode on the ROCK Pi 4 SBCs to at least stop I/O errors from occurring. While we are here, set the eMMC maximum clock frequency to 1.5MHz to follow the ROCK 4C+. 
Fixes: 1b5715c602fd ("arm64: dts: rockchip: add ROCK Pi 4 DTS support") Signed-off-by: Christopher Obbard Tested-By: Folker Schwesinger Link: https://lore.kernel.org/r/20230705144255.115299-2-chris.obbard@collabora.com Signed-off-by: Heiko Stuebner Signed-off-by: Greg Kroah-Hartman --- arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi index a7ec81657503..8b70e831aff2 100644 --- a/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3399-rock-pi-4.dtsi @@ -595,9 +595,9 @@ }; &sdhci { + max-frequency = <150000000>; bus-width = <8>; - mmc-hs400-1_8v; - mmc-hs400-enhanced-strobe; + mmc-hs200-1_8v; non-removable; status = "okay"; }; From d39fc9b94dc0719afa4bc8e58341a5eb41febef3 Mon Sep 17 00:00:00 2001 From: Kuniyuki Iwashima Date: Mon, 21 Aug 2023 10:55:05 -0700 Subject: [PATCH 491/513] af_unix: Fix null-ptr-deref in unix_stream_sendpage(). Bing-Jhong Billy Jheng reported null-ptr-deref in unix_stream_sendpage() with detailed analysis and a nice repro. unix_stream_sendpage() tries to add data to the last skb in the peer's recv queue without locking the queue. If the peer's FD is passed to another socket and the socket's FD is passed to the peer, there is a loop between them. If we close both sockets without receiving FD, the sockets will be cleaned up by garbage collection. The garbage collection iterates such sockets and unlinks skb with FD from the socket's receive queue under the queue's lock. So, there is a race where unix_stream_sendpage() could access an skb locklessly that is being released by garbage collection, resulting in use-after-free. To avoid the issue, unix_stream_sendpage() must lock the peer's recv queue. Note the issue does not exist in 6.5+ thanks to the recent sendpage() refactoring. This patch is originally written by Linus Torvalds. BUG: unable to handle page fault for address: ffff988004dd6870 PF: supervisor read access in kernel mode PF: error_code(0x0000) - not-present page PGD 0 P4D 0 PREEMPT SMP PTI CPU: 4 PID: 297 Comm: garbage_uaf Not tainted 6.1.46 #1 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.16.0-0-gd239552ce722-prebuilt.qemu.org 04/01/2014 RIP: 0010:kmem_cache_alloc_node+0xa2/0x1e0 Code: c0 0f 84 32 01 00 00 41 83 fd ff 74 10 48 8b 00 48 c1 e8 3a 41 39 c5 0f 85 1c 01 00 00 41 8b 44 24 28 49 8b 3c 24 48 8d 4a 40 <49> 8b 1c 06 4c 89 f0 65 48 0f c7 0f 0f 94 c0 84 c0 74 a1 41 8b 44 RSP: 0018:ffffc9000079fac0 EFLAGS: 00000246 RAX: 0000000000000070 RBX: 0000000000000005 RCX: 000000000001a284 RDX: 000000000001a244 RSI: 0000000000400cc0 RDI: 000000000002eee0 RBP: 0000000000400cc0 R08: 0000000000400cc0 R09: 0000000000000003 R10: 0000000000000001 R11: 0000000000000000 R12: ffff888003970f00 R13: 00000000ffffffff R14: ffff988004dd6800 R15: 00000000000000e8 FS: 00007f174d6f3600(0000) GS:ffff88807db00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffff988004dd6870 CR3: 00000000092be000 CR4: 00000000007506e0 PKRU: 55555554 Call Trace: ? __die_body.cold+0x1a/0x1f ? page_fault_oops+0xa9/0x1e0 ? fixup_exception+0x1d/0x310 ? exc_page_fault+0xa8/0x150 ? asm_exc_page_fault+0x22/0x30 ? kmem_cache_alloc_node+0xa2/0x1e0 ? 
__alloc_skb+0x16c/0x1e0 __alloc_skb+0x16c/0x1e0 alloc_skb_with_frags+0x48/0x1e0 sock_alloc_send_pskb+0x234/0x270 unix_stream_sendmsg+0x1f5/0x690 sock_sendmsg+0x5d/0x60 ____sys_sendmsg+0x210/0x260 ___sys_sendmsg+0x83/0xd0 ? kmem_cache_alloc+0xc6/0x1c0 ? avc_disable+0x20/0x20 ? percpu_counter_add_batch+0x53/0xc0 ? alloc_empty_file+0x5d/0xb0 ? alloc_file+0x91/0x170 ? alloc_file_pseudo+0x94/0x100 ? __fget_light+0x9f/0x120 __sys_sendmsg+0x54/0xa0 do_syscall_64+0x3b/0x90 entry_SYSCALL_64_after_hwframe+0x69/0xd3 RIP: 0033:0x7f174d639a7d Code: 28 89 54 24 1c 48 89 74 24 10 89 7c 24 08 e8 8a c1 f4 ff 8b 54 24 1c 48 8b 74 24 10 41 89 c0 8b 7c 24 08 b8 2e 00 00 00 0f 05 <48> 3d 00 f0 ff ff 77 33 44 89 c7 48 89 44 24 08 e8 de c1 f4 ff 48 RSP: 002b:00007ffcb563ea50 EFLAGS: 00000293 ORIG_RAX: 000000000000002e RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f174d639a7d RDX: 0000000000000000 RSI: 00007ffcb563eab0 RDI: 0000000000000007 RBP: 00007ffcb563eb10 R08: 0000000000000000 R09: 00000000ffffffff R10: 00000000004040a0 R11: 0000000000000293 R12: 00007ffcb563ec28 R13: 0000000000401398 R14: 0000000000403e00 R15: 00007f174d72c000 Fixes: 869e7c62486e ("net: af_unix: implement stream sendpage support") Reported-by: Bing-Jhong Billy Jheng Reviewed-by: Bing-Jhong Billy Jheng Co-developed-by: Linus Torvalds Signed-off-by: Linus Torvalds Signed-off-by: Kuniyuki Iwashima Signed-off-by: Greg Kroah-Hartman --- net/unix/af_unix.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 7a076d5017d1..5264fe82e6ec 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -2156,6 +2156,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, if (false) { alloc_skb: + spin_unlock(&other->sk_receive_queue.lock); unix_state_unlock(other); mutex_unlock(&unix_sk(other)->iolock); newskb = sock_alloc_send_pskb(sk, 0, 0, flags & MSG_DONTWAIT, @@ -2195,6 +2196,7 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, init_scm = false; } + spin_lock(&other->sk_receive_queue.lock); skb = skb_peek_tail(&other->sk_receive_queue); if (tail && tail == skb) { skb = newskb; @@ -2225,14 +2227,11 @@ static ssize_t unix_stream_sendpage(struct socket *socket, struct page *page, refcount_add(size, &sk->sk_wmem_alloc); if (newskb) { - err = unix_scm_to_skb(&scm, skb, false); - if (err) - goto err_state_unlock; - spin_lock(&other->sk_receive_queue.lock); + unix_scm_to_skb(&scm, skb, false); __skb_queue_tail(&other->sk_receive_queue, newskb); - spin_unlock(&other->sk_receive_queue.lock); } + spin_unlock(&other->sk_receive_queue.lock); unix_state_unlock(other); mutex_unlock(&unix_sk(other)->iolock); From 431db3f48c286462ad7453ccdf284f590aafa949 Mon Sep 17 00:00:00 2001 From: Jason Wang Date: Wed, 9 Aug 2023 23:12:56 -0400 Subject: [PATCH 492/513] virtio-net: set queues after driver_ok commit 51b813176f098ff61bd2833f627f5319ead098a5 upstream. Commit 25266128fe16 ("virtio-net: fix race between set queues and probe") tries to fix the race between set queues and probe by calling _virtnet_set_queues() before DRIVER_OK is set. This violates virtio spec. Fixing this by setting queues after virtio_device_ready(). Note that rtnl needs to be held for userspace requests to change the number of queues. So we are serialized in this way. Fixes: 25266128fe16 ("virtio-net: fix race between set queues and probe") Reported-by: Dragos Tatulea Acked-by: Michael S. Tsirkin Signed-off-by: Jason Wang Signed-off-by: David S. 
Miller Signed-off-by: Greg Kroah-Hartman --- drivers/net/virtio_net.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index af335f8266c2..3eefe8171925 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -3319,8 +3319,6 @@ static int virtnet_probe(struct virtio_device *vdev) } } - _virtnet_set_queues(vi, vi->curr_queue_pairs); - /* serialize netdev register + virtio_device_ready() with ndo_open() */ rtnl_lock(); @@ -3333,6 +3331,8 @@ static int virtnet_probe(struct virtio_device *vdev) virtio_device_ready(vdev); + _virtnet_set_queues(vi, vi->curr_queue_pairs); + rtnl_unlock(); err = virtnet_cpu_notif_add(vi); From e8c5081da2cc3dffa3ae1062121f2db3aa9c0cd2 Mon Sep 17 00:00:00 2001 From: Jason Xing Date: Fri, 11 Aug 2023 10:37:47 +0800 Subject: [PATCH 493/513] net: fix the RTO timer retransmitting skb every 1ms if linear option is enabled commit e4dd0d3a2f64b8bd8029ec70f52bdbebd0644408 upstream. In the real workload, I encountered an issue which could cause the RTO timer to retransmit the skb per 1ms with linear option enabled. The amount of lost-retransmitted skbs can go up to 1000+ instantly. The root cause is that if the icsk_rto happens to be zero in the 6th round (which is the TCP_THIN_LINEAR_RETRIES value), then it will always be zero due to the changed calculation method in tcp_retransmit_timer() as follows: icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); Above line could be converted to icsk->icsk_rto = min(0 << 1, TCP_RTO_MAX) = 0 Therefore, the timer expires so quickly without any doubt. I read through the RFC 6298 and found that the RTO value can be rounded up to a certain value, in Linux, say TCP_RTO_MIN as default, which is regarded as the lower bound in this patch as suggested by Eric. Fixes: 36e31b0af587 ("net: TCP thin linear timeouts") Suggested-by: Eric Dumazet Signed-off-by: Jason Xing Reviewed-by: Eric Dumazet Signed-off-by: David S. Miller Signed-off-by: Greg Kroah-Hartman --- net/ipv4/tcp_timer.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c index 50bba370486e..a8592c187b32 100644 --- a/net/ipv4/tcp_timer.c +++ b/net/ipv4/tcp_timer.c @@ -582,7 +582,9 @@ void tcp_retransmit_timer(struct sock *sk) tcp_stream_is_thin(tp) && icsk->icsk_retransmits <= TCP_THIN_LINEAR_RETRIES) { icsk->icsk_backoff = 0; - icsk->icsk_rto = min(__tcp_set_rto(tp), TCP_RTO_MAX); + icsk->icsk_rto = clamp(__tcp_set_rto(tp), + tcp_rto_min(sk), + TCP_RTO_MAX); } else { /* Use normal (exponential) backoff */ icsk->icsk_rto = min(icsk->icsk_rto << 1, TCP_RTO_MAX); From 5b28fda5fdee72e93b12b44db158882d7e3f3e92 Mon Sep 17 00:00:00 2001 From: Yangtao Li Date: Thu, 27 Jul 2023 15:00:51 +0800 Subject: [PATCH 494/513] mmc: f-sdh30: fix order of function calls in sdhci_f_sdh30_remove MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit commit 58abdd80b93b09023ca03007b608685c41e3a289 upstream. The order of function calls in sdhci_f_sdh30_remove is wrong, let's call sdhci_pltfm_unregister first. 
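The subtle part is that the unregister call also frees the private data that
holds the clock handles, which is why the change below snapshots them first;
a standalone sketch of that teardown ordering (invented types, not the
driver code):

  #include <stdlib.h>

  struct clk  { int dummy; };
  struct priv { struct clk *clk_iface; struct clk *clk; };  /* lives inside the host */

  static void unregister_host(struct priv *p) { free(p); }  /* frees priv as well */
  static void release_clk(struct clk *c) { free(c); }

  static void remove_dev(struct priv *p)
  {
          struct clk *clk_iface = p->clk_iface;   /* snapshot before priv goes away */
          struct clk *clk = p->clk;

          unregister_host(p);         /* must run while the clocks are still on */

          release_clk(clk_iface);     /* now tear the clocks down */
          release_clk(clk);
  }

  int main(void)
  {
          struct priv *p = calloc(1, sizeof(*p));

          if (!p)
                  return 1;
          p->clk_iface = calloc(1, sizeof(struct clk));
          p->clk = calloc(1, sizeof(struct clk));
          remove_dev(p);
          return 0;
  }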
Cc: Uwe Kleine-König Fixes: 5def5c1c15bf ("mmc: sdhci-f-sdh30: Replace with sdhci_pltfm") Signed-off-by: Yangtao Li Reported-by: Uwe Kleine-König Acked-by: Uwe Kleine-König Acked-by: Adrian Hunter Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20230727070051.17778-62-frank.li@vivo.com Signed-off-by: Ulf Hansson Signed-off-by: Greg Kroah-Hartman --- drivers/mmc/host/sdhci_f_sdh30.c | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/mmc/host/sdhci_f_sdh30.c b/drivers/mmc/host/sdhci_f_sdh30.c index 8876fd1c7eee..7ede74bf3723 100644 --- a/drivers/mmc/host/sdhci_f_sdh30.c +++ b/drivers/mmc/host/sdhci_f_sdh30.c @@ -188,12 +188,14 @@ static int sdhci_f_sdh30_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); struct f_sdhost_priv *priv = sdhci_f_sdhost_priv(host); - - clk_disable_unprepare(priv->clk_iface); - clk_disable_unprepare(priv->clk); + struct clk *clk_iface = priv->clk_iface; + struct clk *clk = priv->clk; sdhci_pltfm_unregister(pdev); + clk_disable_unprepare(clk_iface); + clk_disable_unprepare(clk); + return 0; } From 8089aae6020e51d556dcc6af54495c9aa99a761c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:27 +0200 Subject: [PATCH 495/513] x86/cpu: Fix __x86_return_thunk symbol type commit 77f67119004296a9b2503b377d610e08b08afc2a upstream. Commit fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") reimplemented __x86_return_thunk with a mix of SYM_FUNC_START and SYM_CODE_END, this is not a sane combination. Since nothing should ever actually 'CALL' this, make it consistently CODE. Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121148.571027074@infradead.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/lib/retpoline.S | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 5f7eed97487e..cb04a2b17d36 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -204,7 +204,9 @@ SYM_CODE_END(srso_safe_ret) SYM_FUNC_END(srso_untrain_ret) __EXPORT_THUNK(srso_untrain_ret) -SYM_FUNC_START(__x86_return_thunk) +SYM_CODE_START(__x86_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \ "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS int3 From 0d810eff090c30535f5cb8db6a6cfbaed7aa499a Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:28 +0200 Subject: [PATCH 496/513] x86/cpu: Fix up srso_safe_ret() and __x86_return_thunk() commit af023ef335f13c8b579298fc432daeef609a9e60 upstream. vmlinux.o: warning: objtool: srso_untrain_ret() falls through to next function __x86_return_skl() vmlinux.o: warning: objtool: __x86_return_thunk() falls through to next function __x86_return_skl() This is because these functions (can) end with CALL, which objtool does not consider a terminating instruction. Therefore, replace the INT3 instruction (which is a non-fatal trap) with UD2 (which is a fatal-trap). This indicates execution will not continue past this point. 
Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121148.637802730@infradead.org Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/lib/retpoline.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index cb04a2b17d36..f6b65faa266a 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -199,7 +199,7 @@ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) int3 lfence call srso_safe_ret - int3 + ud2 SYM_CODE_END(srso_safe_ret) SYM_FUNC_END(srso_untrain_ret) __EXPORT_THUNK(srso_untrain_ret) @@ -209,7 +209,7 @@ SYM_CODE_START(__x86_return_thunk) ANNOTATE_NOENDBR ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \ "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS - int3 + ud2 SYM_CODE_END(__x86_return_thunk) EXPORT_SYMBOL(__x86_return_thunk) From 08f7cfd44f77b2796582bc26164fdef44dd33b6c Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:30 +0200 Subject: [PATCH 497/513] x86/alternative: Make custom return thunk unconditional commit 095b8303f3835c68ac4a8b6d754ca1c3b6230711 upstream. There is infrastructure to rewrite return thunks to point to any random thunk one desires, unwrap that from CALL_THUNKS, which up to now was the sole user of that. [ bp: Make the thunks visible on 32-bit and add ifdeffery for the 32-bit builds. ] Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121148.775293785@infradead.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/nospec-branch.h | 5 +++++ arch/x86/kernel/cpu/bugs.c | 2 ++ 2 files changed, 7 insertions(+) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 4a12dfdd317c..b22add0885ab 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -195,7 +195,12 @@ _ASM_PTR " 999b\n\t" \ ".popsection\n\t" +#ifdef CONFIG_RETHUNK extern void __x86_return_thunk(void); +#else +static inline void __x86_return_thunk(void) {} +#endif + extern void zen_untrain_ret(void); extern void srso_untrain_ret(void); extern void srso_untrain_ret_alias(void); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 73dad1400633..169cb40121f0 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -62,6 +62,8 @@ EXPORT_SYMBOL_GPL(x86_pred_cmd); static DEFINE_MUTEX(spec_ctrl_mutex); +void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk; + /* Update SPEC_CTRL MSR and its cached copy unconditionally */ static void update_spec_ctrl(u64 val) { From f1171d455d94f15ae70b8965b82c3f039f187a63 Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Tue, 14 Sep 2021 23:41:13 +0900 Subject: [PATCH 498/513] objtool: Add frame-pointer-specific function ignore [ Upstream commit e028c4f7ac7ca8c96126fe46c54ab3d56ffe6a66 ] Add a CONFIG_FRAME_POINTER-specific version of STACK_FRAME_NON_STANDARD() for the case where a function is intentionally missing frame pointer setup, but otherwise needs objtool/ORC coverage when frame pointers are disabled. 
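Usage mirrors the existing annotation: it is placed at file scope after the function it refers to. A purely illustrative example (the function name is made up):

    /* frame built by hand in inline asm, no compiler frame setup */
    static void my_hand_rolled_trampoline(void)
    {
            /* ... */
    }
    /*
     * Ignored by objtool only when CONFIG_FRAME_POINTER=y; in ORC
     * builds the macro expands to nothing, so the function is still
     * validated and gets unwind coverage.
     */
    STACK_FRAME_NON_STANDARD_FP(my_hand_rolled_trampoline);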
Link: https://lkml.kernel.org/r/163163047364.489837.17377799909553689661.stgit@devnote2 Signed-off-by: Josh Poimboeuf Reviewed-by: Masami Hiramatsu Tested-by: Masami Hiramatsu Signed-off-by: Masami Hiramatsu Signed-off-by: Steven Rostedt (VMware) Stable-dep-of: c8c301abeae5 ("x86/ibt: Add ANNOTATE_NOENDBR") Signed-off-by: Sasha Levin --- include/linux/objtool.h | 12 ++++++++++++ tools/include/linux/objtool.h | 12 ++++++++++++ 2 files changed, 24 insertions(+) diff --git a/include/linux/objtool.h b/include/linux/objtool.h index a2042c418686..d59e69df821e 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -71,6 +71,17 @@ struct unwind_hint { static void __used __section(".discard.func_stack_frame_non_standard") \ *__func_stack_frame_non_standard_##func = func +/* + * STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore + * for the case where a function is intentionally missing frame pointer setup, + * but otherwise needs objtool/ORC coverage when frame pointers are disabled. + */ +#ifdef CONFIG_FRAME_POINTER +#define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func) +#else +#define STACK_FRAME_NON_STANDARD_FP(func) +#endif + #else /* __ASSEMBLY__ */ /* @@ -132,6 +143,7 @@ struct unwind_hint { #define UNWIND_HINT(sp_reg, sp_offset, type, end) \ "\n\t" #define STACK_FRAME_NON_STANDARD(func) +#define STACK_FRAME_NON_STANDARD_FP(func) #else #define ANNOTATE_INTRA_FUNCTION_CALL .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 diff --git a/tools/include/linux/objtool.h b/tools/include/linux/objtool.h index a2042c418686..d59e69df821e 100644 --- a/tools/include/linux/objtool.h +++ b/tools/include/linux/objtool.h @@ -71,6 +71,17 @@ struct unwind_hint { static void __used __section(".discard.func_stack_frame_non_standard") \ *__func_stack_frame_non_standard_##func = func +/* + * STACK_FRAME_NON_STANDARD_FP() is a frame-pointer-specific function ignore + * for the case where a function is intentionally missing frame pointer setup, + * but otherwise needs objtool/ORC coverage when frame pointers are disabled. + */ +#ifdef CONFIG_FRAME_POINTER +#define STACK_FRAME_NON_STANDARD_FP(func) STACK_FRAME_NON_STANDARD(func) +#else +#define STACK_FRAME_NON_STANDARD_FP(func) +#endif + #else /* __ASSEMBLY__ */ /* @@ -132,6 +143,7 @@ struct unwind_hint { #define UNWIND_HINT(sp_reg, sp_offset, type, end) \ "\n\t" #define STACK_FRAME_NON_STANDARD(func) +#define STACK_FRAME_NON_STANDARD_FP(func) #else #define ANNOTATE_INTRA_FUNCTION_CALL .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 From f624ce6c7fc27468cb52623222ed61921822edc0 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Tue, 8 Mar 2022 16:30:18 +0100 Subject: [PATCH 499/513] x86/ibt: Add ANNOTATE_NOENDBR [ Upstream commit c8c301abeae58ec756b8fcb2178a632bd3c9e284 ] In order to have objtool warn about code references to !ENDBR instruction, we need an annotation to allow this for non-control-flow instances -- consider text range checks, text patching, or return trampolines etc. 
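The annotation records the address of whatever instruction follows it in the .discard.noendbr section, telling objtool that a code reference to that non-ENDBR location is intentional and should not be warned about. The C variant is a string literal meant to be pasted into inline assembly; a made-up usage sketch:

    /* hypothetical: the label is referenced by address but is never an
     * indirect-branch target, so it intentionally carries no ENDBR */
    asm volatile(ANNOTATE_NOENDBR
                 "1:\n\t"
                 "nop\n\t"
                 : : : "memory");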
Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Kees Cook Acked-by: Josh Poimboeuf Link: https://lore.kernel.org/r/20220308154317.578968224@infradead.org Signed-off-by: Sasha Levin --- include/linux/objtool.h | 16 ++++++++++++++++ tools/include/linux/objtool.h | 16 ++++++++++++++++ 2 files changed, 32 insertions(+) diff --git a/include/linux/objtool.h b/include/linux/objtool.h index d59e69df821e..51f5b24af834 100644 --- a/include/linux/objtool.h +++ b/include/linux/objtool.h @@ -82,6 +82,12 @@ struct unwind_hint { #define STACK_FRAME_NON_STANDARD_FP(func) #endif +#define ANNOTATE_NOENDBR \ + "986: \n\t" \ + ".pushsection .discard.noendbr\n\t" \ + _ASM_PTR " 986b\n\t" \ + ".popsection\n\t" + #else /* __ASSEMBLY__ */ /* @@ -134,6 +140,13 @@ struct unwind_hint { .popsection .endm +.macro ANNOTATE_NOENDBR +.Lhere_\@: + .pushsection .discard.noendbr + .quad .Lhere_\@ + .popsection +.endm + #endif /* __ASSEMBLY__ */ #else /* !CONFIG_STACK_VALIDATION */ @@ -144,12 +157,15 @@ struct unwind_hint { "\n\t" #define STACK_FRAME_NON_STANDARD(func) #define STACK_FRAME_NON_STANDARD_FP(func) +#define ANNOTATE_NOENDBR #else #define ANNOTATE_INTRA_FUNCTION_CALL .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 .endm .macro STACK_FRAME_NON_STANDARD func:req .endm +.macro ANNOTATE_NOENDBR +.endm #endif #endif /* CONFIG_STACK_VALIDATION */ diff --git a/tools/include/linux/objtool.h b/tools/include/linux/objtool.h index d59e69df821e..51f5b24af834 100644 --- a/tools/include/linux/objtool.h +++ b/tools/include/linux/objtool.h @@ -82,6 +82,12 @@ struct unwind_hint { #define STACK_FRAME_NON_STANDARD_FP(func) #endif +#define ANNOTATE_NOENDBR \ + "986: \n\t" \ + ".pushsection .discard.noendbr\n\t" \ + _ASM_PTR " 986b\n\t" \ + ".popsection\n\t" + #else /* __ASSEMBLY__ */ /* @@ -134,6 +140,13 @@ struct unwind_hint { .popsection .endm +.macro ANNOTATE_NOENDBR +.Lhere_\@: + .pushsection .discard.noendbr + .quad .Lhere_\@ + .popsection +.endm + #endif /* __ASSEMBLY__ */ #else /* !CONFIG_STACK_VALIDATION */ @@ -144,12 +157,15 @@ struct unwind_hint { "\n\t" #define STACK_FRAME_NON_STANDARD(func) #define STACK_FRAME_NON_STANDARD_FP(func) +#define ANNOTATE_NOENDBR #else #define ANNOTATE_INTRA_FUNCTION_CALL .macro UNWIND_HINT type:req sp_reg=0 sp_offset=0 end=0 .endm .macro STACK_FRAME_NON_STANDARD func:req .endm +.macro ANNOTATE_NOENDBR +.endm #endif #endif /* CONFIG_STACK_VALIDATION */ From f77dbb90962b58c1b3b0a5f60b772582d1caa3d2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:31 +0200 Subject: [PATCH 500/513] x86/cpu: Clean up SRSO return thunk mess commit d43490d0ab824023e11d0b57d0aeec17a6e0ca13 upstream. Use the existing configurable return thunk. There is absolute no justification for having created this __x86_return_thunk alternative. To clarify, the whole thing looks like: Zen3/4 does: srso_alias_untrain_ret: nop2 lfence jmp srso_alias_return_thunk int3 srso_alias_safe_ret: // aliasses srso_alias_untrain_ret just so add $8, %rsp ret int3 srso_alias_return_thunk: call srso_alias_safe_ret ud2 While Zen1/2 does: srso_untrain_ret: movabs $foo, %rax lfence call srso_safe_ret (jmp srso_return_thunk ?) int3 srso_safe_ret: // embedded in movabs instruction add $8,%rsp ret int3 srso_return_thunk: call srso_safe_ret ud2 While retbleed does: zen_untrain_ret: test $0xcc, %bl lfence jmp zen_return_thunk int3 zen_return_thunk: // embedded in the test instruction ret int3 Where Zen1/2 flush the BTB entry using the instruction decoder trick (test,movabs) Zen3/4 use BTB aliasing. 
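Which of these layouts is live is decided once at boot through the x86_return_thunk function pointer (made unconditionally available a few patches earlier) rather than through the __x86_return_thunk alternative; pulling the relevant bugs.c pieces together, the selection is essentially:

    void (*x86_return_thunk)(void) __ro_after_init = &__x86_return_thunk;

    /* in srso_select_mitigation(), heavily simplified */
    if (boot_cpu_data.x86 == 0x19)          /* Zen3/4 */
            x86_return_thunk = srso_alias_return_thunk;
    else                                    /* Zen1/2 */
            x86_return_thunk = srso_return_thunk;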
SRSO adds a return sequence (srso_safe_ret()) which forces the function return instruction to speculate into a trap (UD2). This RET will then mispredict and execution will continue at the return site read from the top of the stack. Pick one of three options at boot (evey function can only ever return once). [ bp: Fixup commit message uarch details and add them in a comment in the code too. Add a comment about the srso_select_mitigation() dependency on retbleed_select_mitigation(). Add moar ifdeffery for 32-bit builds. Add a dummy srso_untrain_ret_alias() definition for 32-bit alternatives needing the symbol. ] Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121148.842775684@infradead.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/nospec-branch.h | 5 +++ arch/x86/kernel/cpu/bugs.c | 17 ++++++-- arch/x86/kernel/vmlinux.lds.S | 4 +- arch/x86/lib/retpoline.S | 58 ++++++++++++++++++++-------- tools/objtool/arch/x86/decode.c | 2 +- 5 files changed, 64 insertions(+), 22 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index b22add0885ab..053164a18a32 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -201,9 +201,14 @@ extern void __x86_return_thunk(void); static inline void __x86_return_thunk(void) {} #endif +extern void zen_return_thunk(void); +extern void srso_return_thunk(void); +extern void srso_alias_return_thunk(void); + extern void zen_untrain_ret(void); extern void srso_untrain_ret(void); extern void srso_untrain_ret_alias(void); + extern void entry_ibpb(void); #ifdef CONFIG_RETPOLINE diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 169cb40121f0..41ca879eb081 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -166,8 +166,13 @@ void __init cpu_select_mitigations(void) md_clear_select_mitigation(); srbds_select_mitigation(); l1d_flush_select_mitigation(); - gds_select_mitigation(); + + /* + * srso_select_mitigation() depends and must run after + * retbleed_select_mitigation(). + */ srso_select_mitigation(); + gds_select_mitigation(); } /* @@ -1015,6 +1020,9 @@ static void __init retbleed_select_mitigation(void) setup_force_cpu_cap(X86_FEATURE_RETHUNK); setup_force_cpu_cap(X86_FEATURE_UNRET); + if (IS_ENABLED(CONFIG_RETHUNK)) + x86_return_thunk = zen_return_thunk; + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) pr_err(RETBLEED_UNTRAIN_MSG); @@ -2422,10 +2430,13 @@ static void __init srso_select_mitigation(void) */ setup_force_cpu_cap(X86_FEATURE_RETHUNK); - if (boot_cpu_data.x86 == 0x19) + if (boot_cpu_data.x86 == 0x19) { setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); - else + x86_return_thunk = srso_alias_return_thunk; + } else { setup_force_cpu_cap(X86_FEATURE_SRSO); + x86_return_thunk = srso_return_thunk; + } srso_mitigation = SRSO_MITIGATION_SAFE_RET; } else { pr_err("WARNING: kernel not compiled with CPU_SRSO.\n"); diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 7f3fa72a236a..9e06ebbdb15e 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -511,8 +511,8 @@ INIT_PER_CPU(irq_stack_backing_store); "fixed_percpu_data is not at start of per-cpu area"); #endif - #ifdef CONFIG_RETHUNK -. = ASSERT((__ret & 0x3f) == 0, "__ret not cacheline-aligned"); +#ifdef CONFIG_RETHUNK +. 
= ASSERT((zen_return_thunk & 0x3f) == 0, "zen_return_thunk not cacheline-aligned"); . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); #endif diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index f6b65faa266a..0d0de750b87e 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -93,21 +93,26 @@ SYM_CODE_END(__x86_indirect_thunk_array) .section .text.__x86.rethunk_untrain SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) + UNWIND_HINT_FUNC ASM_NOP2 lfence - jmp __x86_return_thunk + jmp srso_alias_return_thunk SYM_FUNC_END(srso_untrain_ret_alias) __EXPORT_THUNK(srso_untrain_ret_alias) .section .text.__x86.rethunk_safe +#else +/* dummy definition for alternatives */ +SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) + ANNOTATE_UNRET_SAFE + ret + int3 +SYM_FUNC_END(srso_untrain_ret_alias) #endif -/* Needs a definition for the __x86_return_thunk alternative below. */ SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) -#ifdef CONFIG_CPU_SRSO add $8, %_ASM_SP UNWIND_HINT_FUNC -#endif ANNOTATE_UNRET_SAFE ret int3 @@ -115,9 +120,16 @@ SYM_FUNC_END(srso_safe_ret_alias) .section .text.__x86.return_thunk +SYM_CODE_START(srso_alias_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + call srso_safe_ret_alias + ud2 +SYM_CODE_END(srso_alias_return_thunk) + /* * Safety details here pertain to the AMD Zen{1,2} microarchitecture: - * 1) The RET at __x86_return_thunk must be on a 64 byte boundary, for + * 1) The RET at zen_return_thunk must be on a 64 byte boundary, for * alignment within the BTB. * 2) The instruction at zen_untrain_ret must contain, and not * end with, the 0xc3 byte of the RET. @@ -125,7 +137,7 @@ SYM_FUNC_END(srso_safe_ret_alias) * from re-poisioning the BTB prediction. */ .align 64 - .skip 64 - (__ret - zen_untrain_ret), 0xcc + .skip 64 - (zen_return_thunk - zen_untrain_ret), 0xcc SYM_FUNC_START_NOALIGN(zen_untrain_ret); /* @@ -133,16 +145,16 @@ SYM_FUNC_START_NOALIGN(zen_untrain_ret); * * TEST $0xcc, %bl * LFENCE - * JMP __x86_return_thunk + * JMP zen_return_thunk * * Executing the TEST instruction has a side effect of evicting any BTB * prediction (potentially attacker controlled) attached to the RET, as - * __x86_return_thunk + 1 isn't an instruction boundary at the moment. + * zen_return_thunk + 1 isn't an instruction boundary at the moment. */ .byte 0xf6 /* - * As executed from __x86_return_thunk, this is a plain RET. + * As executed from zen_return_thunk, this is a plain RET. * * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8. * @@ -154,13 +166,13 @@ SYM_FUNC_START_NOALIGN(zen_untrain_ret); * With SMT enabled and STIBP active, a sibling thread cannot poison * RET's prediction to a type of its choice, but can evict the * prediction due to competitive sharing. If the prediction is - * evicted, __x86_return_thunk will suffer Straight Line Speculation + * evicted, zen_return_thunk will suffer Straight Line Speculation * which will be contained safely by the INT3. */ -SYM_INNER_LABEL(__ret, SYM_L_GLOBAL) +SYM_INNER_LABEL(zen_return_thunk, SYM_L_GLOBAL) ret int3 -SYM_CODE_END(__ret) +SYM_CODE_END(zen_return_thunk) /* * Ensure the TEST decoding / BTB invalidation is complete. @@ -171,7 +183,7 @@ SYM_CODE_END(__ret) * Jump back and execute the RET in the middle of the TEST instruction. * INT3 is for SLS protection. 
*/ - jmp __ret + jmp zen_return_thunk int3 SYM_FUNC_END(zen_untrain_ret) __EXPORT_THUNK(zen_untrain_ret) @@ -191,12 +203,19 @@ __EXPORT_THUNK(zen_untrain_ret) SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) .byte 0x48, 0xb8 +/* + * This forces the function return instruction to speculate into a trap + * (UD2 in srso_return_thunk() below). This RET will then mispredict + * and execution will continue at the return site read from the top of + * the stack. + */ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) add $8, %_ASM_SP ret int3 int3 int3 + /* end of movabs */ lfence call srso_safe_ret ud2 @@ -204,12 +223,19 @@ SYM_CODE_END(srso_safe_ret) SYM_FUNC_END(srso_untrain_ret) __EXPORT_THUNK(srso_untrain_ret) -SYM_CODE_START(__x86_return_thunk) +SYM_CODE_START(srso_return_thunk) UNWIND_HINT_FUNC ANNOTATE_NOENDBR - ALTERNATIVE_2 "jmp __ret", "call srso_safe_ret", X86_FEATURE_SRSO, \ - "call srso_safe_ret_alias", X86_FEATURE_SRSO_ALIAS + call srso_safe_ret ud2 +SYM_CODE_END(srso_return_thunk) + +SYM_CODE_START(__x86_return_thunk) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + ANNOTATE_UNRET_SAFE + ret + int3 SYM_CODE_END(__x86_return_thunk) EXPORT_SYMBOL(__x86_return_thunk) diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index cf8ea3594125..66b6fdb8e2f9 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -728,5 +728,5 @@ bool arch_is_rethunk(struct symbol *sym) return !strcmp(sym->name, "__x86_return_thunk") || !strcmp(sym->name, "srso_untrain_ret") || !strcmp(sym->name, "srso_safe_ret") || - !strcmp(sym->name, "__ret"); + !strcmp(sym->name, "zen_return_thunk"); } From 19c1c049965094bae561de10d9e0311988c7858f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:32 +0200 Subject: [PATCH 501/513] x86/cpu: Rename original retbleed methods commit d025b7bac07a6e90b6b98b487f88854ad9247c39 upstream. Rename the original retbleed return thunk and untrain_ret to retbleed_return_thunk() and retbleed_untrain_ret(). No functional changes. Suggested-by: Josh Poimboeuf Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121148.909378169@infradead.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/nospec-branch.h | 8 ++++---- arch/x86/kernel/cpu/bugs.c | 2 +- arch/x86/kernel/vmlinux.lds.S | 2 +- arch/x86/lib/retpoline.S | 30 ++++++++++++++-------------- tools/objtool/arch/x86/decode.c | 2 +- tools/objtool/check.c | 2 +- 6 files changed, 23 insertions(+), 23 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index 053164a18a32..c2c99c43b7cd 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -156,7 +156,7 @@ .endm #ifdef CONFIG_CPU_UNRET_ENTRY -#define CALL_ZEN_UNTRAIN_RET "call zen_untrain_ret" +#define CALL_ZEN_UNTRAIN_RET "call retbleed_untrain_ret" #else #define CALL_ZEN_UNTRAIN_RET "" #endif @@ -166,7 +166,7 @@ * return thunk isn't mapped into the userspace tables (then again, AMD * typically has NO_MELTDOWN). * - * While zen_untrain_ret() doesn't clobber anything but requires stack, + * While retbleed_untrain_ret() doesn't clobber anything but requires stack, * entry_ibpb() will clobber AX, CX, DX. 
* * As such, this must be placed after every *SWITCH_TO_KERNEL_CR3 at a point @@ -201,11 +201,11 @@ extern void __x86_return_thunk(void); static inline void __x86_return_thunk(void) {} #endif -extern void zen_return_thunk(void); +extern void retbleed_return_thunk(void); extern void srso_return_thunk(void); extern void srso_alias_return_thunk(void); -extern void zen_untrain_ret(void); +extern void retbleed_untrain_ret(void); extern void srso_untrain_ret(void); extern void srso_untrain_ret_alias(void); diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 41ca879eb081..030a7d263926 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -1021,7 +1021,7 @@ static void __init retbleed_select_mitigation(void) setup_force_cpu_cap(X86_FEATURE_UNRET); if (IS_ENABLED(CONFIG_RETHUNK)) - x86_return_thunk = zen_return_thunk; + x86_return_thunk = retbleed_return_thunk; if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 9e06ebbdb15e..51d008840ffb 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -512,7 +512,7 @@ INIT_PER_CPU(irq_stack_backing_store); #endif #ifdef CONFIG_RETHUNK -. = ASSERT((zen_return_thunk & 0x3f) == 0, "zen_return_thunk not cacheline-aligned"); +. = ASSERT((retbleed_return_thunk & 0x3f) == 0, "retbleed_return_thunk not cacheline-aligned"); . = ASSERT((srso_safe_ret & 0x3f) == 0, "srso_safe_ret not cacheline-aligned"); #endif diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 0d0de750b87e..889882f8c3c8 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -129,32 +129,32 @@ SYM_CODE_END(srso_alias_return_thunk) /* * Safety details here pertain to the AMD Zen{1,2} microarchitecture: - * 1) The RET at zen_return_thunk must be on a 64 byte boundary, for + * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for * alignment within the BTB. - * 2) The instruction at zen_untrain_ret must contain, and not + * 2) The instruction at retbleed_untrain_ret must contain, and not * end with, the 0xc3 byte of the RET. * 3) STIBP must be enabled, or SMT disabled, to prevent the sibling thread * from re-poisioning the BTB prediction. */ .align 64 - .skip 64 - (zen_return_thunk - zen_untrain_ret), 0xcc -SYM_FUNC_START_NOALIGN(zen_untrain_ret); + .skip 64 - (retbleed_return_thunk - retbleed_untrain_ret), 0xcc +SYM_FUNC_START_NOALIGN(retbleed_untrain_ret); /* - * As executed from zen_untrain_ret, this is: + * As executed from retbleed_untrain_ret, this is: * * TEST $0xcc, %bl * LFENCE - * JMP zen_return_thunk + * JMP retbleed_return_thunk * * Executing the TEST instruction has a side effect of evicting any BTB * prediction (potentially attacker controlled) attached to the RET, as - * zen_return_thunk + 1 isn't an instruction boundary at the moment. + * retbleed_return_thunk + 1 isn't an instruction boundary at the moment. */ .byte 0xf6 /* - * As executed from zen_return_thunk, this is a plain RET. + * As executed from retbleed_return_thunk, this is a plain RET. * * As part of the TEST above, RET is the ModRM byte, and INT3 the imm8. * @@ -166,13 +166,13 @@ SYM_FUNC_START_NOALIGN(zen_untrain_ret); * With SMT enabled and STIBP active, a sibling thread cannot poison * RET's prediction to a type of its choice, but can evict the * prediction due to competitive sharing. 
If the prediction is - * evicted, zen_return_thunk will suffer Straight Line Speculation + * evicted, retbleed_return_thunk will suffer Straight Line Speculation * which will be contained safely by the INT3. */ -SYM_INNER_LABEL(zen_return_thunk, SYM_L_GLOBAL) +SYM_INNER_LABEL(retbleed_return_thunk, SYM_L_GLOBAL) ret int3 -SYM_CODE_END(zen_return_thunk) +SYM_CODE_END(retbleed_return_thunk) /* * Ensure the TEST decoding / BTB invalidation is complete. @@ -183,13 +183,13 @@ SYM_CODE_END(zen_return_thunk) * Jump back and execute the RET in the middle of the TEST instruction. * INT3 is for SLS protection. */ - jmp zen_return_thunk + jmp retbleed_return_thunk int3 -SYM_FUNC_END(zen_untrain_ret) -__EXPORT_THUNK(zen_untrain_ret) +SYM_FUNC_END(retbleed_untrain_ret) +__EXPORT_THUNK(retbleed_untrain_ret) /* - * SRSO untraining sequence for Zen1/2, similar to zen_untrain_ret() + * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret() * above. On kernel entry, srso_untrain_ret() is executed which is a * * movabs $0xccccccc308c48348,%rax diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c index 66b6fdb8e2f9..a9c5d54f1688 100644 --- a/tools/objtool/arch/x86/decode.c +++ b/tools/objtool/arch/x86/decode.c @@ -728,5 +728,5 @@ bool arch_is_rethunk(struct symbol *sym) return !strcmp(sym->name, "__x86_return_thunk") || !strcmp(sym->name, "srso_untrain_ret") || !strcmp(sym->name, "srso_safe_ret") || - !strcmp(sym->name, "zen_return_thunk"); + !strcmp(sym->name, "retbleed_return_thunk"); } diff --git a/tools/objtool/check.c b/tools/objtool/check.c index f331780f0425..f78a12ccd441 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -1228,7 +1228,7 @@ static int add_jump_destinations(struct objtool_file *file) continue; /* - * This is a special case for zen_untrain_ret(). + * This is a special case for retbleed_untrain_ret(). * It jumps to __x86_return_thunk(), but objtool * can't find the thunk's starting RET * instruction, because the RET is also in the From 035e906bfc9367c3f7fe0d98011aa702edcfeeef Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:33 +0200 Subject: [PATCH 502/513] x86/cpu: Rename srso_(.*)_alias to srso_alias_\1 commit 42be649dd1f2eee6b1fb185f1a231b9494cf095f upstream. For a more consistent namespace. [ bp: Fixup names in the doc too. ] Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121148.976236447@infradead.org Signed-off-by: Greg Kroah-Hartman --- Documentation/admin-guide/hw-vuln/srso.rst | 4 ++-- arch/x86/include/asm/nospec-branch.h | 4 ++-- arch/x86/kernel/vmlinux.lds.S | 8 +++---- arch/x86/lib/retpoline.S | 26 +++++++++++----------- 4 files changed, 21 insertions(+), 21 deletions(-) diff --git a/Documentation/admin-guide/hw-vuln/srso.rst b/Documentation/admin-guide/hw-vuln/srso.rst index 2f923c805802..f79cb11b080f 100644 --- a/Documentation/admin-guide/hw-vuln/srso.rst +++ b/Documentation/admin-guide/hw-vuln/srso.rst @@ -124,8 +124,8 @@ sequence. To ensure the safety of this mitigation, the kernel must ensure that the safe return sequence is itself free from attacker interference. 
In Zen3 and Zen4, this is accomplished by creating a BTB alias between the -untraining function srso_untrain_ret_alias() and the safe return -function srso_safe_ret_alias() which results in evicting a potentially +untraining function srso_alias_untrain_ret() and the safe return +function srso_alias_safe_ret() which results in evicting a potentially poisoned BTB entry and using that safe one for all function returns. In older Zen1 and Zen2, this is accomplished using a reinterpretation diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index c2c99c43b7cd..b472f2c84b2f 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -183,7 +183,7 @@ #ifdef CONFIG_CPU_SRSO ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ - "call srso_untrain_ret_alias", X86_FEATURE_SRSO_ALIAS + "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS #endif .endm @@ -207,7 +207,7 @@ extern void srso_alias_return_thunk(void); extern void retbleed_untrain_ret(void); extern void srso_untrain_ret(void); -extern void srso_untrain_ret_alias(void); +extern void srso_alias_untrain_ret(void); extern void entry_ibpb(void); diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 51d008840ffb..7578527f43dd 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -141,10 +141,10 @@ SECTIONS #ifdef CONFIG_CPU_SRSO /* - * See the comment above srso_untrain_ret_alias()'s + * See the comment above srso_alias_untrain_ret()'s * definition. */ - . = srso_untrain_ret_alias | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); + . = srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); *(.text.__x86.rethunk_safe) #endif ALIGN_ENTRY_TEXT_END @@ -527,8 +527,8 @@ INIT_PER_CPU(irq_stack_backing_store); * Instead do: (A | B) - (A & B) in order to compute the XOR * of the two function addresses: */ -. = ASSERT(((ABSOLUTE(srso_untrain_ret_alias) | srso_safe_ret_alias) - - (ABSOLUTE(srso_untrain_ret_alias) & srso_safe_ret_alias)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), +. = ASSERT(((ABSOLUTE(srso_alias_untrain_ret) | srso_alias_safe_ret) - + (ABSOLUTE(srso_alias_untrain_ret) & srso_alias_safe_ret)) == ((1 << 2) | (1 << 8) | (1 << 14) | (1 << 20)), "SRSO function pair won't alias"); #endif diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 889882f8c3c8..b5697ba1ca71 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -75,55 +75,55 @@ SYM_CODE_END(__x86_indirect_thunk_array) #ifdef CONFIG_RETHUNK /* - * srso_untrain_ret_alias() and srso_safe_ret_alias() are placed at + * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at * special addresses: * - * - srso_untrain_ret_alias() is 2M aligned - * - srso_safe_ret_alias() is also in the same 2M page but bits 2, 8, 14 + * - srso_alias_untrain_ret() is 2M aligned + * - srso_alias_safe_ret() is also in the same 2M page but bits 2, 8, 14 * and 20 in its virtual address are set (while those bits in the - * srso_untrain_ret_alias() function are cleared). + * srso_alias_untrain_ret() function are cleared). * * This guarantees that those two addresses will alias in the branch * target buffer of Zen3/4 generations, leading to any potential * poisoned entries at that BTB slot to get evicted. * - * As a result, srso_safe_ret_alias() becomes a safe return. + * As a result, srso_alias_safe_ret() becomes a safe return. 
*/ #ifdef CONFIG_CPU_SRSO .section .text.__x86.rethunk_untrain -SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) UNWIND_HINT_FUNC ASM_NOP2 lfence jmp srso_alias_return_thunk -SYM_FUNC_END(srso_untrain_ret_alias) -__EXPORT_THUNK(srso_untrain_ret_alias) +SYM_FUNC_END(srso_alias_untrain_ret) +__EXPORT_THUNK(srso_alias_untrain_ret) .section .text.__x86.rethunk_safe #else /* dummy definition for alternatives */ -SYM_START(srso_untrain_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) +SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) ANNOTATE_UNRET_SAFE ret int3 -SYM_FUNC_END(srso_untrain_ret_alias) +SYM_FUNC_END(srso_alias_untrain_ret) #endif -SYM_START(srso_safe_ret_alias, SYM_L_GLOBAL, SYM_A_NONE) +SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE) add $8, %_ASM_SP UNWIND_HINT_FUNC ANNOTATE_UNRET_SAFE ret int3 -SYM_FUNC_END(srso_safe_ret_alias) +SYM_FUNC_END(srso_alias_safe_ret) .section .text.__x86.return_thunk SYM_CODE_START(srso_alias_return_thunk) UNWIND_HINT_FUNC ANNOTATE_NOENDBR - call srso_safe_ret_alias + call srso_alias_safe_ret ud2 SYM_CODE_END(srso_alias_return_thunk) From 43548590ad7e794392ad6f3f8c13d8c1593bdf61 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 14 Aug 2023 13:44:34 +0200 Subject: [PATCH 503/513] x86/cpu: Cleanup the untrain mess commit e7c25c441e9e0fa75b4c83e0b26306b702cfe90d upstream. Since there can only be one active return_thunk, there only needs be one (matching) untrain_ret. It fundamentally doesn't make sense to allow multiple untrain_ret at the same time. Fold all the 3 different untrain methods into a single (temporary) helper stub. Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814121149.042774962@infradead.org Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/nospec-branch.h | 12 ++++-------- arch/x86/kernel/cpu/bugs.c | 1 + arch/x86/lib/retpoline.S | 7 +++++++ 3 files changed, 12 insertions(+), 8 deletions(-) diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h index b472f2c84b2f..940c15ee5650 100644 --- a/arch/x86/include/asm/nospec-branch.h +++ b/arch/x86/include/asm/nospec-branch.h @@ -156,9 +156,9 @@ .endm #ifdef CONFIG_CPU_UNRET_ENTRY -#define CALL_ZEN_UNTRAIN_RET "call retbleed_untrain_ret" +#define CALL_UNTRAIN_RET "call entry_untrain_ret" #else -#define CALL_ZEN_UNTRAIN_RET "" +#define CALL_UNTRAIN_RET "" #endif /* @@ -177,14 +177,9 @@ defined(CONFIG_CPU_SRSO) ANNOTATE_UNRET_END ALTERNATIVE_2 "", \ - CALL_ZEN_UNTRAIN_RET, X86_FEATURE_UNRET, \ + CALL_UNTRAIN_RET, X86_FEATURE_UNRET, \ "call entry_ibpb", X86_FEATURE_ENTRY_IBPB #endif - -#ifdef CONFIG_CPU_SRSO - ALTERNATIVE_2 "", "call srso_untrain_ret", X86_FEATURE_SRSO, \ - "call srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS -#endif .endm #else /* __ASSEMBLY__ */ @@ -209,6 +204,7 @@ extern void retbleed_untrain_ret(void); extern void srso_untrain_ret(void); extern void srso_alias_untrain_ret(void); +extern void entry_untrain_ret(void); extern void entry_ibpb(void); #ifdef CONFIG_RETPOLINE diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 030a7d263926..91f71355c6f7 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2429,6 +2429,7 @@ static void __init srso_select_mitigation(void) * like ftrace, static_call, etc. 
*/ setup_force_cpu_cap(X86_FEATURE_RETHUNK); + setup_force_cpu_cap(X86_FEATURE_UNRET); if (boot_cpu_data.x86 == 0x19) { setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS); diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index b5697ba1ca71..e807df6e56a7 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -230,6 +230,13 @@ SYM_CODE_START(srso_return_thunk) ud2 SYM_CODE_END(srso_return_thunk) +SYM_FUNC_START(entry_untrain_ret) + ALTERNATIVE_2 "jmp retbleed_untrain_ret", \ + "jmp srso_untrain_ret", X86_FEATURE_SRSO, \ + "jmp srso_alias_untrain_ret", X86_FEATURE_SRSO_ALIAS +SYM_FUNC_END(entry_untrain_ret) +__EXPORT_THUNK(entry_untrain_ret) + SYM_CODE_START(__x86_return_thunk) UNWIND_HINT_FUNC ANNOTATE_NOENDBR From df6495f203a77be3f799630b02ab012e06659554 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Mon, 14 Aug 2023 21:29:50 +0200 Subject: [PATCH 504/513] x86/srso: Explain the untraining sequences a bit more commit 9dbd23e42ff0b10c9b02c9e649c76e5228241a8e upstream. The goal is to eventually have a proper documentation about all this. Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230814164447.GFZNpZ/64H4lENIe94@fat_crate.local Signed-off-by: Greg Kroah-Hartman --- arch/x86/lib/retpoline.S | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index e807df6e56a7..666dce6002db 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -127,6 +127,25 @@ SYM_CODE_START(srso_alias_return_thunk) ud2 SYM_CODE_END(srso_alias_return_thunk) +/* + * Some generic notes on the untraining sequences: + * + * They are interchangeable when it comes to flushing potentially wrong + * RET predictions from the BTB. + * + * The SRSO Zen1/2 (MOVABS) untraining sequence is longer than the + * Retbleed sequence because the return sequence done there + * (srso_safe_ret()) is longer and the return sequence must fully nest + * (end before) the untraining sequence. Therefore, the untraining + * sequence must fully overlap the return sequence. + * + * Regarding alignment - the instructions which need to be untrained, + * must all start at a cacheline boundary for Zen1/2 generations. That + * is, instruction sequences starting at srso_safe_ret() and + * the respective instruction sequences at retbleed_return_thunk() + * must start at a cacheline boundary. + */ + /* * Safety details here pertain to the AMD Zen{1,2} microarchitecture: * 1) The RET at retbleed_return_thunk must be on a 64 byte boundary, for From 19f23d16b0e0bf433644772ef6593d7492e9b03b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 16 Aug 2023 12:44:19 +0200 Subject: [PATCH 505/513] x86/static_call: Fix __static_call_fixup() commit 54097309620ef0dc2d7083783dc521c6a5fef957 upstream. Christian reported spurious module load crashes after some of Song's module memory layout patches. Turns out that if the very last instruction on the very last page of the module is a 'JMP __x86_return_thunk' then __static_call_fixup() will trip a fault and die. And while the module rework made this slightly more likely to happen, it's always been possible. 
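Concretely, __static_call_fixup() has to peek at the three bytes after the RET site (tramp+5 .. tramp+7) before it knows whether the site is one of its trampolines at all, and if the JMP is the last instruction on the last text page those reads can land on an unmapped page. The added guard is just a bounds check on that peek; on its own it reads roughly like:

    /* is the window up to addr + 7 fully backed by kernel text? */
    static bool fixup_window_is_mapped(unsigned long addr)  /* illustrative name */
    {
            if ((addr >> PAGE_SHIFT) == ((addr + 7) >> PAGE_SHIFT))
                    return true;                    /* stays on one page */
            return kernel_text_address(addr + 7);   /* next page mapped? */
    }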
Fixes: ee88d363d156 ("x86,static_call: Use alternative RET encoding") Reported-by: Christian Bricart Signed-off-by: Peter Zijlstra (Intel) Acked-by: Josh Poimboeuf Link: https://lkml.kernel.org/r/20230816104419.GA982867@hirez.programming.kicks-ass.net Signed-off-by: Sasha Levin Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/static_call.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/x86/kernel/static_call.c b/arch/x86/kernel/static_call.c index 2fc4f96702e6..b48b659ccf6f 100644 --- a/arch/x86/kernel/static_call.c +++ b/arch/x86/kernel/static_call.c @@ -135,6 +135,19 @@ EXPORT_SYMBOL_GPL(arch_static_call_transform); */ bool __static_call_fixup(void *tramp, u8 op, void *dest) { + unsigned long addr = (unsigned long)tramp; + /* + * Not all .return_sites are a static_call trampoline (most are not). + * Check if the 3 bytes after the return are still kernel text, if not, + * then this definitely is not a trampoline and we need not worry + * further. + * + * This avoids the memcmp() below tripping over pagefaults etc.. + */ + if (((addr >> PAGE_SHIFT) != ((addr + 7) >> PAGE_SHIFT)) && + !kernel_text_address(addr + 7)) + return false; + if (memcmp(tramp+5, tramp_ud, 3)) { /* Not a trampoline site, not our problem. */ return false; From bbe585239d4fb4dd5f0235446fa86c5abdd0098b Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 11 Aug 2023 08:52:55 -0700 Subject: [PATCH 506/513] x86/retpoline: Don't clobber RFLAGS during srso_safe_ret() commit ba5ca5e5e6a1d55923e88b4a83da452166f5560e upstream. Use LEA instead of ADD when adjusting %rsp in srso_safe_ret{,_alias}() so as to avoid clobbering flags. Drop one of the INT3 instructions to account for the LEA consuming one more byte than the ADD. KVM's emulator makes indirect calls into a jump table of sorts, where the destination of each call is a small blob of code that performs fast emulation by executing the target instruction with fixed operands. E.g. to emulate ADC, fastop() invokes adcb_al_dl(): adcb_al_dl: <+0>: adc %dl,%al <+2>: jmp <__x86_return_thunk> A major motivation for doing fast emulation is to leverage the CPU to handle consumption and manipulation of arithmetic flags, i.e. RFLAGS is both an input and output to the target of the call. fastop() collects the RFLAGS result by pushing RFLAGS onto the stack and popping them back into a variable (held in %rdi in this case): asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n" <+71>: mov 0xc0(%r8),%rdx <+78>: mov 0x100(%r8),%rcx <+85>: push %rdi <+86>: popf <+87>: call *%rsi <+89>: nop <+90>: nop <+91>: nop <+92>: pushf <+93>: pop %rdi and then propagating the arithmetic flags into the vCPU's emulator state: ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK); <+64>: and $0xfffffffffffff72a,%r9 <+94>: and $0x8d5,%edi <+109>: or %rdi,%r9 <+122>: mov %r9,0x10(%r8) The failures can be most easily reproduced by running the "emulator" test in KVM-Unit-Tests. If you're feeling a bit of deja vu, see commit b63f20a778c8 ("x86/retpoline: Don't clobber RFLAGS during CALL_NOSPEC on i386"). In addition, this breaks booting of clang-compiled guest on a gcc-compiled host where the host contains the %rsp-modifying SRSO mitigations. [ bp: Massage commit message, extend, remove addresses. 
] Fixes: fb3bd914b3ec ("x86/srso: Add a Speculative RAS Overflow mitigation") Closes: https://lore.kernel.org/all/de474347-122d-54cd-eabf-9dcc95ab9eae@amd.com Reported-by: Srikanth Aithal Reported-by: Nathan Chancellor Signed-off-by: Sean Christopherson Signed-off-by: Borislav Petkov (AMD) Tested-by: Nathan Chancellor Cc: stable@vger.kernel.org Link: https://lore.kernel.org/20230810013334.GA5354@dev-arch.thelio-3990X/ Link: https://lore.kernel.org/r/20230811155255.250835-1-seanjc@google.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/lib/retpoline.S | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index 666dce6002db..bfa7f51aadfd 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -111,7 +111,7 @@ SYM_FUNC_END(srso_alias_untrain_ret) #endif SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE) - add $8, %_ASM_SP + lea 8(%_ASM_SP), %_ASM_SP UNWIND_HINT_FUNC ANNOTATE_UNRET_SAFE ret @@ -211,7 +211,7 @@ __EXPORT_THUNK(retbleed_untrain_ret) * SRSO untraining sequence for Zen1/2, similar to retbleed_untrain_ret() * above. On kernel entry, srso_untrain_ret() is executed which is a * - * movabs $0xccccccc308c48348,%rax + * movabs $0xccccc30824648d48,%rax * * and when the return thunk executes the inner label srso_safe_ret() * later, it is a stack manipulation and a RET which is mispredicted and @@ -229,11 +229,10 @@ SYM_START(srso_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) * the stack. */ SYM_INNER_LABEL(srso_safe_ret, SYM_L_GLOBAL) - add $8, %_ASM_SP + lea 8(%_ASM_SP), %_ASM_SP ret int3 int3 - int3 /* end of movabs */ lfence call srso_safe_ret From aa0777ce0d3d3237ffb59cd065420f64ad85d4c2 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Fri, 11 Aug 2023 23:38:24 +0200 Subject: [PATCH 507/513] x86/CPU/AMD: Fix the DIV(0) initial fix attempt commit f58d6fbcb7c848b7f2469be339bc571f2e9d245b upstream. Initially, it was thought that doing an innocuous division in the #DE handler would take care to prevent any leaking of old data from the divider but by the time the fault is raised, the speculation has already advanced too far and such data could already have been used by younger operations. Therefore, do the innocuous division on every exit to userspace so that userspace doesn't see any potentially old data from integer divisions in kernel space. Do the same before VMRUN too, to protect host data from leaking into the guest too. 
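The scrubbing primitive itself is unchanged: amd_clear_divider() issues a harmless 0/1 division on parts with X86_BUG_DIV0 set so that any stale quotient from a previous real division is overwritten. This patch only moves where it is called (exit to user space, and just before VMRUN) and exports it for KVM; for reference the helper is simply:

    void noinstr amd_clear_divider(void)
    {
            /* the DIV is patched in only on CPUs with X86_BUG_DIV0 */
            asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0)
                         :: "a" (0), "d" (0), "r" (1));
    }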
Fixes: 77245f1c3c64 ("x86/CPU/AMD: Do not leak quotient data after a division by 0") Signed-off-by: Borislav Petkov (AMD) Cc: Link: https://lore.kernel.org/r/20230811213824.10025-1-bp@alien8.de Signed-off-by: Greg Kroah-Hartman --- arch/x86/include/asm/entry-common.h | 1 + arch/x86/kernel/cpu/amd.c | 1 + arch/x86/kernel/traps.c | 2 -- arch/x86/kvm/svm/svm.c | 2 ++ 4 files changed, 4 insertions(+), 2 deletions(-) diff --git a/arch/x86/include/asm/entry-common.h b/arch/x86/include/asm/entry-common.h index 43184640b579..a12fdf01dc26 100644 --- a/arch/x86/include/asm/entry-common.h +++ b/arch/x86/include/asm/entry-common.h @@ -92,6 +92,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, static __always_inline void arch_exit_to_user_mode(void) { mds_user_clear_cpu_buffers(); + amd_clear_divider(); } #define arch_exit_to_user_mode arch_exit_to_user_mode diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 0ca7123417ab..0a0230bd5089 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -1320,3 +1320,4 @@ void noinstr amd_clear_divider(void) asm volatile(ALTERNATIVE("", "div %2\n\t", X86_BUG_DIV0) :: "a" (0), "d" (0), "r" (1)); } +EXPORT_SYMBOL_GPL(amd_clear_divider); diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c index 3361d32d090f..ca47080e3774 100644 --- a/arch/x86/kernel/traps.c +++ b/arch/x86/kernel/traps.c @@ -202,8 +202,6 @@ DEFINE_IDTENTRY(exc_divide_error) { do_error_trap(regs, 0, "divide error", X86_TRAP_DE, SIGFPE, FPE_INTDIV, error_get_trap_addr(regs)); - - amd_clear_divider(); } DEFINE_IDTENTRY(exc_overflow) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index d63c3843e493..8e9a6c41f9ee 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -1452,6 +1452,8 @@ static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); struct svm_cpu_data *sd = per_cpu(svm_data, vcpu->cpu); + amd_clear_divider(); + if (sev_es_guest(vcpu->kvm)) sev_es_unmap_ghcb(svm); From fa24cd0fbcb7245229509dfef6db528bc9f187f9 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Sun, 13 Aug 2023 12:39:34 +0200 Subject: [PATCH 508/513] x86/srso: Disable the mitigation on unaffected configurations commit e9fbc47b818b964ddff5df5b2d5c0f5f32f4a147 upstream. Skip the srso cmd line parsing which is not needed on Zen1/2 with SMT disabled and with the proper microcode applied (latter should be the case anyway) as those are not affected. Fixes: 5a15d8348881 ("x86/srso: Tie SBPB bit setting to microcode patch detection") Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230813104517.3346-1-bp@alien8.de Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/bugs.c | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 91f71355c6f7..817c7bb4feca 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2399,8 +2399,10 @@ static void __init srso_select_mitigation(void) * IBPB microcode has been applied. 
*/ if ((boot_cpu_data.x86 < 0x19) && - (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) + (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) { setup_force_cpu_cap(X86_FEATURE_SRSO_NO); + return; + } } if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) { @@ -2686,6 +2688,9 @@ static ssize_t gds_show_state(char *buf) static ssize_t srso_show_state(char *buf) { + if (boot_cpu_has(X86_FEATURE_SRSO_NO)) + return sysfs_emit(buf, "Not affected\n"); + return sysfs_emit(buf, "%s%s\n", srso_strings[srso_mitigation], (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode")); From 55f1cbeaa15945e38bf892380966a9eb90f7734b Mon Sep 17 00:00:00 2001 From: Petr Pavlu Date: Tue, 11 Jul 2023 11:19:51 +0200 Subject: [PATCH 509/513] x86/retpoline,kprobes: Fix position of thunk sections with CONFIG_LTO_CLANG commit 79cd2a11224eab86d6673fe8a11d2046ae9d2757 upstream. The linker script arch/x86/kernel/vmlinux.lds.S matches the thunk sections ".text.__x86.*" from arch/x86/lib/retpoline.S as follows: .text { [...] TEXT_TEXT [...] __indirect_thunk_start = .; *(.text.__x86.*) __indirect_thunk_end = .; [...] } Macro TEXT_TEXT references TEXT_MAIN which normally expands to only ".text". However, with CONFIG_LTO_CLANG, TEXT_MAIN becomes ".text .text.[0-9a-zA-Z_]*" which wrongly matches also the thunk sections. The output layout is then different than expected. For instance, the currently defined range [__indirect_thunk_start, __indirect_thunk_end] becomes empty. Prevent the problem by using ".." as the first separator, for example, ".text..__x86.indirect_thunk". This pattern is utilized by other explicit section names which start with one of the standard prefixes, such as ".text" or ".data", and that need to be individually selected in the linker script. [ nathan: Fix conflicts with SRSO and fold in fix issue brought up by Andrew Cooper in post-review: https://lore.kernel.org/20230803230323.1478869-1-andrew.cooper3@citrix.com ] Fixes: dc5723b02e52 ("kbuild: add support for Clang LTO") Signed-off-by: Petr Pavlu Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Nathan Chancellor Signed-off-by: Borislav Petkov (AMD) Link: https://lore.kernel.org/r/20230711091952.27944-2-petr.pavlu@suse.com Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/vmlinux.lds.S | 8 ++++---- arch/x86/lib/retpoline.S | 8 ++++---- tools/objtool/check.c | 2 +- 3 files changed, 9 insertions(+), 9 deletions(-) diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 7578527f43dd..ca1a7595edac 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S @@ -134,7 +134,7 @@ SECTIONS KPROBES_TEXT ALIGN_ENTRY_TEXT_BEGIN #ifdef CONFIG_CPU_SRSO - *(.text.__x86.rethunk_untrain) + *(.text..__x86.rethunk_untrain) #endif ENTRY_TEXT @@ -145,7 +145,7 @@ SECTIONS * definition. */ . 
= srso_alias_untrain_ret | (1 << 2) | (1 << 8) | (1 << 14) | (1 << 20); - *(.text.__x86.rethunk_safe) + *(.text..__x86.rethunk_safe) #endif ALIGN_ENTRY_TEXT_END SOFTIRQENTRY_TEXT @@ -155,8 +155,8 @@ SECTIONS #ifdef CONFIG_RETPOLINE __indirect_thunk_start = .; - *(.text.__x86.indirect_thunk) - *(.text.__x86.return_thunk) + *(.text..__x86.indirect_thunk) + *(.text..__x86.return_thunk) __indirect_thunk_end = .; #endif } :text =0xcccc diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S index bfa7f51aadfd..6f5321b36dbb 100644 --- a/arch/x86/lib/retpoline.S +++ b/arch/x86/lib/retpoline.S @@ -11,7 +11,7 @@ #include #include - .section .text.__x86.indirect_thunk + .section .text..__x86.indirect_thunk .macro RETPOLINE reg ANNOTATE_INTRA_FUNCTION_CALL @@ -90,7 +90,7 @@ SYM_CODE_END(__x86_indirect_thunk_array) * As a result, srso_alias_safe_ret() becomes a safe return. */ #ifdef CONFIG_CPU_SRSO - .section .text.__x86.rethunk_untrain + .section .text..__x86.rethunk_untrain SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) UNWIND_HINT_FUNC @@ -100,7 +100,7 @@ SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) SYM_FUNC_END(srso_alias_untrain_ret) __EXPORT_THUNK(srso_alias_untrain_ret) - .section .text.__x86.rethunk_safe + .section .text..__x86.rethunk_safe #else /* dummy definition for alternatives */ SYM_START(srso_alias_untrain_ret, SYM_L_GLOBAL, SYM_A_NONE) @@ -118,7 +118,7 @@ SYM_START(srso_alias_safe_ret, SYM_L_GLOBAL, SYM_A_NONE) int3 SYM_FUNC_END(srso_alias_safe_ret) - .section .text.__x86.return_thunk + .section .text..__x86.return_thunk SYM_CODE_START(srso_alias_return_thunk) UNWIND_HINT_FUNC diff --git a/tools/objtool/check.c b/tools/objtool/check.c index f78a12ccd441..d99ba1bd7349 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -370,7 +370,7 @@ static int decode_instructions(struct objtool_file *file) if (!strcmp(sec->name, ".noinstr.text") || !strcmp(sec->name, ".entry.text") || - !strncmp(sec->name, ".text.__x86.", 12)) + !strncmp(sec->name, ".text..__x86.", 13)) sec->noinstr = true; for (offset = 0; offset < sec->sh.sh_size; offset += insn->len) { From 484eefc6ff9c6dc03f056d0ec4056fc667cef3d2 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 16 Aug 2023 13:59:21 +0200 Subject: [PATCH 510/513] objtool/x86: Fixup frame-pointer vs rethunk commit dbf46008775516f7f25c95b7760041c286299783 upstream. For stack-validation of a frame-pointer build, objtool validates that every CALL instruction is preceded by a frame-setup. The new SRSO return thunks violate this with their RSB stuffing trickery. Extend the __fentry__ exception to also cover the embedded_insn case used for this. 
This cures: vmlinux.o: warning: objtool: srso_untrain_ret+0xd: call without frame pointer save/setup Fixes: 4ae68b26c3ab ("objtool/x86: Fix SRSO mess") Signed-off-by: Peter Zijlstra (Intel) Signed-off-by: Borislav Petkov (AMD) Acked-by: Josh Poimboeuf Link: https://lore.kernel.org/r/20230816115921.GH980931@hirez.programming.kicks-ass.net Signed-off-by: Greg Kroah-Hartman --- tools/objtool/check.c | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index d99ba1bd7349..f9ff878d6ba1 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -2174,12 +2174,17 @@ static int decode_sections(struct objtool_file *file) return 0; } -static bool is_fentry_call(struct instruction *insn) +static bool is_special_call(struct instruction *insn) { - if (insn->type == INSN_CALL && - insn->call_dest && - insn->call_dest->fentry) - return true; + if (insn->type == INSN_CALL) { + struct symbol *dest = insn->call_dest; + + if (!dest) + return false; + + if (dest->fentry) + return true; + } return false; } @@ -3125,7 +3130,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func, if (ret) return ret; - if (!no_fp && func && !is_fentry_call(insn) && + if (!no_fp && func && !is_special_call(insn) && !has_valid_stack_frame(&state)) { WARN_FUNC("call without frame pointer save/setup", sec, insn->offset); From 9080f4fcc20260a1c9e15f19e660e824ccc9b994 Mon Sep 17 00:00:00 2001 From: "Borislav Petkov (AMD)" Date: Tue, 15 Aug 2023 11:53:13 +0200 Subject: [PATCH 511/513] x86/srso: Correct the mitigation status when SMT is disabled commit 6405b72e8d17bd1875a56ae52d23ec3cd51b9d66 upstream. Specify how is SRSO mitigated when SMT is disabled. Also, correct the SMT check for that. Fixes: e9fbc47b818b ("x86/srso: Disable the mitigation on unaffected configurations") Suggested-by: Josh Poimboeuf Signed-off-by: Borislav Petkov (AMD) Acked-by: Josh Poimboeuf Link: https://lore.kernel.org/r/20230814200813.p5czl47zssuej7nv@treble Signed-off-by: Greg Kroah-Hartman --- arch/x86/kernel/cpu/bugs.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c index 817c7bb4feca..0d2c5fe84141 100644 --- a/arch/x86/kernel/cpu/bugs.c +++ b/arch/x86/kernel/cpu/bugs.c @@ -2398,8 +2398,7 @@ static void __init srso_select_mitigation(void) * Zen1/2 with SMT off aren't vulnerable after the right * IBPB microcode has been applied. 
*/ - if ((boot_cpu_data.x86 < 0x19) && - (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) { + if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) { setup_force_cpu_cap(X86_FEATURE_SRSO_NO); return; } @@ -2689,7 +2688,7 @@ static ssize_t gds_show_state(char *buf) static ssize_t srso_show_state(char *buf) { if (boot_cpu_has(X86_FEATURE_SRSO_NO)) - return sysfs_emit(buf, "Not affected\n"); + return sysfs_emit(buf, "Mitigation: SMT disabled\n"); return sysfs_emit(buf, "%s%s\n", srso_strings[srso_mitigation], From 5ddfe5cc87167343bd4c17f776de7b7aa1475b0c Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Sat, 26 Aug 2023 14:23:41 +0200 Subject: [PATCH 512/513] Linux 5.15.128 Link: https://lore.kernel.org/r/20230824145023.559380953@linuxfoundation.org Tested-by: Florian Fainelli Tested-by: SeongJae Park Tested-by: Joel Fernandes (Google) Tested-by: Ron Economos Tested-by: Sudip Mukherjee Tested-by: Jon Hunter Tested-by: Linux Kernel Functional Testing Tested-by: Guenter Roeck Tested-by: Allen Pais Tested-by: Shuah Khan Tested-by: Harshit Mogalapalli Signed-off-by: Greg Kroah-Hartman --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index f5e69631ca58..2b5160227e82 100644 --- a/Makefile +++ b/Makefile @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 5 PATCHLEVEL = 15 -SUBLEVEL = 127 +SUBLEVEL = 128 EXTRAVERSION = NAME = Trick or Treat From 255037398f1c485eb75f28fc0e2a8ea55a71f4a4 Mon Sep 17 00:00:00 2001 From: Umer Saleem Date: Tue, 29 Aug 2023 10:59:57 +0500 Subject: [PATCH 513/513] Bump changelog after merging v5.15.128 Signed-off-by: Umer Saleem --- scripts/package/truenas/changelog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/scripts/package/truenas/changelog b/scripts/package/truenas/changelog index f463b78200de..593a6152f3b1 100644 --- a/scripts/package/truenas/changelog +++ b/scripts/package/truenas/changelog @@ -1,3 +1,9 @@ +linux-5.15.128+truenas (5.15.128+truenas-1) sid; urgency=low + + * Rebase local commits onto upstream v5.15.128 + + -- iXsystems engineering team Tue, 29 Aug 2023 11:00:00 +0500 + linux-5.15.123+truenas (5.15.123+truenas-1) sid; urgency=low * Rebase local commits onto upstream v5.15.123