diff --git a/.mailmap b/.mailmap index 428d721ffbb168..0411dea0ded768 100644 --- a/.mailmap +++ b/.mailmap @@ -786,7 +786,8 @@ Subash Abhinov Kasiviswanathan Subhash Jadavani Sudarshan Rajagopalan -Sudeep Holla Sudeep KarkadaNagesha +Sudeep Holla Sudeep KarkadaNagesha +Sudeep Holla Sumit Garg Sumit Semwal Surabhi Vishnoi @@ -851,6 +852,7 @@ Valentin Schneider Veera Sundaram Sankaran Veerabhadrarao Badiganti Venkateswara Naralasetty +Viacheslav Bocharov Vikash Garodia Vikash Garodia Vincent Mailhol diff --git a/Documentation/ABI/testing/sysfs-class-tsm b/Documentation/ABI/testing/sysfs-class-tsm index 6fc1a5ac6da1af..2949468deaf72c 100644 --- a/Documentation/ABI/testing/sysfs-class-tsm +++ b/Documentation/ABI/testing/sysfs-class-tsm @@ -7,13 +7,3 @@ Description: signals when the PCI layer is able to support establishment of link encryption and other device-security features coordinated through a platform tsm. - -What: /sys/class/tsm/tsmN/streamH.R.E -Contact: linux-pci@vger.kernel.org -Description: - (RO) When a host bridge has established a secure connection via - the platform TSM, symlink appears. The primary function of this - is have a system global review of TSM resource consumption - across host bridges. The link points to the endpoint PCI device - and matches the same link published by the host bridge. See - Documentation/ABI/testing/sysfs-devices-pci-host-bridge. diff --git a/Documentation/admin-guide/sysctl/vm.rst b/Documentation/admin-guide/sysctl/vm.rst index 245bf63949356c..06d0ebddeefa21 100644 --- a/Documentation/admin-guide/sysctl/vm.rst +++ b/Documentation/admin-guide/sysctl/vm.rst @@ -231,6 +231,8 @@ eventually gets pushed out to disk. This tunable is used to define when dirty inode is old enough to be eligible for writeback by the kernel flusher threads. And, it is also used as the interval to wakeup dirtytime_writeback thread. +Setting this to zero disables periodic dirtytime writeback. 
+ dirty_writeback_centisecs ========================= diff --git a/Documentation/devicetree/bindings/pinctrl/marvell,armada3710-xb-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/marvell,armada3710-xb-pinctrl.yaml index 51bad2e8d6f1f7..4f9013d3687499 100644 --- a/Documentation/devicetree/bindings/pinctrl/marvell,armada3710-xb-pinctrl.yaml +++ b/Documentation/devicetree/bindings/pinctrl/marvell,armada3710-xb-pinctrl.yaml @@ -88,7 +88,7 @@ patternProperties: pcie1_clkreq, pcie1_wakeup, pmic0, pmic1, ptp, ptp_clk, ptp_trig, pwm0, pwm1, pwm2, pwm3, rgmii, sdio0, sdio_sb, smi, spi_cs1, spi_cs2, spi_cs3, spi_quad, uart1, uart2, - usb2_drvvbus1, usb32_drvvbus ] + usb2_drvvbus1, usb32_drvvbus0 ] function: enum: [ drvbus, emmc, gpio, i2c, jtag, led, mii, mii_err, onewire, diff --git a/Documentation/devicetree/bindings/sound/fsl,sai.yaml b/Documentation/devicetree/bindings/sound/fsl,sai.yaml index 0d733e5b08a4ef..d838ee0b61cb21 100644 --- a/Documentation/devicetree/bindings/sound/fsl,sai.yaml +++ b/Documentation/devicetree/bindings/sound/fsl,sai.yaml @@ -44,6 +44,7 @@ properties: - items: - enum: - fsl,imx94-sai + - fsl,imx952-sai - const: fsl,imx95-sai reg: diff --git a/MAINTAINERS b/MAINTAINERS index 67db88b04537b4..36abe938f960df 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -335,7 +335,7 @@ F: tools/power/acpi/ ACPI FOR ARM64 (ACPI/arm64) M: Lorenzo Pieralisi M: Hanjun Guo -M: Sudeep Holla +M: Sudeep Holla L: linux-acpi@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -351,7 +351,7 @@ F: drivers/acpi/riscv/ F: include/linux/acpi_rimt.h ACPI PCC(Platform Communication Channel) MAILBOX DRIVER -M: Sudeep Holla +M: Sudeep Holla L: linux-acpi@vger.kernel.org S: Supported F: drivers/mailbox/pcc.c @@ -2747,14 +2747,14 @@ F: arch/arm/include/asm/hardware/dec21285.h F: arch/arm/mach-footbridge/ ARM/FREESCALE IMX / MXC ARM ARCHITECTURE -M: Shawn Guo +M: Frank Li M: Sascha Hauer R: Pengutronix Kernel Team R: Fabio Estevam L: imx@lists.linux.dev L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/frank.li/linux.git F: Documentation/devicetree/bindings/firmware/fsl* F: Documentation/devicetree/bindings/firmware/nxp* F: arch/arm/boot/dts/nxp/imx/ @@ -2769,22 +2769,22 @@ N: mxs N: \bmxc[^\d] ARM/FREESCALE LAYERSCAPE ARM ARCHITECTURE -M: Shawn Guo +M: Frank Li L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/frank.li/linux.git F: arch/arm/boot/dts/nxp/ls/ F: arch/arm64/boot/dts/freescale/fsl-* F: arch/arm64/boot/dts/freescale/qoriq-* ARM/FREESCALE VYBRID ARM ARCHITECTURE -M: Shawn Guo +M: Frank Li M: Sascha Hauer R: Pengutronix Kernel Team R: Stefan Agner L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained -T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git +T: git git://git.kernel.org/pub/scm/linux/kernel/git/frank.li/linux.git F: arch/arm/boot/dts/nxp/vf/ F: arch/arm/mach-imx/*vf610* @@ -3681,7 +3681,7 @@ N: uniphier ARM/VERSATILE EXPRESS PLATFORM M: Liviu Dudau -M: Sudeep Holla +M: Sudeep Holla M: Lorenzo Pieralisi L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -6513,7 +6513,7 @@ F: drivers/i2c/busses/i2c-cp2615.c CPU FREQUENCY 
DRIVERS - VEXPRESS SPC ARM BIG LITTLE M: Viresh Kumar -M: Sudeep Holla +M: Sudeep Holla L: linux-pm@vger.kernel.org S: Maintained W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php @@ -6609,7 +6609,7 @@ F: include/linux/platform_data/cpuidle-exynos.h CPUIDLE DRIVER - ARM PSCI M: Lorenzo Pieralisi -M: Sudeep Holla +M: Sudeep Holla M: Ulf Hansson L: linux-pm@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -9260,7 +9260,6 @@ F: drivers/scsi/be2iscsi/ EMULEX 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER (be2net) M: Ajit Khaparde M: Sriharsha Basavapatna -M: Somnath Kotur L: netdev@vger.kernel.org S: Maintained W: http://www.emulex.com @@ -9816,7 +9815,7 @@ F: include/uapi/linux/firewire*.h F: tools/firewire/ FIRMWARE FRAMEWORK FOR ARMV8-A -M: Sudeep Holla +M: Sudeep Holla L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/firmware/arm_ffa/ @@ -10514,7 +10513,7 @@ S: Maintained F: scripts/gendwarfksyms/ GENERIC ARCHITECTURE TOPOLOGY -M: Sudeep Holla +M: Sudeep Holla L: linux-kernel@vger.kernel.org S: Maintained F: drivers/base/arch_topology.c @@ -11376,6 +11375,11 @@ F: Documentation/ABI/testing/sysfs-devices-platform-kunpeng_hccs F: drivers/soc/hisilicon/kunpeng_hccs.c F: drivers/soc/hisilicon/kunpeng_hccs.h +HISILICON SOC HHA DRIVER +M: Yushan Wang +S: Maintained +F: drivers/cache/hisi_soc_hha.c + HISILICON LPC BUS DRIVER M: Jay Fang S: Maintained @@ -15101,7 +15105,7 @@ F: drivers/mailbox/arm_mhuv2.c F: include/linux/mailbox/arm_mhuv2_message.h MAILBOX ARM MHUv3 -M: Sudeep Holla +M: Sudeep Holla M: Cristian Marussi L: linux-kernel@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -20578,7 +20582,7 @@ F: drivers/pinctrl/pinctrl-amd.c PIN CONTROLLER - FREESCALE M: Dong Aisheng M: Fabio Estevam -M: Shawn Guo +M: Frank Li M: Jacky Bai R: Pengutronix Kernel Team R: NXP S32 Linux Team @@ -23631,7 +23635,7 @@ F: include/uapi/linux/sed* SECURE MONITOR CALL(SMC) CALLING CONVENTION (SMCCC) M: Mark Rutland M: Lorenzo Pieralisi -M: Sudeep Holla +M: Sudeep Holla L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained F: drivers/firmware/smccc/ @@ -25395,7 +25399,7 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/lee/mfd.git F: drivers/mfd/syscon.c SYSTEM CONTROL & POWER/MANAGEMENT INTERFACE (SCPI/SCMI) Message Protocol drivers -M: Sudeep Holla +M: Sudeep Holla R: Cristian Marussi L: arm-scmi@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) @@ -26547,7 +26551,7 @@ F: samples/tsm-mr/ TRUSTED SERVICES TEE DRIVER M: Balint Dobszay -M: Sudeep Holla +M: Sudeep Holla L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: trusted-services@lists.trustedfirmware.org S: Maintained diff --git a/Makefile b/Makefile index 3373308d2217ce..bde507d5c03dbc 100644 --- a/Makefile +++ b/Makefile @@ -2,7 +2,7 @@ VERSION = 6 PATCHLEVEL = 19 SUBLEVEL = 0 -EXTRAVERSION = -rc7 +EXTRAVERSION = -rc8 NAME = Baby Opossum Posse # *DOCUMENTATION* @@ -1624,7 +1624,8 @@ MRPROPER_FILES += include/config include/generated \ certs/x509.genkey \ vmlinux-gdb.py \ rpmbuild \ - rust/libmacros.so rust/libmacros.dylib + rust/libmacros.so rust/libmacros.dylib \ + rust/libpin_init_internal.so rust/libpin_init_internal.dylib # clean - Delete most, but leave enough to build external modules # diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig index 
45288ec9eaf736..35e9eb180c9a1c 100644 --- a/arch/arm64/configs/defconfig +++ b/arch/arm64/configs/defconfig @@ -670,7 +670,6 @@ CONFIG_PINCTRL_LPASS_LPI=m CONFIG_PINCTRL_SC7280_LPASS_LPI=m CONFIG_PINCTRL_SM6115_LPASS_LPI=m CONFIG_PINCTRL_SM8250_LPASS_LPI=m -CONFIG_PINCTRL_SM8350_LPASS_LPI=m CONFIG_PINCTRL_SM8450_LPASS_LPI=m CONFIG_PINCTRL_SC8280XP_LPASS_LPI=m CONFIG_PINCTRL_SM8550_LPASS_LPI=m diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c index e5000bef90f2ae..7cf9310de0ec1f 100644 --- a/arch/powerpc/kvm/book3s_hv_uvmem.c +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c @@ -723,7 +723,7 @@ static struct page *kvmppc_uvmem_get_page(unsigned long gpa, struct kvm *kvm) dpage = pfn_to_page(uvmem_pfn); dpage->zone_device_data = pvt; - zone_device_page_init(dpage, 0); + zone_device_page_init(dpage, &kvmppc_uvmem_pgmap, 0); return dpage; out_clear: spin_lock(&kvmppc_uvmem_bitmap_lock); diff --git a/arch/riscv/errata/sifive/errata.c b/arch/riscv/errata/sifive/errata.c index 38aac2c47845a0..d0c61f86cba337 100644 --- a/arch/riscv/errata/sifive/errata.c +++ b/arch/riscv/errata/sifive/errata.c @@ -75,26 +75,12 @@ static u32 __init_or_module sifive_errata_probe(unsigned long archid, return cpu_req_errata; } -static void __init_or_module warn_miss_errata(u32 miss_errata) -{ - int i; - - pr_warn("----------------------------------------------------------------\n"); - pr_warn("WARNING: Missing the following errata may cause potential issues\n"); - for (i = 0; i < ERRATA_SIFIVE_NUMBER; i++) - if (miss_errata & 0x1 << i) - pr_warn("\tSiFive Errata[%d]:%s\n", i, errata_list[i].name); - pr_warn("Please enable the corresponding Kconfig to apply them\n"); - pr_warn("----------------------------------------------------------------\n"); -} - void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, unsigned long archid, unsigned long impid, unsigned int stage) { struct alt_entry *alt; u32 cpu_req_errata; - u32 cpu_apply_errata = 0; u32 tmp; BUILD_BUG_ON(ERRATA_SIFIVE_NUMBER >= RISCV_VENDOR_EXT_ALTERNATIVES_BASE); @@ -118,10 +104,6 @@ void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, patch_text_nosync(ALT_OLD_PTR(alt), ALT_ALT_PTR(alt), alt->alt_len); mutex_unlock(&text_mutex); - cpu_apply_errata |= tmp; } } - if (stage != RISCV_ALTERNATIVES_MODULE && - cpu_apply_errata != cpu_req_errata) - warn_miss_errata(cpu_req_errata - cpu_apply_errata); } diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h index 6081327e55f5b6..28e115eed21805 100644 --- a/arch/riscv/include/asm/compat.h +++ b/arch/riscv/include/asm/compat.h @@ -2,7 +2,7 @@ #ifndef __ASM_COMPAT_H #define __ASM_COMPAT_H -#define COMPAT_UTS_MACHINE "riscv\0\0" +#define COMPAT_UTS_MACHINE "riscv32\0\0" /* * Architecture specific compatibility types diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h index 34313387f9774c..8067e666a4ca68 100644 --- a/arch/riscv/include/asm/syscall.h +++ b/arch/riscv/include/asm/syscall.h @@ -20,7 +20,7 @@ extern void * const sys_call_table[]; extern void * const compat_sys_call_table[]; /* - * Only the low 32 bits of orig_r0 are meaningful, so we return int. + * Only the low 32 bits of orig_a0 are meaningful, so we return int. * This importantly ignores the high bits on 64-bit, so comparisons * sign-extend the low 32 bits. 
*/ diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 5a956108b1eaf5..dbb067e345f05b 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -145,14 +145,14 @@ struct arch_ext_priv { long (*save)(struct pt_regs *regs, void __user *sc_vec); }; -struct arch_ext_priv arch_ext_list[] = { +static struct arch_ext_priv arch_ext_list[] = { { .magic = RISCV_V_MAGIC, .save = &save_v_state, }, }; -const size_t nr_arch_exts = ARRAY_SIZE(arch_ext_list); +static const size_t nr_arch_exts = ARRAY_SIZE(arch_ext_list); static long restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) @@ -297,7 +297,7 @@ static long setup_sigcontext(struct rt_sigframe __user *frame, } else { err |= __put_user(arch_ext->magic, &sc_ext_ptr->magic); err |= __put_user(ext_size, &sc_ext_ptr->size); - sc_ext_ptr = (void *)sc_ext_ptr + ext_size; + sc_ext_ptr = (void __user *)sc_ext_ptr + ext_size; } } /* Write zero to fp-reserved space and check it on restore_sigcontext */ diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c index 7cc8950005b6ec..4c7688670c2d8c 100644 --- a/arch/x86/kvm/irq.c +++ b/arch/x86/kvm/irq.c @@ -514,7 +514,8 @@ void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, */ spin_lock_irq(&kvm->irqfds.lock); - if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI) { + if (irqfd->irq_entry.type == KVM_IRQ_ROUTING_MSI || + WARN_ON_ONCE(irqfd->irq_bypass_vcpu)) { ret = kvm_pi_update_irte(irqfd, NULL); if (ret) pr_info("irq bypass consumer (eventfd %p) unregistration fails: %d\n", diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index 6b77b2033208f1..0f6c8596719b83 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -376,6 +376,7 @@ void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb) static int avic_init_backing_page(struct kvm_vcpu *vcpu) { + u32 max_id = x2avic_enabled ? x2avic_max_physical_id : AVIC_MAX_PHYSICAL_ID; struct kvm_svm *kvm_svm = to_kvm_svm(vcpu->kvm); struct vcpu_svm *svm = to_svm(vcpu); u32 id = vcpu->vcpu_id; @@ -388,8 +389,7 @@ static int avic_init_backing_page(struct kvm_vcpu *vcpu) * avic_vcpu_load() expects to be called if and only if the vCPU has * fully initialized AVIC. 
*/ - if ((!x2avic_enabled && id > AVIC_MAX_PHYSICAL_ID) || - (id > x2avic_max_physical_id)) { + if (id > max_id) { kvm_set_apicv_inhibit(vcpu->kvm, APICV_INHIBIT_REASON_PHYSICAL_ID_TOO_BIG); vcpu->arch.apic->apicv_active = false; return 0; diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 24d59ccfa40d9a..4394be40fe78d7 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -5284,6 +5284,8 @@ static __init void svm_set_cpu_caps(void) */ kvm_cpu_cap_clear(X86_FEATURE_BUS_LOCK_DETECT); kvm_cpu_cap_clear(X86_FEATURE_MSR_IMM); + + kvm_setup_xss_caps(); } static __init int svm_hardware_setup(void) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 6b96f7aea20bd3..8c94241fbcca70 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -8051,6 +8051,8 @@ static __init void vmx_set_cpu_caps(void) kvm_cpu_cap_clear(X86_FEATURE_SHSTK); kvm_cpu_cap_clear(X86_FEATURE_IBT); } + + kvm_setup_xss_caps(); } static bool vmx_is_io_intercepted(struct kvm_vcpu *vcpu, diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 63afdb6bb078b2..72d37c8930ad78 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -9953,6 +9953,23 @@ static struct notifier_block pvclock_gtod_notifier = { }; #endif +void kvm_setup_xss_caps(void) +{ + if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) + kvm_caps.supported_xss = 0; + + if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) && + !kvm_cpu_cap_has(X86_FEATURE_IBT)) + kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL; + + if ((kvm_caps.supported_xss & XFEATURE_MASK_CET_ALL) != XFEATURE_MASK_CET_ALL) { + kvm_cpu_cap_clear(X86_FEATURE_SHSTK); + kvm_cpu_cap_clear(X86_FEATURE_IBT); + kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL; + } +} +EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_setup_xss_caps); + static inline void kvm_ops_update(struct kvm_x86_init_ops *ops) { memcpy(&kvm_x86_ops, ops->runtime_ops, sizeof(kvm_x86_ops)); @@ -10125,19 +10142,6 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops) if (!tdp_enabled) kvm_caps.supported_quirks &= ~KVM_X86_QUIRK_IGNORE_GUEST_PAT; - if (!kvm_cpu_cap_has(X86_FEATURE_XSAVES)) - kvm_caps.supported_xss = 0; - - if (!kvm_cpu_cap_has(X86_FEATURE_SHSTK) && - !kvm_cpu_cap_has(X86_FEATURE_IBT)) - kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL; - - if ((kvm_caps.supported_xss & XFEATURE_MASK_CET_ALL) != XFEATURE_MASK_CET_ALL) { - kvm_cpu_cap_clear(X86_FEATURE_SHSTK); - kvm_cpu_cap_clear(X86_FEATURE_IBT); - kvm_caps.supported_xss &= ~XFEATURE_MASK_CET_ALL; - } - if (kvm_caps.has_tsc_control) { /* * Make sure the user can only configure tsc_khz values that diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index fdab0ad490988e..00de24f55b1fe9 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -471,6 +471,8 @@ extern struct kvm_host_values kvm_host; extern bool enable_pmu; +void kvm_setup_xss_caps(void); + /* * Get a filtered version of KVM's supported XCR0 that strips out dynamic * features for which the current process doesn't (yet) have permission to use. 
diff --git a/drivers/block/rnbd/rnbd-clt.c b/drivers/block/rnbd/rnbd-clt.c index 8194a970f002ab..d1c354636315d2 100644 --- a/drivers/block/rnbd/rnbd-clt.c +++ b/drivers/block/rnbd/rnbd-clt.c @@ -1662,6 +1662,7 @@ static void destroy_sysfs(struct rnbd_clt_dev *dev, /* To avoid deadlock firstly remove itself */ sysfs_remove_file_self(&dev->kobj, sysfs_self); kobject_del(&dev->kobj); + kobject_put(&dev->kobj); } } diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index d0adae3267b41e..2b28515de92c40 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -685,6 +685,8 @@ static int hci_uart_register_dev(struct hci_uart *hu) return err; } + set_bit(HCI_UART_PROTO_INIT, &hu->flags); + if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) return 0; @@ -712,8 +714,6 @@ static int hci_uart_set_proto(struct hci_uart *hu, int id) hu->proto = p; - set_bit(HCI_UART_PROTO_INIT, &hu->flags); - err = hci_uart_register_dev(hu); if (err) { return err; diff --git a/drivers/bus/simple-pm-bus.c b/drivers/bus/simple-pm-bus.c index d8e029e7e53f72..3f00d953fb9a0e 100644 --- a/drivers/bus/simple-pm-bus.c +++ b/drivers/bus/simple-pm-bus.c @@ -142,6 +142,12 @@ static const struct of_device_id simple_pm_bus_of_match[] = { { .compatible = "simple-mfd", .data = ONLY_BUS }, { .compatible = "isa", .data = ONLY_BUS }, { .compatible = "arm,amba-bus", .data = ONLY_BUS }, + { .compatible = "fsl,ls1021a-scfg", }, + { .compatible = "fsl,ls1043a-scfg", }, + { .compatible = "fsl,ls1046a-scfg", }, + { .compatible = "fsl,ls1088a-isc", }, + { .compatible = "fsl,ls2080a-isc", }, + { .compatible = "fsl,lx2160a-isc", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, simple_pm_bus_of_match); diff --git a/drivers/cpufreq/qcom-cpufreq-nvmem.c b/drivers/cpufreq/qcom-cpufreq-nvmem.c index 81e16b5a0245ab..b8081acba928f5 100644 --- a/drivers/cpufreq/qcom-cpufreq-nvmem.c +++ b/drivers/cpufreq/qcom-cpufreq-nvmem.c @@ -263,6 +263,7 @@ static const struct of_device_id qcom_cpufreq_ipq806x_match_list[] __maybe_unuse { .compatible = "qcom,ipq8066", .data = (const void *)QCOM_ID_IPQ8066 }, { .compatible = "qcom,ipq8068", .data = (const void *)QCOM_ID_IPQ8068 }, { .compatible = "qcom,ipq8069", .data = (const void *)QCOM_ID_IPQ8069 }, + { /* sentinel */ } }; static int qcom_cpufreq_ipq8064_name_version(struct device *cpu_dev, diff --git a/drivers/crypto/ccp/sev-dev-tsm.c b/drivers/crypto/ccp/sev-dev-tsm.c index ea29cd5d0ff9fa..40d02adaf3f6da 100644 --- a/drivers/crypto/ccp/sev-dev-tsm.c +++ b/drivers/crypto/ccp/sev-dev-tsm.c @@ -19,12 +19,6 @@ MODULE_IMPORT_NS("PCI_IDE"); -#define TIO_DEFAULT_NR_IDE_STREAMS 1 - -static uint nr_ide_streams = TIO_DEFAULT_NR_IDE_STREAMS; -module_param_named(ide_nr, nr_ide_streams, uint, 0644); -MODULE_PARM_DESC(ide_nr, "Set the maximum number of IDE streams per PHB"); - #define dev_to_sp(dev) ((struct sp_device *)dev_get_drvdata(dev)) #define dev_to_psp(dev) ((struct psp_device *)(dev_to_sp(dev)->psp_data)) #define dev_to_sev(dev) ((struct sev_device *)(dev_to_psp(dev)->sev_data)) @@ -193,7 +187,6 @@ static void streams_teardown(struct pci_ide **ide) static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide, unsigned int tc) { - struct pci_dev *rp = pcie_find_root_port(pdev); struct pci_ide *ide1; if (ide[tc]) { @@ -201,17 +194,11 @@ static int stream_alloc(struct pci_dev *pdev, struct pci_ide **ide, return -EBUSY; } - /* FIXME: find a better way */ - if (nr_ide_streams != TIO_DEFAULT_NR_IDE_STREAMS) - pci_notice(pdev, "Enable non-default %d streams", 
nr_ide_streams); - pci_ide_set_nr_streams(to_pci_host_bridge(rp->bus->bridge), nr_ide_streams); - ide1 = pci_ide_stream_alloc(pdev); if (!ide1) return -EFAULT; - /* Blindly assign streamid=0 to TC=0, and so on */ - ide1->stream_id = tc; + ide1->stream_id = ide1->host_bridge_stream; ide[tc] = ide1; diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c index 7fea11a5e359ca..22ae387ae03ca6 100644 --- a/drivers/firewire/core-transaction.c +++ b/drivers/firewire/core-transaction.c @@ -173,20 +173,14 @@ static void split_transaction_timeout_callback(struct timer_list *timer) } } -static void start_split_transaction_timeout(struct fw_transaction *t, - struct fw_card *card) +// card->transactions.lock should be acquired in advance for the linked list. +static void start_split_transaction_timeout(struct fw_transaction *t, unsigned int delta) { - unsigned long delta; - if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) return; t->is_split_transaction = true; - // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for - // local destination never runs in any type of IRQ context. - scoped_guard(spinlock_irqsave, &card->split_timeout.lock) - delta = card->split_timeout.jiffies; mod_timer(&t->split_timeout_timer, jiffies + delta); } @@ -207,13 +201,20 @@ static void transmit_complete_callback(struct fw_packet *packet, break; case ACK_PENDING: { + unsigned int delta; + // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for // local destination never runs in any type of IRQ context. scoped_guard(spinlock_irqsave, &card->split_timeout.lock) { t->split_timeout_cycle = compute_split_timeout_timestamp(card, packet->timestamp) & 0xffff; + delta = card->split_timeout.jiffies; } - start_split_transaction_timeout(t, card); + + // NOTE: This can be without irqsave when we can guarantee that __fw_send_request() for + // local destination never runs in any type of IRQ context. 
+ scoped_guard(spinlock_irqsave, &card->transactions.lock) + start_split_transaction_timeout(t, delta); break; } case ACK_BUSY_X: diff --git a/drivers/gpio/gpio-brcmstb.c b/drivers/gpio/gpio-brcmstb.c index af9287ff5dc43e..2352d099709c2b 100644 --- a/drivers/gpio/gpio-brcmstb.c +++ b/drivers/gpio/gpio-brcmstb.c @@ -301,12 +301,10 @@ static struct brcmstb_gpio_bank *brcmstb_gpio_hwirq_to_bank( struct brcmstb_gpio_priv *priv, irq_hw_number_t hwirq) { struct brcmstb_gpio_bank *bank; - int i = 0; - /* banks are in descending order */ - list_for_each_entry_reverse(bank, &priv->bank_list, node) { - i += bank->chip.gc.ngpio; - if (hwirq < i) + list_for_each_entry(bank, &priv->bank_list, node) { + if (hwirq >= bank->chip.gc.offset && + hwirq < (bank->chip.gc.offset + bank->chip.gc.ngpio)) return bank; } return NULL; diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index e136e81794dfb3..e39723b5901bac 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -799,10 +799,13 @@ static struct platform_device omap_mpuio_device = { static inline void omap_mpuio_init(struct gpio_bank *bank) { - platform_set_drvdata(&omap_mpuio_device, bank); + static bool registered; - if (platform_driver_register(&omap_mpuio_driver) == 0) - (void) platform_device_register(&omap_mpuio_device); + platform_set_drvdata(&omap_mpuio_device, bank); + if (!registered) { + (void)platform_device_register(&omap_mpuio_device); + registered = true; + } } /*---------------------------------------------------------------------*/ @@ -1575,13 +1578,24 @@ static struct platform_driver omap_gpio_driver = { */ static int __init omap_gpio_drv_reg(void) { - return platform_driver_register(&omap_gpio_driver); + int ret; + + ret = platform_driver_register(&omap_mpuio_driver); + if (ret) + return ret; + + ret = platform_driver_register(&omap_gpio_driver); + if (ret) + platform_driver_unregister(&omap_mpuio_driver); + + return ret; } postcore_initcall(omap_gpio_drv_reg); static void __exit omap_gpio_exit(void) { platform_driver_unregister(&omap_gpio_driver); + platform_driver_unregister(&omap_mpuio_driver); } module_exit(omap_gpio_exit); diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 8727ae54bc578b..f93a3dbb2daaf3 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -914,6 +914,8 @@ static void pca953x_irq_shutdown(struct irq_data *d) clear_bit(hwirq, chip->irq_trig_fall); clear_bit(hwirq, chip->irq_trig_level_low); clear_bit(hwirq, chip->irq_trig_level_high); + + pca953x_irq_mask(d); } static void pca953x_irq_print_chip(struct irq_data *data, struct seq_file *p) diff --git a/drivers/gpio/gpio-rockchip.c b/drivers/gpio/gpio-rockchip.c index bae2061f15fc47..0fff4a699f12d1 100644 --- a/drivers/gpio/gpio-rockchip.c +++ b/drivers/gpio/gpio-rockchip.c @@ -18,7 +18,6 @@ #include #include #include -#include #include #include #include @@ -164,12 +163,6 @@ static int rockchip_gpio_set_direction(struct gpio_chip *chip, unsigned long flags; u32 data = input ? 
0 : 1; - - if (input) - pinctrl_gpio_direction_input(chip, offset); - else - pinctrl_gpio_direction_output(chip, offset); - raw_spin_lock_irqsave(&bank->slock, flags); rockchip_gpio_writel_bit(bank, offset, data, bank->gpio_regs->port_ddr); raw_spin_unlock_irqrestore(&bank->slock, flags); @@ -593,7 +586,6 @@ static int rockchip_gpiolib_register(struct rockchip_pin_bank *bank) gc->ngpio = bank->nr_pins; gc->label = bank->name; gc->parent = bank->dev; - gc->can_sleep = true; ret = gpiochip_add_data(gc, bank); if (ret) { diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c index 413bcd0a424050..2cc8abe705cdb7 100644 --- a/drivers/gpio/gpio-sprd.c +++ b/drivers/gpio/gpio-sprd.c @@ -35,7 +35,7 @@ struct sprd_gpio { struct gpio_chip chip; void __iomem *base; - spinlock_t lock; + raw_spinlock_t lock; int irq; }; @@ -54,7 +54,7 @@ static void sprd_gpio_update(struct gpio_chip *chip, unsigned int offset, unsigned long flags; u32 tmp; - spin_lock_irqsave(&sprd_gpio->lock, flags); + raw_spin_lock_irqsave(&sprd_gpio->lock, flags); tmp = readl_relaxed(base + reg); if (val) @@ -63,7 +63,7 @@ static void sprd_gpio_update(struct gpio_chip *chip, unsigned int offset, tmp &= ~BIT(SPRD_GPIO_BIT(offset)); writel_relaxed(tmp, base + reg); - spin_unlock_irqrestore(&sprd_gpio->lock, flags); + raw_spin_unlock_irqrestore(&sprd_gpio->lock, flags); } static int sprd_gpio_read(struct gpio_chip *chip, unsigned int offset, u16 reg) @@ -236,7 +236,7 @@ static int sprd_gpio_probe(struct platform_device *pdev) if (IS_ERR(sprd_gpio->base)) return PTR_ERR(sprd_gpio->base); - spin_lock_init(&sprd_gpio->lock); + raw_spin_lock_init(&sprd_gpio->lock); sprd_gpio->chip.label = dev_name(&pdev->dev); sprd_gpio->chip.ngpio = SPRD_GPIO_NR; diff --git a/drivers/gpio/gpio-virtuser.c b/drivers/gpio/gpio-virtuser.c index 37f2ce20f1ae72..098e67d70ffa56 100644 --- a/drivers/gpio/gpio-virtuser.c +++ b/drivers/gpio/gpio-virtuser.c @@ -1682,10 +1682,10 @@ static void gpio_virtuser_device_config_group_release(struct config_item *item) { struct gpio_virtuser_device *dev = to_gpio_virtuser_device(item); - guard(mutex)(&dev->lock); - - if (gpio_virtuser_device_is_live(dev)) - gpio_virtuser_device_deactivate(dev); + scoped_guard(mutex, &dev->lock) { + if (gpio_virtuser_device_is_live(dev)) + gpio_virtuser_device_deactivate(dev); + } mutex_destroy(&dev->lock); ida_free(&gpio_virtuser_ida, dev->id); diff --git a/drivers/gpio/gpiolib-acpi-core.c b/drivers/gpio/gpiolib-acpi-core.c index 83dd227dbbecc7..9627b3a9c7f3d9 100644 --- a/drivers/gpio/gpiolib-acpi-core.c +++ b/drivers/gpio/gpiolib-acpi-core.c @@ -1104,6 +1104,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, unsigned int pin = agpio->pin_table[i]; struct acpi_gpio_connection *conn; struct gpio_desc *desc; + u16 word, shift; bool found; mutex_lock(&achip->conn_lock); @@ -1158,10 +1159,22 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address, mutex_unlock(&achip->conn_lock); - if (function == ACPI_WRITE) - gpiod_set_raw_value_cansleep(desc, !!(*value & BIT(i))); - else - *value |= (u64)gpiod_get_raw_value_cansleep(desc) << i; + /* + * For the cases when OperationRegion() consists of more than + * 64 bits calculate the word and bit shift to use that one to + * access the value. 
+ */ + word = i / 64; + shift = i % 64; + + if (function == ACPI_WRITE) { + gpiod_set_raw_value_cansleep(desc, value[word] & BIT_ULL(shift)); + } else { + if (gpiod_get_raw_value_cansleep(desc)) + value[word] |= BIT_ULL(shift); + else + value[word] &= ~BIT_ULL(shift); + } } out: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 7e623f91f2d7ca..d9c7ad297293bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -498,8 +498,13 @@ void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr, if (adev->irq.retry_cam_enabled) return; + else if (adev->irq.ih1.ring_size) + ih = &adev->irq.ih1; + else if (adev->irq.ih_soft.enabled) + ih = &adev->irq.ih_soft; + else + return; - ih = &adev->irq.ih1; /* Get the WPTR of the last entry in IH ring */ last_wptr = amdgpu_ih_get_wptr(adev, ih); /* Order wptr with ring data. */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index 72ec455fa932ca..44f230d67da242 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -235,7 +235,7 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, amdgpu_ring_ib_begin(ring); - if (ring->funcs->emit_gfx_shadow) + if (ring->funcs->emit_gfx_shadow && adev->gfx.cp_gfx_shadow) amdgpu_ring_emit_gfx_shadow(ring, shadow_va, csa_va, gds_va, init_shadow, vmid); @@ -291,7 +291,8 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned int num_ibs, fence_flags | AMDGPU_FENCE_FLAG_64BIT); } - if (ring->funcs->emit_gfx_shadow && ring->funcs->init_cond_exec) { + if (ring->funcs->emit_gfx_shadow && ring->funcs->init_cond_exec && + adev->gfx.cp_gfx_shadow) { amdgpu_ring_emit_gfx_shadow(ring, 0, 0, 0, false, 0); amdgpu_ring_init_cond_exec(ring, ring->cond_exe_gpu_addr); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index d75b9940f24874..fc65fb36e115ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -6879,7 +6879,7 @@ static int gfx_v10_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset the ring */ ring->wptr = 0; - *ring->wptr_cpu_addr = 0; + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); amdgpu_ring_clear_ring(ring); } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c index 8a2ee2de390f3f..e642236ea2c511 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c @@ -4201,7 +4201,7 @@ static int gfx_v11_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset the ring */ ring->wptr = 0; - *ring->wptr_cpu_addr = 0; + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); amdgpu_ring_clear_ring(ring); } @@ -6823,11 +6823,12 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; + bool use_mmio = false; int r; amdgpu_ring_reset_helper_begin(ring, timedout_fence); - r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); + r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, use_mmio); if (r) { dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r); @@ -6836,16 +6837,18 @@ static int gfx_v11_0_reset_kgq(struct amdgpu_ring *ring, return r; } - r = 
gfx_v11_0_kgq_init_queue(ring, true); - if (r) { - dev_err(adev->dev, "failed to init kgq\n"); - return r; - } + if (use_mmio) { + r = gfx_v11_0_kgq_init_queue(ring, true); + if (r) { + dev_err(adev->dev, "failed to init kgq\n"); + return r; + } - r = amdgpu_mes_map_legacy_queue(adev, ring); - if (r) { - dev_err(adev->dev, "failed to remap kgq\n"); - return r; + r = amdgpu_mes_map_legacy_queue(adev, ring); + if (r) { + dev_err(adev->dev, "failed to remap kgq\n"); + return r; + } } return amdgpu_ring_reset_helper_end(ring, timedout_fence); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c index b786967022d29d..4aab89a9ab4011 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v12_0.c @@ -3079,7 +3079,7 @@ static int gfx_v12_0_kgq_init_queue(struct amdgpu_ring *ring, bool reset) memcpy_toio(mqd, adev->gfx.me.mqd_backup[mqd_idx], sizeof(*mqd)); /* reset the ring */ ring->wptr = 0; - *ring->wptr_cpu_addr = 0; + atomic64_set((atomic64_t *)ring->wptr_cpu_addr, 0); amdgpu_ring_clear_ring(ring); } @@ -5297,11 +5297,12 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, struct amdgpu_fence *timedout_fence) { struct amdgpu_device *adev = ring->adev; + bool use_mmio = false; int r; amdgpu_ring_reset_helper_begin(ring, timedout_fence); - r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, false); + r = amdgpu_mes_reset_legacy_queue(ring->adev, ring, vmid, use_mmio); if (r) { dev_warn(adev->dev, "reset via MES failed and try pipe reset %d\n", r); r = gfx_v12_reset_gfx_pipe(ring); @@ -5309,16 +5310,18 @@ static int gfx_v12_0_reset_kgq(struct amdgpu_ring *ring, return r; } - r = gfx_v12_0_kgq_init_queue(ring, true); - if (r) { - dev_err(adev->dev, "failed to init kgq\n"); - return r; - } + if (use_mmio) { + r = gfx_v12_0_kgq_init_queue(ring, true); + if (r) { + dev_err(adev->dev, "failed to init kgq\n"); + return r; + } - r = amdgpu_mes_map_legacy_queue(adev, ring); - if (r) { - dev_err(adev->dev, "failed to remap kgq\n"); - return r; + r = amdgpu_mes_map_legacy_queue(adev, ring); + if (r) { + dev_err(adev->dev, "failed to remap kgq\n"); + return r; + } } return amdgpu_ring_reset_helper_end(ring, timedout_fence); diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index ad36c96478a826..25536d89635d35 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -225,7 +225,13 @@ static u32 soc21_get_config_memsize(struct amdgpu_device *adev) static u32 soc21_get_xclk(struct amdgpu_device *adev) { - return adev->clock.spll.reference_freq; + u32 reference_clock = adev->clock.spll.reference_freq; + + /* reference clock is actually 99.81 Mhz rather than 100 Mhz */ + if ((adev->flags & AMD_IS_APU) && reference_clock == 10000) + return 9981; + + return reference_clock; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index af53e796ea1baa..6ada7b4af7c685 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -217,7 +217,7 @@ svm_migrate_get_vram_page(struct svm_range *prange, unsigned long pfn) page = pfn_to_page(pfn); svm_range_bo_ref(prange->svm_bo); page->zone_device_data = prange->svm_bo; - zone_device_page_init(page, 0); + zone_device_page_init(page, page_pgmap(page), 0); } static void diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 1ea5a250440f60..a8a59126b2d2b0 100644 --- 
a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7754,10 +7754,12 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector) drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); /* Cancel and flush any pending HDMI HPD debounce work */ - cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work); - if (aconnector->hdmi_prev_sink) { - dc_sink_release(aconnector->hdmi_prev_sink); - aconnector->hdmi_prev_sink = NULL; + if (aconnector->hdmi_hpd_debounce_delay_ms) { + cancel_delayed_work_sync(&aconnector->hdmi_hpd_debounce_work); + if (aconnector->hdmi_prev_sink) { + dc_sink_release(aconnector->hdmi_prev_sink); + aconnector->hdmi_prev_sink = NULL; + } } if (aconnector->bl_idx != -1) { diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 79b174e5326da6..302af1fb6901e7 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -80,15 +80,15 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, enum ip_power_state pwr_state = gate ? POWER_STATE_OFF : POWER_STATE_ON; bool is_vcn = block_type == AMD_IP_BLOCK_TYPE_VCN; + mutex_lock(&adev->pm.mutex); + if (atomic_read(&adev->pm.pwr_state[block_type]) == pwr_state && (!is_vcn || adev->vcn.num_vcn_inst == 1)) { dev_dbg(adev->dev, "IP block%d already in the target %s state!", block_type, gate ? "gate" : "ungate"); - return 0; + goto out_unlock; } - mutex_lock(&adev->pm.mutex); - switch (block_type) { case AMD_IP_BLOCK_TYPE_UVD: case AMD_IP_BLOCK_TYPE_VCE: @@ -115,6 +115,7 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, if (!ret) atomic_set(&adev->pm.pwr_state[block_type], pwr_state); +out_unlock: mutex_unlock(&adev->pm.mutex); return ret; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 4263798d716b76..8e592a477c33a1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -56,6 +56,7 @@ #define SMUQ10_TO_UINT(x) ((x) >> 10) #define SMUQ10_FRAC(x) ((x) & 0x3ff) #define SMUQ10_ROUND(x) ((SMUQ10_TO_UINT(x)) + ((SMUQ10_FRAC(x)) >= 0x200)) +#define SMU_V13_SOFT_FREQ_ROUND(x) ((x) + 1) extern const int pmfw_decoded_link_speed[5]; extern const int pmfw_decoded_link_width[7]; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h index 29a4583db8734b..0b1e6f25e61129 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v14_0.h @@ -57,6 +57,7 @@ extern const int decoded_link_width[8]; #define DECODE_GEN_SPEED(gen_speed_idx) (decoded_link_speed[gen_speed_idx]) #define DECODE_LANE_WIDTH(lane_width_idx) (decoded_link_width[lane_width_idx]) +#define SMU_V14_SOFT_FREQ_ROUND(x) ((x) + 1) struct smu_14_0_max_sustainable_clocks { uint32_t display_clock; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index a89075e257174a..2efd914d81e5b8 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -1555,6 +1555,7 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; if (max > 0) { + max = SMU_V13_SOFT_FREQ_ROUND(max); if (automatic) param = (uint32_t)((clk_id << 16) | 0xffff); else diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c index f2a16dfee59981..06a81533759cda 
100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0.c @@ -1178,6 +1178,7 @@ int smu_v14_0_set_soft_freq_limited_range(struct smu_context *smu, return clk_id; if (max > 0) { + max = SMU_V14_SOFT_FREQ_ROUND(max); if (automatic) param = (uint32_t)((clk_id << 16) | 0xffff); else diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index e4df434273945e..25f68fed9b4823 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -960,16 +960,21 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data, { struct drm_gem_change_handle *args = data; struct drm_gem_object *obj; - int ret; + int handle, ret; if (!drm_core_check_feature(dev, DRIVER_GEM)) return -EOPNOTSUPP; + /* idr_alloc() limitation. */ + if (args->new_handle > INT_MAX) + return -EINVAL; + handle = args->new_handle; + obj = drm_gem_object_lookup(file_priv, args->handle); if (!obj) return -ENOENT; - if (args->handle == args->new_handle) { + if (args->handle == handle) { ret = 0; goto out; } @@ -977,18 +982,19 @@ int drm_gem_change_handle_ioctl(struct drm_device *dev, void *data, mutex_lock(&file_priv->prime.lock); spin_lock(&file_priv->table_lock); - ret = idr_alloc(&file_priv->object_idr, obj, - args->new_handle, args->new_handle + 1, GFP_NOWAIT); + ret = idr_alloc(&file_priv->object_idr, obj, handle, handle + 1, + GFP_NOWAIT); spin_unlock(&file_priv->table_lock); if (ret < 0) goto out_unlock; if (obj->dma_buf) { - ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf, args->new_handle); + ret = drm_prime_add_buf_handle(&file_priv->prime, obj->dma_buf, + handle); if (ret < 0) { spin_lock(&file_priv->table_lock); - idr_remove(&file_priv->object_idr, args->new_handle); + idr_remove(&file_priv->object_idr, handle); spin_unlock(&file_priv->table_lock); goto out_unlock; } diff --git a/drivers/gpu/drm/drm_pagemap.c b/drivers/gpu/drm/drm_pagemap.c index 06c1bd8fc4d170..704f2f94501909 100644 --- a/drivers/gpu/drm/drm_pagemap.c +++ b/drivers/gpu/drm/drm_pagemap.c @@ -197,7 +197,7 @@ static void drm_pagemap_get_devmem_page(struct page *page, struct drm_pagemap_zdd *zdd) { page->zone_device_data = drm_pagemap_zdd_get(zdd); - zone_device_page_init(page, 0); + zone_device_page_init(page, page_pgmap(page), 0); } /** diff --git a/drivers/gpu/drm/imx/ipuv3/imx-tve.c b/drivers/gpu/drm/imx/ipuv3/imx-tve.c index c5c6e070cc063d..e861b8b9d8fa81 100644 --- a/drivers/gpu/drm/imx/ipuv3/imx-tve.c +++ b/drivers/gpu/drm/imx/ipuv3/imx-tve.c @@ -528,6 +528,13 @@ static const struct component_ops imx_tve_ops = { .bind = imx_tve_bind, }; +static void imx_tve_put_device(void *_dev) +{ + struct device *dev = _dev; + + put_device(dev); +} + static int imx_tve_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -549,6 +556,12 @@ static int imx_tve_probe(struct platform_device *pdev) if (ddc_node) { tve->ddc = of_find_i2c_adapter_by_node(ddc_node); of_node_put(ddc_node); + if (tve->ddc) { + ret = devm_add_action_or_reset(dev, imx_tve_put_device, + &tve->ddc->dev); + if (ret) + return ret; + } } tve->mode = of_get_tve_mode(np); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c index ac9a95aab2fb56..4c042133261c96 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c @@ -501,8 +501,6 @@ static const struct adreno_reglist a690_hwcg[] = { {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222}, {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111}, 
{REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}, - {REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL, 0x10111}, - {REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL, 0x5555}, {} }; diff --git a/drivers/gpu/drm/nouveau/nouveau_dmem.c b/drivers/gpu/drm/nouveau/nouveau_dmem.c index 58071652679d8a..3d8031296eed64 100644 --- a/drivers/gpu/drm/nouveau/nouveau_dmem.c +++ b/drivers/gpu/drm/nouveau/nouveau_dmem.c @@ -425,7 +425,7 @@ nouveau_dmem_page_alloc_locked(struct nouveau_drm *drm, bool is_large) order = ilog2(DMEM_CHUNK_NPAGES); } - zone_device_folio_init(folio, order); + zone_device_folio_init(folio, page_pgmap(folio_page(folio, 0)), order); return page; } diff --git a/drivers/gpu/drm/tyr/Kconfig b/drivers/gpu/drm/tyr/Kconfig index 4b55308fd2eb4a..e933e647802722 100644 --- a/drivers/gpu/drm/tyr/Kconfig +++ b/drivers/gpu/drm/tyr/Kconfig @@ -6,6 +6,7 @@ config DRM_TYR depends on RUST depends on ARM || ARM64 || COMPILE_TEST depends on !GENERIC_ATOMIC64 # for IOMMU_IO_PGTABLE_LPAE + depends on COMMON_CLK default n help Rust DRM driver for ARM Mali CSF-based GPUs. diff --git a/drivers/gpu/drm/xe/xe_configfs.c b/drivers/gpu/drm/xe/xe_configfs.c index 9f6251b1008bac..82edd046600557 100644 --- a/drivers/gpu/drm/xe/xe_configfs.c +++ b/drivers/gpu/drm/xe/xe_configfs.c @@ -347,11 +347,10 @@ static bool is_bound(struct xe_config_group_device *dev) return false; ret = pci_get_drvdata(pdev); - pci_dev_put(pdev); - if (ret) pci_dbg(pdev, "Already bound to driver\n"); + pci_dev_put(pdev); return ret; } diff --git a/drivers/gpu/drm/xe/xe_device.c b/drivers/gpu/drm/xe/xe_device.c index cf29e259861f95..9a6d49fcd8e49b 100644 --- a/drivers/gpu/drm/xe/xe_device.c +++ b/drivers/gpu/drm/xe/xe_device.c @@ -984,8 +984,6 @@ void xe_device_remove(struct xe_device *xe) { xe_display_unregister(xe); - xe_nvm_fini(xe); - drm_dev_unplug(&xe->drm); xe_bo_pci_dev_remove_all(xe); diff --git a/drivers/gpu/drm/xe/xe_exec.c b/drivers/gpu/drm/xe/xe_exec.c index fd948003175069..8e3614b240107a 100644 --- a/drivers/gpu/drm/xe/xe_exec.c +++ b/drivers/gpu/drm/xe/xe_exec.c @@ -190,9 +190,9 @@ int xe_exec_ioctl(struct drm_device *dev, void *data, struct drm_file *file) goto err_syncs; } - if (xe_exec_queue_is_parallel(q)) { - err = copy_from_user(addresses, addresses_user, sizeof(u64) * - q->width); + if (args->num_batch_buffer && xe_exec_queue_is_parallel(q)) { + err = copy_from_user(addresses, addresses_user, + sizeof(u64) * q->width); if (err) { err = -EFAULT; goto err_syncs; diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c index 281286f2b5f989..b8c1dd953665ba 100644 --- a/drivers/gpu/drm/xe/xe_lrc.c +++ b/drivers/gpu/drm/xe/xe_lrc.c @@ -1185,7 +1185,7 @@ static ssize_t setup_invalidate_state_cache_wa(struct xe_lrc *lrc, return -ENOSPC; *cmd++ = MI_LOAD_REGISTER_IMM | MI_LRI_NUM_REGS(1); - *cmd++ = CS_DEBUG_MODE1(0).addr; + *cmd++ = CS_DEBUG_MODE2(0).addr; *cmd++ = _MASKED_BIT_ENABLE(INSTRUCTION_STATE_CACHE_INVALIDATE); return cmd - batch; diff --git a/drivers/gpu/drm/xe/xe_nvm.c b/drivers/gpu/drm/xe/xe_nvm.c index 33f4ac82fc80ab..6da42b2b5e4673 100644 --- a/drivers/gpu/drm/xe/xe_nvm.c +++ b/drivers/gpu/drm/xe/xe_nvm.c @@ -83,6 +83,27 @@ static bool xe_nvm_writable_override(struct xe_device *xe) return writable_override; } +static void xe_nvm_fini(void *arg) +{ + struct xe_device *xe = arg; + struct intel_dg_nvm_dev *nvm = xe->nvm; + + if (!xe->info.has_gsc_nvm) + return; + + /* No access to internal NVM from VFs */ + if (IS_SRIOV_VF(xe)) + return; + + /* Nvm pointer should not be NULL here */ + if (WARN_ON(!nvm)) + return; + + 
auxiliary_device_delete(&nvm->aux_dev); + auxiliary_device_uninit(&nvm->aux_dev); + xe->nvm = NULL; +} + int xe_nvm_init(struct xe_device *xe) { struct pci_dev *pdev = to_pci_dev(xe->drm.dev); @@ -132,39 +153,17 @@ int xe_nvm_init(struct xe_device *xe) ret = auxiliary_device_init(aux_dev); if (ret) { drm_err(&xe->drm, "xe-nvm aux init failed %d\n", ret); - goto err; + kfree(nvm); + xe->nvm = NULL; + return ret; } ret = auxiliary_device_add(aux_dev); if (ret) { drm_err(&xe->drm, "xe-nvm aux add failed %d\n", ret); auxiliary_device_uninit(aux_dev); - goto err; + xe->nvm = NULL; + return ret; } - return 0; - -err: - kfree(nvm); - xe->nvm = NULL; - return ret; -} - -void xe_nvm_fini(struct xe_device *xe) -{ - struct intel_dg_nvm_dev *nvm = xe->nvm; - - if (!xe->info.has_gsc_nvm) - return; - - /* No access to internal NVM from VFs */ - if (IS_SRIOV_VF(xe)) - return; - - /* Nvm pointer should not be NULL here */ - if (WARN_ON(!nvm)) - return; - - auxiliary_device_delete(&nvm->aux_dev); - auxiliary_device_uninit(&nvm->aux_dev); - xe->nvm = NULL; + return devm_add_action_or_reset(xe->drm.dev, xe_nvm_fini, xe); } diff --git a/drivers/gpu/drm/xe/xe_nvm.h b/drivers/gpu/drm/xe/xe_nvm.h index 7f3d5f57bed088..fd3467ad35a4d9 100644 --- a/drivers/gpu/drm/xe/xe_nvm.h +++ b/drivers/gpu/drm/xe/xe_nvm.h @@ -10,6 +10,4 @@ struct xe_device; int xe_nvm_init(struct xe_device *xe); -void xe_nvm_fini(struct xe_device *xe); - #endif diff --git a/drivers/gpu/drm/xe/xe_pci.c b/drivers/gpu/drm/xe/xe_pci.c index 9c9ea10d994c4b..2aa883f5ef797e 100644 --- a/drivers/gpu/drm/xe/xe_pci.c +++ b/drivers/gpu/drm/xe/xe_pci.c @@ -342,7 +342,6 @@ static const struct xe_device_desc lnl_desc = { .has_display = true, .has_flat_ccs = 1, .has_pxp = true, - .has_mem_copy_instr = true, .max_gt_per_tile = 2, .needs_scratch = true, .va_bits = 48, @@ -363,7 +362,6 @@ static const struct xe_device_desc bmg_desc = { .has_heci_cscfi = 1, .has_late_bind = true, .has_sriov = true, - .has_mem_copy_instr = true, .max_gt_per_tile = 2, .needs_scratch = true, .subplatforms = (const struct xe_subplatform_desc[]) { @@ -380,7 +378,6 @@ static const struct xe_device_desc ptl_desc = { .has_display = true, .has_flat_ccs = 1, .has_sriov = true, - .has_mem_copy_instr = true, .max_gt_per_tile = 2, .needs_scratch = true, .needs_shared_vf_gt_wq = true, @@ -393,7 +390,6 @@ static const struct xe_device_desc nvls_desc = { .dma_mask_size = 46, .has_display = true, .has_flat_ccs = 1, - .has_mem_copy_instr = true, .max_gt_per_tile = 2, .require_force_probe = true, .va_bits = 48, @@ -675,7 +671,6 @@ static int xe_info_init_early(struct xe_device *xe, xe->info.has_pxp = desc->has_pxp; xe->info.has_sriov = xe_configfs_primary_gt_allowed(to_pci_dev(xe->drm.dev)) && desc->has_sriov; - xe->info.has_mem_copy_instr = desc->has_mem_copy_instr; xe->info.skip_guc_pc = desc->skip_guc_pc; xe->info.skip_mtcfg = desc->skip_mtcfg; xe->info.skip_pcode = desc->skip_pcode; @@ -864,6 +859,7 @@ static int xe_info_init(struct xe_device *xe, xe->info.has_range_tlb_inval = graphics_desc->has_range_tlb_inval; xe->info.has_usm = graphics_desc->has_usm; xe->info.has_64bit_timestamp = graphics_desc->has_64bit_timestamp; + xe->info.has_mem_copy_instr = GRAPHICS_VER(xe) >= 20; xe_info_probe_tile_count(xe); diff --git a/drivers/gpu/drm/xe/xe_pci_types.h b/drivers/gpu/drm/xe/xe_pci_types.h index 9892c063a9c543..a4451bdc79fb35 100644 --- a/drivers/gpu/drm/xe/xe_pci_types.h +++ b/drivers/gpu/drm/xe/xe_pci_types.h @@ -46,7 +46,6 @@ struct xe_device_desc { u8 has_late_bind:1; u8 has_llc:1; u8 
has_mbx_power_limits:1; - u8 has_mem_copy_instr:1; u8 has_pxp:1; u8 has_sriov:1; u8 needs_scratch:1; diff --git a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c index 378104cd395e59..04cc7a9036e431 100644 --- a/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c +++ b/drivers/iommu/arm/arm-smmu-v3/tegra241-cmdqv.c @@ -1078,6 +1078,9 @@ static int tegra241_vcmdq_hw_init_user(struct tegra241_vcmdq *vcmdq) { char header[64]; + /* Reset VCMDQ */ + tegra241_vcmdq_hw_deinit(vcmdq); + /* Configure the vcmdq only; User space does the enabling */ writeq_relaxed(vcmdq->cmdq.q.q_base, REG_VCMDQ_PAGE1(vcmdq, BASE)); diff --git a/drivers/iommu/generic_pt/iommu_pt.h b/drivers/iommu/generic_pt/iommu_pt.h index 52ef028ed2db97..d575f3ba9d3416 100644 --- a/drivers/iommu/generic_pt/iommu_pt.h +++ b/drivers/iommu/generic_pt/iommu_pt.h @@ -931,6 +931,8 @@ static __maybe_unused int __unmap_range(struct pt_range *range, void *arg, struct pt_table_p *table) { struct pt_state pts = pt_init(range, level, table); + unsigned int flush_start_index = UINT_MAX; + unsigned int flush_end_index = UINT_MAX; struct pt_unmap_args *unmap = arg; unsigned int num_oas = 0; unsigned int start_index; @@ -986,6 +988,9 @@ static __maybe_unused int __unmap_range(struct pt_range *range, void *arg, iommu_pages_list_add(&unmap->free_list, pts.table_lower); pt_clear_entries(&pts, ilog2(1)); + if (pts.index < flush_start_index) + flush_start_index = pts.index; + flush_end_index = pts.index + 1; } pts.index++; } else { @@ -999,7 +1004,10 @@ static __maybe_unused int __unmap_range(struct pt_range *range, void *arg, num_contig_lg2 = pt_entry_num_contig_lg2(&pts); pt_clear_entries(&pts, num_contig_lg2); num_oas += log2_to_int(num_contig_lg2); + if (pts.index < flush_start_index) + flush_start_index = pts.index; pts.index += log2_to_int(num_contig_lg2); + flush_end_index = pts.index; } if (pts.index >= pts.end_index) break; @@ -1007,7 +1015,8 @@ static __maybe_unused int __unmap_range(struct pt_range *range, void *arg, } while (true); unmap->unmapped += log2_mul(num_oas, pt_table_item_lg2sz(&pts)); - flush_writes_range(&pts, start_index, pts.index); + if (flush_start_index != flush_end_index) + flush_writes_range(&pts, flush_start_index, flush_end_index); return ret; } diff --git a/drivers/iommu/iommufd/pages.c b/drivers/iommu/iommufd/pages.c index dbe51ecb9a20f8..f606148920fa72 100644 --- a/drivers/iommu/iommufd/pages.c +++ b/drivers/iommu/iommufd/pages.c @@ -289,6 +289,7 @@ static void batch_clear(struct pfn_batch *batch) batch->end = 0; batch->pfns[0] = 0; batch->npfns[0] = 0; + batch->kind = 0; } /* diff --git a/drivers/irqchip/irq-ls-extirq.c b/drivers/irqchip/irq-ls-extirq.c index 50a7b38381b986..96f9c20621cf54 100644 --- a/drivers/irqchip/irq-ls-extirq.c +++ b/drivers/irqchip/irq-ls-extirq.c @@ -168,40 +168,34 @@ ls_extirq_parse_map(struct ls_extirq_data *priv, struct device_node *node) return 0; } -static int __init -ls_extirq_of_init(struct device_node *node, struct device_node *parent) +static int ls_extirq_probe(struct platform_device *pdev) { struct irq_domain *domain, *parent_domain; + struct device_node *node, *parent; + struct device *dev = &pdev->dev; struct ls_extirq_data *priv; int ret; + node = dev->of_node; + parent = of_irq_find_parent(node); + if (!parent) + return dev_err_probe(dev, -ENODEV, "Failed to get IRQ parent node\n"); + parent_domain = irq_find_host(parent); - if (!parent_domain) { - pr_err("Cannot find parent domain\n"); - ret = -ENODEV; - goto err_irq_find_host; - } + 
if (!parent_domain) + return dev_err_probe(dev, -EPROBE_DEFER, "Cannot find parent domain\n"); - priv = kzalloc(sizeof(*priv), GFP_KERNEL); - if (!priv) { - ret = -ENOMEM; - goto err_alloc_priv; - } + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return dev_err_probe(dev, -ENOMEM, "Failed to allocate memory\n"); - /* - * All extirq OF nodes are under a scfg/syscon node with - * the 'ranges' property - */ - priv->intpcr = of_iomap(node, 0); - if (!priv->intpcr) { - pr_err("Cannot ioremap OF node %pOF\n", node); - ret = -ENOMEM; - goto err_iomap; - } + priv->intpcr = devm_of_iomap(dev, node, 0, NULL); + if (!priv->intpcr) + return dev_err_probe(dev, -ENOMEM, "Cannot ioremap OF node %pOF\n", node); ret = ls_extirq_parse_map(priv, node); if (ret) - goto err_parse_map; + return dev_err_probe(dev, ret, "Failed to parse IRQ map\n"); priv->big_endian = of_device_is_big_endian(node->parent); priv->is_ls1021a_or_ls1043a = of_device_is_compatible(node, "fsl,ls1021a-extirq") || @@ -210,23 +204,26 @@ ls_extirq_of_init(struct device_node *node, struct device_node *parent) domain = irq_domain_create_hierarchy(parent_domain, 0, priv->nirq, of_fwnode_handle(node), &extirq_domain_ops, priv); - if (!domain) { - ret = -ENOMEM; - goto err_add_hierarchy; - } + if (!domain) + return dev_err_probe(dev, -ENOMEM, "Failed to add IRQ domain\n"); return 0; - -err_add_hierarchy: -err_parse_map: - iounmap(priv->intpcr); -err_iomap: - kfree(priv); -err_alloc_priv: -err_irq_find_host: - return ret; } -IRQCHIP_DECLARE(ls1021a_extirq, "fsl,ls1021a-extirq", ls_extirq_of_init); -IRQCHIP_DECLARE(ls1043a_extirq, "fsl,ls1043a-extirq", ls_extirq_of_init); -IRQCHIP_DECLARE(ls1088a_extirq, "fsl,ls1088a-extirq", ls_extirq_of_init); +static const struct of_device_id ls_extirq_dt_ids[] = { + { .compatible = "fsl,ls1021a-extirq" }, + { .compatible = "fsl,ls1043a-extirq" }, + { .compatible = "fsl,ls1088a-extirq" }, + {} +}; +MODULE_DEVICE_TABLE(of, ls_extirq_dt_ids); + +static struct platform_driver ls_extirq_driver = { + .probe = ls_extirq_probe, + .driver = { + .name = "ls-extirq", + .of_match_table = ls_extirq_dt_ids, + } +}; + +builtin_platform_driver(ls_extirq_driver); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index a02aecac05cdf9..3fa3b13a410f42 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -1107,17 +1107,13 @@ static void detached_dev_do_request(struct bcache_device *d, if (bio_op(orig_bio) == REQ_OP_DISCARD && !bdev_max_discard_sectors(dc->bdev)) { + bio_end_io_acct(orig_bio, start_time); bio_endio(orig_bio); return; } clone_bio = bio_alloc_clone(dc->bdev, orig_bio, GFP_NOIO, &d->bio_detached); - if (!clone_bio) { - orig_bio->bi_status = BLK_STS_RESOURCE; - bio_endio(orig_bio); - return; - } ddip = container_of(clone_bio, struct detached_dev_io_private, bio); /* Count on the bcache device */ diff --git a/drivers/mtd/nand/spi/esmt.c b/drivers/mtd/nand/spi/esmt.c index e60e4ac1fd6fb1..3e86f346f751a3 100644 --- a/drivers/mtd/nand/spi/esmt.c +++ b/drivers/mtd/nand/spi/esmt.c @@ -215,7 +215,7 @@ static const struct spinand_info esmt_c8_spinand_table[] = { SPINAND_FACT_OTP_INFO(2, 0, &f50l1g41lb_fact_otp_ops)), SPINAND_INFO("F50D1G41LB", SPINAND_ID(SPINAND_READID_METHOD_OPCODE_ADDR, 0x11, 0x7f, - 0x7f), + 0x7f, 0x7f), NAND_MEMORG(1, 2048, 64, 64, 1024, 20, 1, 1, 1), NAND_ECCREQ(1, 512), SPINAND_INFO_OP_VARIANTS(&read_cache_variants, diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index e7caf400a59cbd..45bd2bb102ffdf 100644 
--- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -2202,11 +2202,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, unblock_netpoll_tx(); } - /* broadcast mode uses the all_slaves to loop through slaves. */ - if (bond_mode_can_use_xmit_hash(bond) || - BOND_MODE(bond) == BOND_MODE_BROADCAST) - bond_update_slave_arr(bond, NULL); - if (!slave_dev->netdev_ops->ndo_bpf || !slave_dev->netdev_ops->ndo_xdp_xmit) { if (bond->xdp_prog) { @@ -2240,6 +2235,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, bpf_prog_inc(bond->xdp_prog); } + /* broadcast mode uses the all_slaves to loop through slaves. */ + if (bond_mode_can_use_xmit_hash(bond) || + BOND_MODE(bond) == BOND_MODE_BROADCAST) + bond_update_slave_arr(bond, NULL); + bond_xdp_set_features(bond_dev); slave_info(bond_dev, slave_dev, "Enslaving as %s interface with %s link\n", @@ -3047,8 +3047,8 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32 __func__, &sip); return; } - slave->last_rx = jiffies; - slave->target_last_arp_rx[i] = jiffies; + WRITE_ONCE(slave->last_rx, jiffies); + WRITE_ONCE(slave->target_last_arp_rx[i], jiffies); } static int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond, @@ -3267,8 +3267,8 @@ static void bond_validate_na(struct bonding *bond, struct slave *slave, __func__, saddr); return; } - slave->last_rx = jiffies; - slave->target_last_arp_rx[i] = jiffies; + WRITE_ONCE(slave->last_rx, jiffies); + WRITE_ONCE(slave->target_last_arp_rx[i], jiffies); } static int bond_na_rcv(const struct sk_buff *skb, struct bonding *bond, @@ -3338,7 +3338,7 @@ int bond_rcv_validate(const struct sk_buff *skb, struct bonding *bond, (slave_do_arp_validate_only(bond) && is_ipv6) || #endif !slave_do_arp_validate_only(bond)) - slave->last_rx = jiffies; + WRITE_ONCE(slave->last_rx, jiffies); return RX_HANDLER_ANOTHER; } else if (is_arp) { return bond_arp_rcv(skb, bond, slave); @@ -3406,7 +3406,7 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) if (slave->link != BOND_LINK_UP) { if (bond_time_in_interval(bond, last_tx, 1) && - bond_time_in_interval(bond, slave->last_rx, 1)) { + bond_time_in_interval(bond, READ_ONCE(slave->last_rx), 1)) { bond_propose_link_state(slave, BOND_LINK_UP); slave_state_changed = 1; @@ -3430,8 +3430,10 @@ static void bond_loadbalance_arp_mon(struct bonding *bond) * when the source ip is 0, so don't take the link down * if we don't know our ip yet */ - if (!bond_time_in_interval(bond, last_tx, bond->params.missed_max) || - !bond_time_in_interval(bond, slave->last_rx, bond->params.missed_max)) { + if (!bond_time_in_interval(bond, last_tx, + bond->params.missed_max) || + !bond_time_in_interval(bond, READ_ONCE(slave->last_rx), + bond->params.missed_max)) { bond_propose_link_state(slave, BOND_LINK_DOWN); slave_state_changed = 1; diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 384499c869b8da..f1c6e9d8f61671 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -1152,7 +1152,7 @@ static void _bond_options_arp_ip_target_set(struct bonding *bond, int slot, if (slot >= 0 && slot < BOND_MAX_ARP_TARGETS) { bond_for_each_slave(bond, slave, iter) - slave->target_last_arp_rx[slot] = last_rx; + WRITE_ONCE(slave->target_last_arp_rx[slot], last_rx); targets[slot] = target; } } @@ -1221,8 +1221,8 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target) bond_for_each_slave(bond, slave, iter) { 
targets_rx = slave->target_last_arp_rx; for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++) - targets_rx[i] = targets_rx[i+1]; - targets_rx[i] = 0; + WRITE_ONCE(targets_rx[i], READ_ONCE(targets_rx[i+1])); + WRITE_ONCE(targets_rx[i], 0); } for (i = ind; (i < BOND_MAX_ARP_TARGETS-1) && targets[i+1]; i++) targets[i] = targets[i+1]; @@ -1377,7 +1377,7 @@ static void _bond_options_ns_ip6_target_set(struct bonding *bond, int slot, if (slot >= 0 && slot < BOND_MAX_NS_TARGETS) { bond_for_each_slave(bond, slave, iter) { - slave->target_last_arp_rx[slot] = last_rx; + WRITE_ONCE(slave->target_last_arp_rx[slot], last_rx); slave_set_ns_maddr(bond, slave, target, &targets[slot]); } targets[slot] = *target; diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c index c2a3a4eef5b281..58da323f14d7c1 100644 --- a/drivers/net/can/at91_can.c +++ b/drivers/net/can/at91_can.c @@ -1099,7 +1099,7 @@ static int at91_can_probe(struct platform_device *pdev) if (IS_ERR(transceiver)) { err = PTR_ERR(transceiver); dev_err_probe(&pdev->dev, err, "failed to get phy\n"); - goto exit_iounmap; + goto exit_free; } dev->netdev_ops = &at91_netdev_ops; diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c index 192338b481f21f..d8b2dd74b3a1a4 100644 --- a/drivers/net/can/usb/gs_usb.c +++ b/drivers/net/can/usb/gs_usb.c @@ -610,7 +610,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) { struct gs_usb *parent = urb->context; struct gs_can *dev; - struct net_device *netdev; + struct net_device *netdev = NULL; int rc; struct net_device_stats *stats; struct gs_host_frame *hf = urb->transfer_buffer; @@ -768,7 +768,7 @@ static void gs_usb_receive_bulk_callback(struct urb *urb) } } else if (rc != -ESHUTDOWN && net_ratelimit()) { netdev_info(netdev, "failed to re-submit IN URB: %pe\n", - ERR_PTR(urb->status)); + ERR_PTR(rc)); } } diff --git a/drivers/net/dsa/yt921x.c b/drivers/net/dsa/yt921x.c index 1c511f5dc6ab23..7b8c1549a0fb57 100644 --- a/drivers/net/dsa/yt921x.c +++ b/drivers/net/dsa/yt921x.c @@ -682,21 +682,22 @@ static int yt921x_read_mib(struct yt921x_priv *priv, int port) const struct yt921x_mib_desc *desc = &yt921x_mib_descs[i]; u32 reg = YT921X_MIBn_DATA0(port) + desc->offset; u64 *valp = &((u64 *)mib)[i]; - u64 val = *valp; u32 val0; - u32 val1; + u64 val; res = yt921x_reg_read(priv, reg, &val0); if (res) break; if (desc->size <= 1) { - if (val < (u32)val) - /* overflow */ - val += (u64)U32_MAX + 1; - val &= ~U32_MAX; - val |= val0; + u64 old_val = *valp; + + val = (old_val & ~(u64)U32_MAX) | val0; + if (val < old_val) + val += 1ull << 32; } else { + u32 val1; + res = yt921x_reg_read(priv, reg + 4, &val1); if (res) break; diff --git a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c index b9973956c48095..ceb6c11431dd9c 100644 --- a/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c +++ b/drivers/net/ethernet/broadcom/asp2/bcmasp_intf.c @@ -1261,7 +1261,7 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv, netdev_err(intf->ndev, "invalid PHY mode: %s for port %d\n", phy_modes(intf->phy_interface), intf->port); ret = -EINVAL; - goto err_free_netdev; + goto err_deregister_fixed_link; } ret = of_get_ethdev_address(ndev_dn, ndev); @@ -1286,6 +1286,9 @@ struct bcmasp_intf *bcmasp_interface_create(struct bcmasp_priv *priv, return intf; +err_deregister_fixed_link: + if (of_phy_is_fixed_link(ndev_dn)) + of_phy_deregister_fixed_link(ndev_dn); err_free_netdev: free_netdev(ndev); err: diff --git 
a/drivers/net/ethernet/google/gve/gve.h b/drivers/net/ethernet/google/gve/gve.h index 970d5ca8cddeec..cbdf3a842cfe00 100644 --- a/drivers/net/ethernet/google/gve/gve.h +++ b/drivers/net/ethernet/google/gve/gve.h @@ -1206,6 +1206,11 @@ static inline bool gve_supports_xdp_xmit(struct gve_priv *priv) } } +static inline bool gve_is_clock_enabled(struct gve_priv *priv) +{ + return priv->nic_ts_report; +} + /* gqi napi handler defined in gve_main.c */ int gve_napi_poll(struct napi_struct *napi, int budget); diff --git a/drivers/net/ethernet/google/gve/gve_ethtool.c b/drivers/net/ethernet/google/gve/gve_ethtool.c index 52500ae8348e62..311b106160b20a 100644 --- a/drivers/net/ethernet/google/gve/gve_ethtool.c +++ b/drivers/net/ethernet/google/gve/gve_ethtool.c @@ -938,7 +938,7 @@ static int gve_get_ts_info(struct net_device *netdev, ethtool_op_get_ts_info(netdev, info); - if (priv->nic_timestamp_supported) { + if (gve_is_clock_enabled(priv)) { info->so_timestamping |= SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; diff --git a/drivers/net/ethernet/google/gve/gve_main.c b/drivers/net/ethernet/google/gve/gve_main.c index 7eb64e1e4d858b..52c5e4942cd489 100644 --- a/drivers/net/ethernet/google/gve/gve_main.c +++ b/drivers/net/ethernet/google/gve/gve_main.c @@ -680,10 +680,12 @@ static int gve_setup_device_resources(struct gve_priv *priv) } } - err = gve_init_clock(priv); - if (err) { - dev_err(&priv->pdev->dev, "Failed to init clock"); - goto abort_with_ptype_lut; + if (priv->nic_timestamp_supported) { + err = gve_init_clock(priv); + if (err) { + dev_warn(&priv->pdev->dev, "Failed to init clock, continuing without PTP support"); + err = 0; + } } err = gve_init_rss_config(priv, priv->rx_cfg.num_queues); @@ -2183,7 +2185,7 @@ static int gve_set_ts_config(struct net_device *dev, } if (kernel_config->rx_filter != HWTSTAMP_FILTER_NONE) { - if (!priv->nic_ts_report) { + if (!gve_is_clock_enabled(priv)) { NL_SET_ERR_MSG_MOD(extack, "RX timestamping is not supported"); kernel_config->rx_filter = HWTSTAMP_FILTER_NONE; diff --git a/drivers/net/ethernet/google/gve/gve_ptp.c b/drivers/net/ethernet/google/gve/gve_ptp.c index 073677d82ee8e5..de42fc2c19a1a6 100644 --- a/drivers/net/ethernet/google/gve/gve_ptp.c +++ b/drivers/net/ethernet/google/gve/gve_ptp.c @@ -70,11 +70,6 @@ static int gve_ptp_init(struct gve_priv *priv) struct gve_ptp *ptp; int err; - if (!priv->nic_timestamp_supported) { - dev_dbg(&priv->pdev->dev, "Device does not support PTP\n"); - return -EOPNOTSUPP; - } - priv->ptp = kzalloc(sizeof(*priv->ptp), GFP_KERNEL); if (!priv->ptp) return -ENOMEM; @@ -116,9 +111,6 @@ int gve_init_clock(struct gve_priv *priv) { int err; - if (!priv->nic_timestamp_supported) - return 0; - err = gve_ptp_init(priv); if (err) return err; diff --git a/drivers/net/ethernet/google/gve/gve_rx_dqo.c b/drivers/net/ethernet/google/gve/gve_rx_dqo.c index f1bd8f5d573239..63a96106a69371 100644 --- a/drivers/net/ethernet/google/gve/gve_rx_dqo.c +++ b/drivers/net/ethernet/google/gve/gve_rx_dqo.c @@ -484,7 +484,7 @@ int gve_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp) { const struct gve_xdp_buff *ctx = (void *)_ctx; - if (!ctx->gve->nic_ts_report) + if (!gve_is_clock_enabled(ctx->gve)) return -ENODATA; if (!(ctx->compl_desc->ts_sub_nsecs_low & GVE_DQO_RX_HWTSTAMP_VALID)) diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c index 98010354db155e..d47af94f31a991 100644 --- a/drivers/net/ethernet/intel/ice/ice_lib.c +++ b/drivers/net/ethernet/intel/ice/ice_lib.c @@ 
-2783,12 +2783,14 @@ void ice_vsi_set_napi_queues(struct ice_vsi *vsi) ASSERT_RTNL(); ice_for_each_rxq(vsi, q_idx) - netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, - &vsi->rx_rings[q_idx]->q_vector->napi); + if (vsi->rx_rings[q_idx] && vsi->rx_rings[q_idx]->q_vector) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_RX, + &vsi->rx_rings[q_idx]->q_vector->napi); ice_for_each_txq(vsi, q_idx) - netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, - &vsi->tx_rings[q_idx]->q_vector->napi); + if (vsi->tx_rings[q_idx] && vsi->tx_rings[q_idx]->q_vector) + netif_queue_set_napi(netdev, q_idx, NETDEV_QUEUE_TYPE_TX, + &vsi->tx_rings[q_idx]->q_vector->napi); /* Also set the interrupt number for the NAPI */ ice_for_each_q_vector(vsi, v_idx) { struct ice_q_vector *q_vector = vsi->q_vectors[v_idx]; diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index de488185cd4a87..71c6d53b461e9f 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -6982,7 +6982,6 @@ void ice_update_vsi_stats(struct ice_vsi *vsi) cur_ns->rx_errors = pf->stats.crc_errors + pf->stats.illegal_bytes + pf->stats.rx_undersize + - pf->hw_csum_rx_error + pf->stats.rx_jabber + pf->stats.rx_fragments + pf->stats.rx_oversize; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index 034618e79169e4..c58051e4350be2 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -11468,20 +11468,17 @@ static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) */ static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter) { - struct net_device *netdev = adapter->netdev; struct pci_dev *pdev = adapter->pdev; struct ixgbe_hw *hw = &adapter->hw; - bool disable_dev; int err = -EIO; if (hw->mac.type != ixgbe_mac_e610) - goto clean_up_probe; + return err; ixgbe_get_hw_control(adapter); - mutex_init(&hw->aci.lock); err = ixgbe_get_flash_data(&adapter->hw); if (err) - goto shutdown_aci; + goto err_release_hw_control; timer_setup(&adapter->service_timer, ixgbe_service_timer, 0); INIT_WORK(&adapter->service_task, ixgbe_recovery_service_task); @@ -11504,16 +11501,8 @@ static int ixgbe_recovery_probe(struct ixgbe_adapter *adapter) devl_unlock(adapter->devlink); return 0; -shutdown_aci: - mutex_destroy(&adapter->hw.aci.lock); +err_release_hw_control: ixgbe_release_hw_control(adapter); -clean_up_probe: - disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); - free_netdev(netdev); - devlink_free(adapter->devlink); - pci_release_mem_regions(pdev); - if (disable_dev) - pci_disable_device(pdev); return err; } @@ -11655,8 +11644,13 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) if (err) goto err_sw_init; - if (ixgbe_check_fw_error(adapter)) - return ixgbe_recovery_probe(adapter); + if (ixgbe_check_fw_error(adapter)) { + err = ixgbe_recovery_probe(adapter); + if (err) + goto err_sw_init; + + return 0; + } if (adapter->hw.mac.type == ixgbe_mac_e610) { err = ixgbe_get_caps(&adapter->hw); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c index 44b201817d94c6..c116da7d7f18c8 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_cls.c @@ -1389,7 +1389,7 @@ int mvpp2_ethtool_cls_rule_ins(struct mvpp2_port *port, efs->rule.flow_type = mvpp2_cls_ethtool_flow_to_type(info->fs.flow_type); 
if (efs->rule.flow_type < 0) { ret = efs->rule.flow_type; - goto clean_rule; + goto clean_eth_rule; } ret = mvpp2_cls_rfs_parse_rule(&efs->rule); diff --git a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c index bcea3fc26a8c7d..57db7ea2f5be9c 100644 --- a/drivers/net/ethernet/marvell/octeon_ep/octep_main.c +++ b/drivers/net/ethernet/marvell/octeon_ep/octep_main.c @@ -1338,7 +1338,7 @@ int octep_device_setup(struct octep_device *oct) ret = octep_ctrl_net_init(oct); if (ret) - return ret; + goto unsupported_dev; INIT_WORK(&oct->tx_timeout_task, octep_tx_timeout_task); INIT_WORK(&oct->ctrl_mbox_task, octep_ctrl_mbox_task); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c index 36806e813c33cc..1301c56e20d653 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/debugfs.c @@ -613,3 +613,19 @@ void mlx5_debug_cq_remove(struct mlx5_core_dev *dev, struct mlx5_core_cq *cq) cq->dbg = NULL; } } + +static int vhca_id_show(struct seq_file *file, void *priv) +{ + struct mlx5_core_dev *dev = file->private; + + seq_printf(file, "0x%x\n", MLX5_CAP_GEN(dev, vhca_id)); + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(vhca_id); + +void mlx5_vhca_debugfs_init(struct mlx5_core_dev *dev) +{ + debugfs_create_file("vhca_id", 0400, dev->priv.dbg.dbg_root, dev, + &vhca_id_fops); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c index 64c04f52990fe0..781e39b5aa1def 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c @@ -575,3 +575,17 @@ bool mlx5_same_hw_devs(struct mlx5_core_dev *dev, struct mlx5_core_dev *peer_dev return plen && flen && flen == plen && !memcmp(fsystem_guid, psystem_guid, flen); } + +void mlx5_core_reps_aux_devs_remove(struct mlx5_core_dev *dev) +{ + struct mlx5_priv *priv = &dev->priv; + + if (priv->adev[MLX5_INTERFACE_PROTOCOL_ETH]) + device_lock_assert(&priv->adev[MLX5_INTERFACE_PROTOCOL_ETH]->adev.dev); + else + mlx5_core_err(dev, "ETH driver already removed\n"); + if (priv->adev[MLX5_INTERFACE_PROTOCOL_IB_REP]) + del_adev(&priv->adev[MLX5_INTERFACE_PROTOCOL_IB_REP]->adev); + if (priv->adev[MLX5_INTERFACE_PROTOCOL_ETH_REP]) + del_adev(&priv->adev[MLX5_INTERFACE_PROTOCOL_ETH_REP]->adev); +} diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c index a8fb4bec369cf4..9c7064187ed0ff 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c @@ -430,7 +430,8 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry, attrs->replay_esn.esn = sa_entry->esn_state.esn; attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb; attrs->replay_esn.overlap = sa_entry->esn_state.overlap; - if (attrs->dir == XFRM_DEV_OFFLOAD_OUT) + if (attrs->dir == XFRM_DEV_OFFLOAD_OUT || + x->xso.type != XFRM_DEV_OFFLOAD_PACKET) goto skip_replay_window; switch (x->replay_esn->replay_window) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c index c17ea0fcd8efe4..ef7f5338540fdf 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/psp_rxtx.c @@ -177,8 +177,6 @@ bool mlx5e_psp_handle_tx_skb(struct net_device *netdev, { 
struct mlx5e_priv *priv = netdev_priv(netdev); struct net *net = sock_net(skb->sk); - const struct ipv6hdr *ip6; - struct tcphdr *th; if (!mlx5e_psp_set_state(priv, skb, psp_st)) return true; @@ -190,11 +188,18 @@ bool mlx5e_psp_handle_tx_skb(struct net_device *netdev, return false; } if (skb_is_gso(skb)) { - ip6 = ipv6_hdr(skb); - th = inner_tcp_hdr(skb); + int len = skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb); + struct tcphdr *th = inner_tcp_hdr(skb); - th->check = ~tcp_v6_check(skb_shinfo(skb)->gso_size + inner_tcp_hdrlen(skb), &ip6->saddr, - &ip6->daddr, 0); + if (skb->protocol == htons(ETH_P_IP)) { + const struct iphdr *ip = ip_hdr(skb); + + th->check = ~tcp_v4_check(len, ip->saddr, ip->daddr, 0); + } else { + const struct ipv6hdr *ip6 = ipv6_hdr(skb); + + th->check = ~tcp_v6_check(len, &ip6->saddr, &ip6->daddr, 0); + } } return true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 9042c8a388e42b..4b2963bbe7ff45 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4052,6 +4052,8 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) mlx5e_queue_update_stats(priv); } + netdev_stats_to_stats64(stats, &dev->stats); + if (mlx5e_is_uplink_rep(priv)) { struct mlx5e_vport_stats *vstats = &priv->stats.vport; @@ -4068,21 +4070,21 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) mlx5e_fold_sw_stats64(priv, stats); } - stats->rx_missed_errors = priv->stats.qcnt.rx_out_of_buffer; - stats->rx_dropped = PPORT_2863_GET(pstats, if_in_discards); + stats->rx_missed_errors += priv->stats.qcnt.rx_out_of_buffer; + stats->rx_dropped += PPORT_2863_GET(pstats, if_in_discards); - stats->rx_length_errors = + stats->rx_length_errors += PPORT_802_3_GET(pstats, a_in_range_length_errors) + PPORT_802_3_GET(pstats, a_out_of_range_length_field) + PPORT_802_3_GET(pstats, a_frame_too_long_errors) + VNIC_ENV_GET(&priv->stats.vnic, eth_wqe_too_small); - stats->rx_crc_errors = + stats->rx_crc_errors += PPORT_802_3_GET(pstats, a_frame_check_sequence_errors); - stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors); - stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards); - stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + - stats->rx_frame_errors; - stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors; + stats->rx_frame_errors += PPORT_802_3_GET(pstats, a_alignment_errors); + stats->tx_aborted_errors += PPORT_2863_GET(pstats, if_out_discards); + stats->rx_errors += stats->rx_length_errors + stats->rx_crc_errors + + stats->rx_frame_errors; + stats->tx_errors += stats->tx_aborted_errors + stats->tx_carrier_errors; } static void mlx5e_nic_set_rx_mode(struct mlx5e_priv *priv) @@ -6842,6 +6844,7 @@ static void _mlx5e_remove(struct auxiliary_device *adev) struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = edev->mdev; + mlx5_eswitch_safe_aux_devs_remove(mdev); mlx5_core_uplink_netdev_set(mdev, NULL); if (priv->profile) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index a8773b2342c2af..424786f489ecfc 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -2147,11 +2147,14 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow, static void mlx5e_tc_del_fdb_peers_flow(struct mlx5e_tc_flow *flow) { + struct 
mlx5_devcom_comp_dev *devcom; + struct mlx5_devcom_comp_dev *pos; + struct mlx5_eswitch *peer_esw; int i; - for (i = 0; i < MLX5_MAX_PORTS; i++) { - if (i == mlx5_get_dev_index(flow->priv->mdev)) - continue; + devcom = flow->priv->mdev->priv.eswitch->devcom; + mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) { + i = mlx5_get_dev_index(peer_esw->dev); mlx5e_tc_del_fdb_peer_flow(flow, i); } } @@ -5513,12 +5516,16 @@ int mlx5e_tc_num_filters(struct mlx5e_priv *priv, unsigned long flags) void mlx5e_tc_clean_fdb_peer_flows(struct mlx5_eswitch *esw) { + struct mlx5_devcom_comp_dev *devcom; + struct mlx5_devcom_comp_dev *pos; struct mlx5e_tc_flow *flow, *tmp; + struct mlx5_eswitch *peer_esw; int i; - for (i = 0; i < MLX5_MAX_PORTS; i++) { - if (i == mlx5_get_dev_index(esw->dev)) - continue; + devcom = esw->devcom; + + mlx5_devcom_for_each_peer_entry(devcom, peer_esw, pos) { + i = mlx5_get_dev_index(peer_esw->dev); list_for_each_entry_safe(flow, tmp, &esw->offloads.peer_flows[i], peer[i]) mlx5e_tc_del_fdb_peers_flow(flow); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c index 1c37098e09ea51..49a637829c5941 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/acl/ingress_lgcy.c @@ -188,7 +188,7 @@ int esw_acl_ingress_lgcy_setup(struct mlx5_eswitch *esw, if (IS_ERR(vport->ingress.acl)) { err = PTR_ERR(vport->ingress.acl); vport->ingress.acl = NULL; - return err; + goto out; } err = esw_acl_ingress_lgcy_groups_create(esw, vport); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h index ad1073f7b79f78..714ad28e8445b4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h @@ -929,6 +929,7 @@ int mlx5_esw_ipsec_vf_packet_offload_set(struct mlx5_eswitch *esw, struct mlx5_v int mlx5_esw_ipsec_vf_packet_offload_supported(struct mlx5_core_dev *dev, u16 vport_num); bool mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev); +void mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev); #else /* CONFIG_MLX5_ESWITCH */ /* eswitch API stubs */ static inline int mlx5_eswitch_init(struct mlx5_core_dev *dev) { return 0; } @@ -1009,9 +1010,12 @@ mlx5_esw_host_functions_enabled(const struct mlx5_core_dev *dev) static inline bool mlx5_esw_vport_vhca_id(struct mlx5_eswitch *esw, u16 vportn, u16 *vhca_id) { - return -EOPNOTSUPP; + return false; } +static inline void +mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev) {} + #endif /* CONFIG_MLX5_ESWITCH */ #endif /* __MLX5_ESWITCH_H__ */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index ea94a727633f1f..02b7e474586d9c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -3981,6 +3981,32 @@ static bool mlx5_devlink_switchdev_active_mode_change(struct mlx5_eswitch *esw, return true; } +#define MLX5_ESW_HOLD_TIMEOUT_MS 7000 +#define MLX5_ESW_HOLD_RETRY_DELAY_MS 500 + +void mlx5_eswitch_safe_aux_devs_remove(struct mlx5_core_dev *dev) +{ + unsigned long timeout; + bool hold_esw = true; + + /* Wait for any concurrent eswitch mode transition to complete. 
*/ + if (!mlx5_esw_hold(dev)) { + timeout = jiffies + msecs_to_jiffies(MLX5_ESW_HOLD_TIMEOUT_MS); + while (!mlx5_esw_hold(dev)) { + if (!time_before(jiffies, timeout)) { + hold_esw = false; + break; + } + msleep(MLX5_ESW_HOLD_RETRY_DELAY_MS); + } + } + if (hold_esw) { + if (mlx5_eswitch_mode(dev) == MLX5_ESWITCH_OFFLOADS) + mlx5_core_reps_aux_devs_remove(dev); + mlx5_esw_release(dev); + } +} + int mlx5_devlink_eswitch_mode_set(struct devlink *devlink, u16 mode, struct netlink_ext_ack *extack) { diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c index ced747bef6415f..c348ee62cd3af9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c @@ -1198,7 +1198,8 @@ int mlx5_fs_cmd_set_tx_flow_table_root(struct mlx5_core_dev *dev, u32 ft_id, boo u32 out[MLX5_ST_SZ_DW(set_flow_table_root_out)] = {}; u32 in[MLX5_ST_SZ_DW(set_flow_table_root_in)] = {}; - if (disconnect && MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default)) + if (disconnect && + !MLX5_CAP_FLOWTABLE_NIC_TX(dev, reset_root_to_default)) return -EOPNOTSUPP; MLX5_SET(set_flow_table_root_in, in, opcode, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 4209da722f9a6c..55b4e0cceae2f5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1806,16 +1806,6 @@ static int mlx5_hca_caps_alloc(struct mlx5_core_dev *dev) return -ENOMEM; } -static int vhca_id_show(struct seq_file *file, void *priv) -{ - struct mlx5_core_dev *dev = file->private; - - seq_printf(file, "0x%x\n", MLX5_CAP_GEN(dev, vhca_id)); - return 0; -} - -DEFINE_SHOW_ATTRIBUTE(vhca_id); - static int mlx5_notifiers_init(struct mlx5_core_dev *dev) { int err; @@ -1884,7 +1874,7 @@ int mlx5_mdev_init(struct mlx5_core_dev *dev, int profile_idx) priv->numa_node = dev_to_node(mlx5_core_dma_dev(dev)); priv->dbg.dbg_root = debugfs_create_dir(dev_name(dev->device), mlx5_debugfs_root); - debugfs_create_file("vhca_id", 0400, priv->dbg.dbg_root, dev, &vhca_id_fops); + INIT_LIST_HEAD(&priv->traps); err = mlx5_cmd_init(dev); @@ -2022,6 +2012,8 @@ static int probe_one(struct pci_dev *pdev, const struct pci_device_id *id) goto err_init_one; } + mlx5_vhca_debugfs_init(dev); + pci_save_state(pdev); return 0; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h index cfebc110c02fd9..f2d74382fb85d9 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h @@ -258,6 +258,7 @@ int mlx5_wait_for_pages(struct mlx5_core_dev *dev, int *pages); void mlx5_cmd_flush(struct mlx5_core_dev *dev); void mlx5_cq_debugfs_init(struct mlx5_core_dev *dev); void mlx5_cq_debugfs_cleanup(struct mlx5_core_dev *dev); +void mlx5_vhca_debugfs_init(struct mlx5_core_dev *dev); int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group, u8 access_reg_group); @@ -290,6 +291,7 @@ int mlx5_register_device(struct mlx5_core_dev *dev); void mlx5_unregister_device(struct mlx5_core_dev *dev); void mlx5_dev_set_lightweight(struct mlx5_core_dev *dev); bool mlx5_dev_is_lightweight(struct mlx5_core_dev *dev); +void mlx5_core_reps_aux_devs_remove(struct mlx5_core_dev *dev); void mlx5_fw_reporters_create(struct mlx5_core_dev *dev); int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size); diff --git 
a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c index b706f1486504a7..c45540fe7d9d95 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/sf/dev/driver.c @@ -76,6 +76,7 @@ static int mlx5_sf_dev_probe(struct auxiliary_device *adev, const struct auxilia goto init_one_err; } + mlx5_vhca_debugfs_init(mdev); return 0; init_one_err: diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c index 36af94a2e062aa..2794f75df8fcba 100644 --- a/drivers/net/ethernet/rocker/rocker_main.c +++ b/drivers/net/ethernet/rocker/rocker_main.c @@ -1524,9 +1524,8 @@ static void rocker_world_port_post_fini(struct rocker_port *rocker_port) { struct rocker_world_ops *wops = rocker_port->rocker->wops; - if (!wops->port_post_fini) - return; - wops->port_post_fini(rocker_port); + if (wops->port_post_fini) + wops->port_post_fini(rocker_port); kfree(rocker_port->wpriv); } diff --git a/drivers/net/ethernet/sfc/mcdi_filters.c b/drivers/net/ethernet/sfc/mcdi_filters.c index 6ef96292909a2f..3db589b90b68ad 100644 --- a/drivers/net/ethernet/sfc/mcdi_filters.c +++ b/drivers/net/ethernet/sfc/mcdi_filters.c @@ -2182,12 +2182,7 @@ int efx_mcdi_rx_pull_rss_context_config(struct efx_nic *efx, int efx_mcdi_rx_pull_rss_config(struct efx_nic *efx) { - int rc; - - mutex_lock(&efx->net_dev->ethtool->rss_lock); - rc = efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context); - mutex_unlock(&efx->net_dev->ethtool->rss_lock); - return rc; + return efx_mcdi_rx_pull_rss_context_config(efx, &efx->rss_context); } void efx_mcdi_rx_restore_rss_contexts(struct efx_nic *efx) diff --git a/drivers/net/ethernet/spacemit/k1_emac.c b/drivers/net/ethernet/spacemit/k1_emac.c index 220eb5ce75833d..88e9424d2d51aa 100644 --- a/drivers/net/ethernet/spacemit/k1_emac.c +++ b/drivers/net/ethernet/spacemit/k1_emac.c @@ -1099,7 +1099,13 @@ static int emac_read_stat_cnt(struct emac_priv *priv, u8 cnt, u32 *res, 100, 10000); if (ret) { - netdev_err(priv->ndev, "Read stat timeout\n"); + /* + * This could be caused by the PHY stopping its refclk even when + * the link is up, for power saving. See also comments in + * emac_stats_update(). + */ + dev_err_ratelimited(&priv->ndev->dev, + "Read stat timeout. PHY clock stopped?\n"); return ret; } @@ -1147,17 +1153,25 @@ static void emac_stats_update(struct emac_priv *priv) assert_spin_locked(&priv->stats_lock); - if (!netif_running(priv->ndev) || !netif_device_present(priv->ndev)) { - /* Not up, don't try to update */ + /* + * We can't read statistics if the interface is not up. Also, some PHYs + * stop their reference clocks for link down power saving, which also + * causes reading statistics to time out. Don't update and don't + * reschedule in these cases. + */ + if (!netif_running(priv->ndev) || + !netif_carrier_ok(priv->ndev) || + !netif_device_present(priv->ndev)) { return; } for (i = 0; i < sizeof(priv->tx_stats) / sizeof(*tx_stats); i++) { /* - * If reading stats times out, everything is broken and there's - * nothing we can do. Reading statistics also can't return an - * error, so just return without updating and without - * rescheduling. + * If reading stats times out anyway, the stat registers will be + * stuck, and we can't really recover from that. + * + * Reading statistics also can't return an error, so just return + * without updating and without rescheduling. 
*/ if (emac_tx_read_stat_cnt(priv, i, &res)) return; @@ -1636,6 +1650,12 @@ static void emac_adjust_link(struct net_device *dev) emac_wr(priv, MAC_GLOBAL_CONTROL, ctrl); emac_set_fc_autoneg(priv); + + /* + * Reschedule stats updates now that link is up. See comments in + * emac_stats_update(). + */ + mod_timer(&priv->stats_timer, jiffies); } phy_print_status(phydev); diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c index 05de68b9f7191f..8208ecbb575c5a 100644 --- a/drivers/net/phy/micrel.c +++ b/drivers/net/phy/micrel.c @@ -2643,11 +2643,21 @@ static int kszphy_probe(struct phy_device *phydev) kszphy_parse_led_mode(phydev); - clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, "rmii-ref"); + clk = devm_clk_get_optional(&phydev->mdio.dev, "rmii-ref"); /* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */ if (!IS_ERR_OR_NULL(clk)) { - unsigned long rate = clk_get_rate(clk); bool rmii_ref_clk_sel_25_mhz; + unsigned long rate; + int err; + + err = clk_prepare_enable(clk); + if (err) { + phydev_err(phydev, "Failed to enable rmii-ref clock\n"); + return err; + } + + rate = clk_get_rate(clk); + clk_disable_unprepare(clk); if (type) priv->rmii_ref_clk_sel = type->has_rmii_ref_clk_sel; @@ -2665,13 +2675,12 @@ static int kszphy_probe(struct phy_device *phydev) } } else if (!clk) { /* unnamed clock from the generic ethernet-phy binding */ - clk = devm_clk_get_optional_enabled(&phydev->mdio.dev, NULL); + clk = devm_clk_get_optional(&phydev->mdio.dev, NULL); } if (IS_ERR(clk)) return PTR_ERR(clk); - clk_disable_unprepare(clk); priv->clk = clk; if (ksz8041_fiber_mode(phydev)) diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c index b76bea6ab2d762..5af90ca6e06318 100644 --- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c +++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c @@ -395,6 +395,7 @@ static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq, struct sk_buff *skb) { unsigned long long data_bus_addr, data_base_addr; + struct skb_shared_info *shinfo = skb_shinfo(skb); struct device *dev = rxq->dpmaif_ctrl->dev; struct dpmaif_bat_page *page_info; unsigned int data_len; @@ -402,18 +403,22 @@ static int t7xx_dpmaif_set_frag_to_skb(const struct dpmaif_rx_queue *rxq, page_info = rxq->bat_frag->bat_skb; page_info += t7xx_normal_pit_bid(pkt_info); - dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE); if (!page_info->page) return -EINVAL; + if (shinfo->nr_frags >= MAX_SKB_FRAGS) + return -EINVAL; + + dma_unmap_page(dev, page_info->data_bus_addr, page_info->data_len, DMA_FROM_DEVICE); + data_bus_addr = le32_to_cpu(pkt_info->pd.data_addr_h); data_bus_addr = (data_bus_addr << 32) + le32_to_cpu(pkt_info->pd.data_addr_l); data_base_addr = page_info->data_bus_addr; data_offset = data_bus_addr - data_base_addr; data_offset += page_info->offset; data_len = FIELD_GET(PD_PIT_DATA_LEN, le32_to_cpu(pkt_info->header)); - skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page_info->page, + skb_add_rx_frag(skb, shinfo->nr_frags, page_info->page, data_offset, data_len, page_info->data_len); page_info->page = NULL; diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 58f3097888a76e..c2bee32332fee3 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -806,8 +806,8 @@ static void nvme_unmap_data(struct request *req) if (!blk_rq_dma_unmap(req, dma_dev, &iod->dma_state, iod->total_len, map)) { if (nvme_pci_cmd_use_sgl(&iod->cmd)) - nvme_free_sgls(req, iod->descriptors[0], - 
&iod->cmd.common.dptr.sgl, attrs); + nvme_free_sgls(req, &iod->cmd.common.dptr.sgl, + iod->descriptors[0], attrs); else nvme_free_prps(req, attrs); } diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c index 8d246b8ca604c5..0103815542d49f 100644 --- a/drivers/nvme/target/io-cmd-bdev.c +++ b/drivers/nvme/target/io-cmd-bdev.c @@ -180,9 +180,10 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts) static void nvmet_bio_done(struct bio *bio) { struct nvmet_req *req = bio->bi_private; + blk_status_t blk_status = bio->bi_status; - nvmet_req_complete(req, blk_to_nvme_status(req, bio->bi_status)); nvmet_req_bio_put(req, bio); + nvmet_req_complete(req, blk_to_nvme_status(req, blk_status)); } #ifdef CONFIG_BLK_DEV_INTEGRITY diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c index 5619ec91785871..a2a13617c6f49e 100644 --- a/drivers/of/of_reserved_mem.c +++ b/drivers/of/of_reserved_mem.c @@ -157,13 +157,19 @@ static int __init __reserved_mem_reserve_reg(unsigned long node, phys_addr_t base, size; int i, len; const __be32 *prop; - bool nomap; + bool nomap, default_cma; prop = of_flat_dt_get_addr_size_prop(node, "reg", &len); if (!prop) return -ENOENT; nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; + default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL); + + if (default_cma && cma_skip_dt_default_reserved_mem()) { + pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n"); + return -EINVAL; + } for (i = 0; i < len; i++) { u64 b, s; @@ -248,10 +254,13 @@ void __init fdt_scan_reserved_mem_reg_nodes(void) fdt_for_each_subnode(child, fdt, node) { const char *uname; + bool default_cma = of_get_flat_dt_prop(child, "linux,cma-default", NULL); u64 b, s; if (!of_fdt_device_is_available(fdt, child)) continue; + if (default_cma && cma_skip_dt_default_reserved_mem()) + continue; if (!of_flat_dt_get_addr_size(child, "reg", &b, &s)) continue; @@ -389,7 +398,7 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam phys_addr_t base = 0, align = 0, size; int i, len; const __be32 *prop; - bool nomap; + bool nomap, default_cma; int ret; prop = of_get_flat_dt_prop(node, "size", &len); @@ -413,6 +422,12 @@ static int __init __reserved_mem_alloc_size(unsigned long node, const char *unam } nomap = of_get_flat_dt_prop(node, "no-map", NULL) != NULL; + default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL); + + if (default_cma && cma_skip_dt_default_reserved_mem()) { + pr_err("Skipping dt linux,cma-default for \"cma=\" kernel param.\n"); + return -EINVAL; + } /* Need adjust the alignment to satisfy the CMA requirement */ if (IS_ENABLED(CONFIG_CMA) diff --git a/drivers/pci/ide.c b/drivers/pci/ide.c index f0ef474e1a0da7..23f554490539f2 100644 --- a/drivers/pci/ide.c +++ b/drivers/pci/ide.c @@ -11,7 +11,6 @@ #include #include #include -#include #include "pci.h" @@ -168,7 +167,7 @@ void pci_ide_init(struct pci_dev *pdev) for (u16 i = 0; i < nr_streams; i++) { int pos = __sel_ide_offset(ide_cap, nr_link_ide, i, nr_ide_mem); - pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CAP, &val); + pci_read_config_dword(pdev, pos + PCI_IDE_SEL_CTL, &val); if (val & PCI_IDE_SEL_CTL_EN) continue; val &= ~PCI_IDE_SEL_CTL_ID; @@ -283,8 +282,8 @@ struct pci_ide *pci_ide_stream_alloc(struct pci_dev *pdev) /* for SR-IOV case, cover all VFs */ num_vf = pci_num_vf(pdev); if (num_vf) - rid_end = PCI_DEVID(pci_iov_virtfn_bus(pdev, num_vf), - pci_iov_virtfn_devfn(pdev, num_vf)); + rid_end = 
PCI_DEVID(pci_iov_virtfn_bus(pdev, num_vf - 1), + pci_iov_virtfn_devfn(pdev, num_vf - 1)); else rid_end = pci_dev_id(pdev); @@ -373,9 +372,6 @@ void pci_ide_stream_release(struct pci_ide *ide) if (ide->partner[PCI_IDE_EP].enable) pci_ide_stream_disable(pdev, ide); - if (ide->tsm_dev) - tsm_ide_stream_unregister(ide); - if (ide->partner[PCI_IDE_RP].setup) pci_ide_stream_teardown(rp, ide); diff --git a/drivers/pinctrl/meson/pinctrl-meson.c b/drivers/pinctrl/meson/pinctrl-meson.c index 18295b15ecd9dd..4507dc8b5563c0 100644 --- a/drivers/pinctrl/meson/pinctrl-meson.c +++ b/drivers/pinctrl/meson/pinctrl-meson.c @@ -619,7 +619,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc) pc->chip.set = meson_gpio_set; pc->chip.base = -1; pc->chip.ngpio = pc->data->num_pins; - pc->chip.can_sleep = false; + pc->chip.can_sleep = true; ret = gpiochip_add_data(&pc->chip, pc); if (ret) { diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index e44ef262beec6e..2fc67aeafdb39e 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -3545,10 +3545,9 @@ static int rockchip_pmx_set(struct pinctrl_dev *pctldev, unsigned selector, return 0; } -static int rockchip_pmx_gpio_set_direction(struct pinctrl_dev *pctldev, - struct pinctrl_gpio_range *range, - unsigned offset, - bool input) +static int rockchip_pmx_gpio_request_enable(struct pinctrl_dev *pctldev, + struct pinctrl_gpio_range *range, + unsigned int offset) { struct rockchip_pinctrl *info = pinctrl_dev_get_drvdata(pctldev); struct rockchip_pin_bank *bank; @@ -3562,7 +3561,7 @@ static const struct pinmux_ops rockchip_pmx_ops = { .get_function_name = rockchip_pmx_get_func_name, .get_function_groups = rockchip_pmx_get_groups, .set_mux = rockchip_pmx_set, - .gpio_set_direction = rockchip_pmx_gpio_set_direction, + .gpio_request_enable = rockchip_pmx_gpio_request_enable, }; /* diff --git a/drivers/pinctrl/pinctrl-th1520.c b/drivers/pinctrl/pinctrl-th1520.c index e641bad6728cc5..83e9c9f77370c6 100644 --- a/drivers/pinctrl/pinctrl-th1520.c +++ b/drivers/pinctrl/pinctrl-th1520.c @@ -287,7 +287,7 @@ static const struct pinctrl_pin_desc th1520_group3_pins[] = { TH1520_PAD(5, QSPI0_D0_MOSI, QSPI, PWM, I2S, GPIO, ____, ____, 0), TH1520_PAD(6, QSPI0_D1_MISO, QSPI, PWM, I2S, GPIO, ____, ____, 0), TH1520_PAD(7, QSPI0_D2_WP, QSPI, PWM, I2S, GPIO, ____, ____, 0), - TH1520_PAD(8, QSPI1_D3_HOLD, QSPI, ____, I2S, GPIO, ____, ____, 0), + TH1520_PAD(8, QSPI0_D3_HOLD, QSPI, ____, I2S, GPIO, ____, ____, 0), TH1520_PAD(9, I2C2_SCL, I2C, UART, ____, GPIO, ____, ____, 0), TH1520_PAD(10, I2C2_SDA, I2C, UART, ____, GPIO, ____, ____, 0), TH1520_PAD(11, I2C3_SCL, I2C, ____, ____, GPIO, ____, ____, 0), diff --git a/drivers/pinctrl/qcom/Kconfig b/drivers/pinctrl/qcom/Kconfig index c480e8b7850329..f56592411cf6d9 100644 --- a/drivers/pinctrl/qcom/Kconfig +++ b/drivers/pinctrl/qcom/Kconfig @@ -61,13 +61,14 @@ config PINCTRL_LPASS_LPI (Low Power Island) found on the Qualcomm Technologies Inc SoCs. config PINCTRL_SC7280_LPASS_LPI - tristate "Qualcomm Technologies Inc SC7280 LPASS LPI pin controller driver" + tristate "Qualcomm Technologies Inc SC7280 and SM8350 LPASS LPI pin controller driver" depends on ARM64 || COMPILE_TEST depends on PINCTRL_LPASS_LPI help This is the pinctrl, pinmux, pinconf and gpiolib driver for the Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI - (Low Power Island) found on the Qualcomm Technologies Inc SC7280 platform. 
+ (Low Power Island) found on the Qualcomm Technologies Inc SC7280 + and SM8350 platforms. config PINCTRL_SDM660_LPASS_LPI tristate "Qualcomm Technologies Inc SDM660 LPASS LPI pin controller driver" @@ -106,16 +107,6 @@ config PINCTRL_SM8250_LPASS_LPI Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI (Low Power Island) found on the Qualcomm Technologies Inc SM8250 platform. -config PINCTRL_SM8350_LPASS_LPI - tristate "Qualcomm Technologies Inc SM8350 LPASS LPI pin controller driver" - depends on ARM64 || COMPILE_TEST - depends on PINCTRL_LPASS_LPI - help - This is the pinctrl, pinmux, pinconf and gpiolib driver for the - Qualcomm Technologies Inc LPASS (Low Power Audio SubSystem) LPI - (Low Power Island) found on the Qualcomm Technologies Inc SM8350 - platform. - config PINCTRL_SM8450_LPASS_LPI tristate "Qualcomm Technologies Inc SM8450 LPASS LPI pin controller driver" depends on ARM64 || COMPILE_TEST diff --git a/drivers/pinctrl/qcom/Makefile b/drivers/pinctrl/qcom/Makefile index 748b17a77b2cc4..4269d1781015ed 100644 --- a/drivers/pinctrl/qcom/Makefile +++ b/drivers/pinctrl/qcom/Makefile @@ -64,7 +64,6 @@ obj-$(CONFIG_PINCTRL_SM8150) += pinctrl-sm8150.o obj-$(CONFIG_PINCTRL_SM8250) += pinctrl-sm8250.o obj-$(CONFIG_PINCTRL_SM8250_LPASS_LPI) += pinctrl-sm8250-lpass-lpi.o obj-$(CONFIG_PINCTRL_SM8350) += pinctrl-sm8350.o -obj-$(CONFIG_PINCTRL_SM8350_LPASS_LPI) += pinctrl-sm8350-lpass-lpi.o obj-$(CONFIG_PINCTRL_SM8450) += pinctrl-sm8450.o obj-$(CONFIG_PINCTRL_SM8450_LPASS_LPI) += pinctrl-sm8450-lpass-lpi.o obj-$(CONFIG_PINCTRL_SM8550) += pinctrl-sm8550.o diff --git a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c index 78212f9928430c..76aed32962794e 100644 --- a/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-lpass-lpi.c @@ -312,6 +312,22 @@ static const struct pinconf_ops lpi_gpio_pinconf_ops = { .pin_config_group_set = lpi_config_set, }; +static int lpi_gpio_get_direction(struct gpio_chip *chip, unsigned int pin) +{ + unsigned long config = pinconf_to_config_packed(PIN_CONFIG_LEVEL, 0); + struct lpi_pinctrl *state = gpiochip_get_data(chip); + unsigned long arg; + int ret; + + ret = lpi_config_get(state->ctrl, pin, &config); + if (ret) + return ret; + + arg = pinconf_to_config_argument(config); + + return arg ? 
GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; +} + static int lpi_gpio_direction_input(struct gpio_chip *chip, unsigned int pin) { struct lpi_pinctrl *state = gpiochip_get_data(chip); @@ -409,6 +425,7 @@ static void lpi_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip) #endif static const struct gpio_chip lpi_gpio_template = { + .get_direction = lpi_gpio_get_direction, .direction_input = lpi_gpio_direction_input, .direction_output = lpi_gpio_direction_output, .get = lpi_gpio_get, diff --git a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c index 1161f0a91a002a..750f410311a8fb 100644 --- a/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c +++ b/drivers/pinctrl/qcom/pinctrl-sc7280-lpass-lpi.c @@ -131,6 +131,9 @@ static const struct of_device_id lpi_pinctrl_of_match[] = { { .compatible = "qcom,sc7280-lpass-lpi-pinctrl", .data = &sc7280_lpi_data, + }, { + .compatible = "qcom,sm8350-lpass-lpi-pinctrl", + .data = &sc7280_lpi_data, }, { } }; diff --git a/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c b/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c deleted file mode 100644 index 7b146b4acfdf42..00000000000000 --- a/drivers/pinctrl/qcom/pinctrl-sm8350-lpass-lpi.c +++ /dev/null @@ -1,151 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved. - * Copyright (c) 2020-2023 Linaro Ltd. - */ - -#include -#include -#include - -#include "pinctrl-lpass-lpi.h" - -enum lpass_lpi_functions { - LPI_MUX_dmic1_clk, - LPI_MUX_dmic1_data, - LPI_MUX_dmic2_clk, - LPI_MUX_dmic2_data, - LPI_MUX_dmic3_clk, - LPI_MUX_dmic3_data, - LPI_MUX_i2s1_clk, - LPI_MUX_i2s1_data, - LPI_MUX_i2s1_ws, - LPI_MUX_i2s2_clk, - LPI_MUX_i2s2_data, - LPI_MUX_i2s2_ws, - LPI_MUX_qua_mi2s_data, - LPI_MUX_qua_mi2s_sclk, - LPI_MUX_qua_mi2s_ws, - LPI_MUX_swr_rx_clk, - LPI_MUX_swr_rx_data, - LPI_MUX_swr_tx_clk, - LPI_MUX_swr_tx_data, - LPI_MUX_wsa_swr_clk, - LPI_MUX_wsa_swr_data, - LPI_MUX_gpio, - LPI_MUX__, -}; - -static const struct pinctrl_pin_desc sm8350_lpi_pins[] = { - PINCTRL_PIN(0, "gpio0"), - PINCTRL_PIN(1, "gpio1"), - PINCTRL_PIN(2, "gpio2"), - PINCTRL_PIN(3, "gpio3"), - PINCTRL_PIN(4, "gpio4"), - PINCTRL_PIN(5, "gpio5"), - PINCTRL_PIN(6, "gpio6"), - PINCTRL_PIN(7, "gpio7"), - PINCTRL_PIN(8, "gpio8"), - PINCTRL_PIN(9, "gpio9"), - PINCTRL_PIN(10, "gpio10"), - PINCTRL_PIN(11, "gpio11"), - PINCTRL_PIN(12, "gpio12"), - PINCTRL_PIN(13, "gpio13"), - PINCTRL_PIN(14, "gpio14"), -}; - -static const char * const swr_tx_clk_groups[] = { "gpio0" }; -static const char * const swr_tx_data_groups[] = { "gpio1", "gpio2", "gpio5", "gpio14" }; -static const char * const swr_rx_clk_groups[] = { "gpio3" }; -static const char * const swr_rx_data_groups[] = { "gpio4", "gpio5" }; -static const char * const dmic1_clk_groups[] = { "gpio6" }; -static const char * const dmic1_data_groups[] = { "gpio7" }; -static const char * const dmic2_clk_groups[] = { "gpio8" }; -static const char * const dmic2_data_groups[] = { "gpio9" }; -static const char * const i2s2_clk_groups[] = { "gpio10" }; -static const char * const i2s2_ws_groups[] = { "gpio11" }; -static const char * const dmic3_clk_groups[] = { "gpio12" }; -static const char * const dmic3_data_groups[] = { "gpio13" }; -static const char * const qua_mi2s_sclk_groups[] = { "gpio0" }; -static const char * const qua_mi2s_ws_groups[] = { "gpio1" }; -static const char * const qua_mi2s_data_groups[] = { "gpio2", "gpio3", "gpio4" }; -static const char * const i2s1_clk_groups[] = { "gpio6" }; 
-static const char * const i2s1_ws_groups[] = { "gpio7" }; -static const char * const i2s1_data_groups[] = { "gpio8", "gpio9" }; -static const char * const wsa_swr_clk_groups[] = { "gpio10" }; -static const char * const wsa_swr_data_groups[] = { "gpio11" }; -static const char * const i2s2_data_groups[] = { "gpio12", "gpio12" }; - -static const struct lpi_pingroup sm8350_groups[] = { - LPI_PINGROUP(0, 0, swr_tx_clk, qua_mi2s_sclk, _, _), - LPI_PINGROUP(1, 2, swr_tx_data, qua_mi2s_ws, _, _), - LPI_PINGROUP(2, 4, swr_tx_data, qua_mi2s_data, _, _), - LPI_PINGROUP(3, 8, swr_rx_clk, qua_mi2s_data, _, _), - LPI_PINGROUP(4, 10, swr_rx_data, qua_mi2s_data, _, _), - LPI_PINGROUP(5, 12, swr_tx_data, swr_rx_data, _, _), - LPI_PINGROUP(6, LPI_NO_SLEW, dmic1_clk, i2s1_clk, _, _), - LPI_PINGROUP(7, LPI_NO_SLEW, dmic1_data, i2s1_ws, _, _), - LPI_PINGROUP(8, LPI_NO_SLEW, dmic2_clk, i2s1_data, _, _), - LPI_PINGROUP(9, LPI_NO_SLEW, dmic2_data, i2s1_data, _, _), - LPI_PINGROUP(10, 16, i2s2_clk, wsa_swr_clk, _, _), - LPI_PINGROUP(11, 18, i2s2_ws, wsa_swr_data, _, _), - LPI_PINGROUP(12, LPI_NO_SLEW, dmic3_clk, i2s2_data, _, _), - LPI_PINGROUP(13, LPI_NO_SLEW, dmic3_data, i2s2_data, _, _), - LPI_PINGROUP(14, 6, swr_tx_data, _, _, _), -}; - -static const struct lpi_function sm8350_functions[] = { - LPI_FUNCTION(dmic1_clk), - LPI_FUNCTION(dmic1_data), - LPI_FUNCTION(dmic2_clk), - LPI_FUNCTION(dmic2_data), - LPI_FUNCTION(dmic3_clk), - LPI_FUNCTION(dmic3_data), - LPI_FUNCTION(i2s1_clk), - LPI_FUNCTION(i2s1_data), - LPI_FUNCTION(i2s1_ws), - LPI_FUNCTION(i2s2_clk), - LPI_FUNCTION(i2s2_data), - LPI_FUNCTION(i2s2_ws), - LPI_FUNCTION(qua_mi2s_data), - LPI_FUNCTION(qua_mi2s_sclk), - LPI_FUNCTION(qua_mi2s_ws), - LPI_FUNCTION(swr_rx_clk), - LPI_FUNCTION(swr_rx_data), - LPI_FUNCTION(swr_tx_clk), - LPI_FUNCTION(swr_tx_data), - LPI_FUNCTION(wsa_swr_clk), - LPI_FUNCTION(wsa_swr_data), -}; - -static const struct lpi_pinctrl_variant_data sm8350_lpi_data = { - .pins = sm8350_lpi_pins, - .npins = ARRAY_SIZE(sm8350_lpi_pins), - .groups = sm8350_groups, - .ngroups = ARRAY_SIZE(sm8350_groups), - .functions = sm8350_functions, - .nfunctions = ARRAY_SIZE(sm8350_functions), -}; - -static const struct of_device_id lpi_pinctrl_of_match[] = { - { - .compatible = "qcom,sm8350-lpass-lpi-pinctrl", - .data = &sm8350_lpi_data, - }, - { } -}; -MODULE_DEVICE_TABLE(of, lpi_pinctrl_of_match); - -static struct platform_driver lpi_pinctrl_driver = { - .driver = { - .name = "qcom-sm8350-lpass-lpi-pinctrl", - .of_match_table = lpi_pinctrl_of_match, - }, - .probe = lpi_pinctrl_probe, - .remove = lpi_pinctrl_remove, -}; -module_platform_driver(lpi_pinctrl_driver); - -MODULE_AUTHOR("Krzysztof Kozlowski "); -MODULE_DESCRIPTION("QTI SM8350 LPI GPIO pin control driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/platform/x86/amd/pmc/pmc-quirks.c b/drivers/platform/x86/amd/pmc/pmc-quirks.c index 404e62ad293a98..ed285afaf9b0d7 100644 --- a/drivers/platform/x86/amd/pmc/pmc-quirks.c +++ b/drivers/platform/x86/amd/pmc/pmc-quirks.c @@ -302,6 +302,13 @@ static const struct dmi_system_id fwbug_list[] = { DMI_MATCH(DMI_BOARD_NAME, "XxKK4NAx_XxSP4NAx"), } }, + { + .ident = "MECHREVO Wujie 15X Pro", + .driver_data = &quirk_spurious_8042, + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "WUJIE Series-X5SP4NAG"), + } + }, {} }; diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c index 6b1b8e444e241c..74d3eb83f56a63 100644 --- a/drivers/platform/x86/classmate-laptop.c +++ b/drivers/platform/x86/classmate-laptop.c @@ -207,7 +207,12 @@ 
static ssize_t cmpc_accel_sensitivity_show_v4(struct device *dev, acpi = to_acpi_device(dev); inputdev = dev_get_drvdata(&acpi->dev); + if (!inputdev) + return -ENXIO; + accel = dev_get_drvdata(&inputdev->dev); + if (!accel) + return -ENXIO; return sysfs_emit(buf, "%d\n", accel->sensitivity); } @@ -224,7 +229,12 @@ static ssize_t cmpc_accel_sensitivity_store_v4(struct device *dev, acpi = to_acpi_device(dev); inputdev = dev_get_drvdata(&acpi->dev); + if (!inputdev) + return -ENXIO; + accel = dev_get_drvdata(&inputdev->dev); + if (!accel) + return -ENXIO; r = kstrtoul(buf, 0, &sensitivity); if (r) @@ -256,7 +266,12 @@ static ssize_t cmpc_accel_g_select_show_v4(struct device *dev, acpi = to_acpi_device(dev); inputdev = dev_get_drvdata(&acpi->dev); + if (!inputdev) + return -ENXIO; + accel = dev_get_drvdata(&inputdev->dev); + if (!accel) + return -ENXIO; return sysfs_emit(buf, "%d\n", accel->g_select); } @@ -273,7 +288,12 @@ static ssize_t cmpc_accel_g_select_store_v4(struct device *dev, acpi = to_acpi_device(dev); inputdev = dev_get_drvdata(&acpi->dev); + if (!inputdev) + return -ENXIO; + accel = dev_get_drvdata(&inputdev->dev); + if (!accel) + return -ENXIO; r = kstrtoul(buf, 0, &g_select); if (r) @@ -302,6 +322,8 @@ static int cmpc_accel_open_v4(struct input_dev *input) acpi = to_acpi_device(input->dev.parent); accel = dev_get_drvdata(&input->dev); + if (!accel) + return -ENXIO; cmpc_accel_set_sensitivity_v4(acpi->handle, accel->sensitivity); cmpc_accel_set_g_select_v4(acpi->handle, accel->g_select); @@ -549,7 +571,12 @@ static ssize_t cmpc_accel_sensitivity_show(struct device *dev, acpi = to_acpi_device(dev); inputdev = dev_get_drvdata(&acpi->dev); + if (!inputdev) + return -ENXIO; + accel = dev_get_drvdata(&inputdev->dev); + if (!accel) + return -ENXIO; return sysfs_emit(buf, "%d\n", accel->sensitivity); } @@ -566,7 +593,12 @@ static ssize_t cmpc_accel_sensitivity_store(struct device *dev, acpi = to_acpi_device(dev); inputdev = dev_get_drvdata(&acpi->dev); + if (!inputdev) + return -ENXIO; + accel = dev_get_drvdata(&inputdev->dev); + if (!accel) + return -ENXIO; r = kstrtoul(buf, 0, &sensitivity); if (r) diff --git a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c index dbe096eefa7582..51e8977d3eb4a8 100644 --- a/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c +++ b/drivers/platform/x86/hp/hp-bioscfg/bioscfg.c @@ -696,6 +696,11 @@ static int hp_init_bios_package_attribute(enum hp_wmi_data_type attr_type, return ret; } + if (!str_value || !str_value[0]) { + pr_debug("Ignoring attribute with empty name\n"); + goto pack_attr_exit; + } + /* All duplicate attributes found are ignored */ duplicate = kset_find_obj(temp_kset, str_value); if (duplicate) { diff --git a/drivers/platform/x86/intel/plr_tpmi.c b/drivers/platform/x86/intel/plr_tpmi.c index 58132da4774570..05727169f49c14 100644 --- a/drivers/platform/x86/intel/plr_tpmi.c +++ b/drivers/platform/x86/intel/plr_tpmi.c @@ -316,7 +316,7 @@ static int intel_plr_probe(struct auxiliary_device *auxdev, const struct auxilia snprintf(name, sizeof(name), "domain%d", i); dentry = debugfs_create_dir(name, plr->dbgfs_dir); - debugfs_create_file("status", 0444, dentry, &plr->die_info[i], + debugfs_create_file("status", 0644, dentry, &plr->die_info[i], &plr_status_fops); } diff --git a/drivers/platform/x86/intel/telemetry/debugfs.c b/drivers/platform/x86/intel/telemetry/debugfs.c index 70e5736c44c71a..189c61ff7ff0c0 100644 --- a/drivers/platform/x86/intel/telemetry/debugfs.c +++ 
b/drivers/platform/x86/intel/telemetry/debugfs.c @@ -449,7 +449,7 @@ static int telem_pss_states_show(struct seq_file *s, void *unused) for (index = 0; index < debugfs_conf->pss_ltr_evts; index++) { seq_printf(s, "%-32s\t%u\n", debugfs_conf->pss_ltr_data[index].name, - pss_s0ix_wakeup[index]); + pss_ltr_blkd[index]); } seq_puts(s, "\n--------------------------------------\n"); @@ -459,7 +459,7 @@ static int telem_pss_states_show(struct seq_file *s, void *unused) for (index = 0; index < debugfs_conf->pss_wakeup_evts; index++) { seq_printf(s, "%-32s\t%u\n", debugfs_conf->pss_wakeup[index].name, - pss_ltr_blkd[index]); + pss_s0ix_wakeup[index]); } return 0; diff --git a/drivers/platform/x86/intel/telemetry/pltdrv.c b/drivers/platform/x86/intel/telemetry/pltdrv.c index f23c170a55dc67..d9aa349f81e412 100644 --- a/drivers/platform/x86/intel/telemetry/pltdrv.c +++ b/drivers/platform/x86/intel/telemetry/pltdrv.c @@ -610,7 +610,7 @@ static int telemetry_setup(struct platform_device *pdev) /* Get telemetry Info */ events = (read_buf & TELEM_INFO_SRAMEVTS_MASK) >> TELEM_INFO_SRAMEVTS_SHIFT; - event_regs = read_buf & TELEM_INFO_SRAMEVTS_MASK; + event_regs = read_buf & TELEM_INFO_NENABLES_MASK; if ((events < TELEM_MAX_EVENTS_SRAM) || (event_regs < TELEM_MAX_EVENTS_SRAM)) { dev_err(&pdev->dev, "PSS:Insufficient Space for SRAM Trace\n"); diff --git a/drivers/platform/x86/intel/vsec.c b/drivers/platform/x86/intel/vsec.c index ecfc7703f20196..012d87878afdd8 100644 --- a/drivers/platform/x86/intel/vsec.c +++ b/drivers/platform/x86/intel/vsec.c @@ -766,6 +766,7 @@ static const struct intel_vsec_platform_info lnl_info = { #define PCI_DEVICE_ID_INTEL_VSEC_LNL_M 0x647d #define PCI_DEVICE_ID_INTEL_VSEC_PTL 0xb07d #define PCI_DEVICE_ID_INTEL_VSEC_WCL 0xfd7d +#define PCI_DEVICE_ID_INTEL_VSEC_NVL 0xd70d static const struct pci_device_id intel_vsec_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, VSEC_ADL, &tgl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_DG1, &dg1_info) }, @@ -778,6 +779,7 @@ static const struct pci_device_id intel_vsec_pci_ids[] = { { PCI_DEVICE_DATA(INTEL, VSEC_LNL_M, &lnl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_PTL, &mtl_info) }, { PCI_DEVICE_DATA(INTEL, VSEC_WCL, &mtl_info) }, + { PCI_DEVICE_DATA(INTEL, VSEC_NVL, &mtl_info) }, { } }; MODULE_DEVICE_TABLE(pci, intel_vsec_pci_ids); diff --git a/drivers/platform/x86/lg-laptop.c b/drivers/platform/x86/lg-laptop.c index f92e89c75db98a..61ef7a218a80d6 100644 --- a/drivers/platform/x86/lg-laptop.c +++ b/drivers/platform/x86/lg-laptop.c @@ -838,8 +838,17 @@ static int acpi_add(struct acpi_device *device) case 'P': year = 2021; break; - default: + case 'Q': year = 2022; + break; + case 'R': + year = 2023; + break; + case 'S': + year = 2024; + break; + default: + year = 2025; } break; default: diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c index 255317e6fec881..937f1a5b78edfb 100644 --- a/drivers/platform/x86/panasonic-laptop.c +++ b/drivers/platform/x86/panasonic-laptop.c @@ -1089,7 +1089,7 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) PLATFORM_DEVID_NONE, NULL, 0); if (IS_ERR(pcc->platform)) { result = PTR_ERR(pcc->platform); - goto out_backlight; + goto out_sysfs; } result = device_create_file(&pcc->platform->dev, &dev_attr_cdpower); @@ -1105,6 +1105,8 @@ static int acpi_pcc_hotkey_add(struct acpi_device *device) out_platform: platform_device_unregister(pcc->platform); +out_sysfs: + sysfs_remove_group(&device->dev.kobj, &pcc_attr_group); out_backlight: backlight_device_unregister(pcc->backlight); 
out_input: diff --git a/drivers/platform/x86/toshiba_haps.c b/drivers/platform/x86/toshiba_haps.c index 03dfddeee0c0af..e9324bf16aea4b 100644 --- a/drivers/platform/x86/toshiba_haps.c +++ b/drivers/platform/x86/toshiba_haps.c @@ -183,7 +183,7 @@ static int toshiba_haps_add(struct acpi_device *acpi_dev) pr_info("Toshiba HDD Active Protection Sensor device\n"); - haps = kzalloc(sizeof(struct toshiba_haps_dev), GFP_KERNEL); + haps = devm_kzalloc(&acpi_dev->dev, sizeof(*haps), GFP_KERNEL); if (!haps) return -ENOMEM; diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c index 4e899ec1477d46..b1cba986f0fbd3 100644 --- a/drivers/scsi/be2iscsi/be_mgmt.c +++ b/drivers/scsi/be2iscsi/be_mgmt.c @@ -1025,6 +1025,7 @@ unsigned int beiscsi_boot_get_sinfo(struct beiscsi_hba *phba) &nonemb_cmd->dma, GFP_KERNEL); if (!nonemb_cmd->va) { + free_mcc_wrb(ctrl, tag); mutex_unlock(&ctrl->mbox_lock); return 0; } diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 16a44c0917e18b..e939bc88e15197 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -4489,7 +4489,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len, fail_elsrej: dma_pool_destroy(ha->purex_dma_pool); fail_flt: - dma_free_coherent(&ha->pdev->dev, SFP_DEV_SIZE, + dma_free_coherent(&ha->pdev->dev, sizeof(struct qla_flt_header) + FLT_REGIONS_SIZE, ha->flt, ha->flt_dma); fail_flt_buffer: diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index fef840b5457407..c18a0c946f7627 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c @@ -396,7 +396,7 @@ EXPORT_SYMBOL_GPL(qcom_smem_bust_hwspin_lock_by_host); */ bool qcom_smem_is_available(void) { - return !!__smem; + return !IS_ERR(__smem); } EXPORT_SYMBOL_GPL(qcom_smem_is_available); @@ -1247,7 +1247,8 @@ static void qcom_smem_remove(struct platform_device *pdev) { platform_device_unregister(__smem->socinfo); - __smem = NULL; + /* Set to -EPROBE_DEFER to signal unprobed state */ + __smem = ERR_PTR(-EPROBE_DEFER); } static const struct of_device_id qcom_smem_of_match[] = { diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c index 9f167ff8da7b07..09120a538a401c 100644 --- a/drivers/target/sbp/sbp_target.c +++ b/drivers/target/sbp/sbp_target.c @@ -1960,12 +1960,12 @@ static struct se_portal_group *sbp_make_tpg(struct se_wwn *wwn, container_of(wwn, struct sbp_tport, tport_wwn); struct sbp_tpg *tpg; - unsigned long tpgt; + u16 tpgt; int ret; if (strstr(name, "tpgt_") != name) return ERR_PTR(-EINVAL); - if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX) + if (kstrtou16(name + 5, 10, &tpgt)) return ERR_PTR(-EINVAL); if (tport->tpg) { diff --git a/drivers/ufs/host/ufs-amd-versal2.c b/drivers/ufs/host/ufs-amd-versal2.c index 40543db621a136..6c454ae8a9c83a 100644 --- a/drivers/ufs/host/ufs-amd-versal2.c +++ b/drivers/ufs/host/ufs-amd-versal2.c @@ -367,7 +367,7 @@ static int ufs_versal2_hce_enable_notify(struct ufs_hba *hba, { int ret = 0; - if (status == PRE_CHANGE) { + if (status == POST_CHANGE) { ret = ufs_versal2_phy_init(hba); if (ret) dev_err(hba->dev, "Phy init failed (%d)\n", ret); diff --git a/drivers/vfio/pci/vfio_pci_dmabuf.c b/drivers/vfio/pci/vfio_pci_dmabuf.c index d4d0f7d08c53e2..4be4a85005cbcb 100644 --- a/drivers/vfio/pci/vfio_pci_dmabuf.c +++ b/drivers/vfio/pci/vfio_pci_dmabuf.c @@ -20,6 +20,16 @@ struct vfio_pci_dma_buf { u8 revoked : 1; }; +static int vfio_pci_dma_buf_pin(struct dma_buf_attachment *attachment) +{ + return -EOPNOTSUPP; +} + 
+static void vfio_pci_dma_buf_unpin(struct dma_buf_attachment *attachment) +{ + /* Do nothing */ +} + static int vfio_pci_dma_buf_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attachment) { @@ -76,6 +86,8 @@ static void vfio_pci_dma_buf_release(struct dma_buf *dmabuf) } static const struct dma_buf_ops vfio_pci_dmabuf_ops = { + .pin = vfio_pci_dma_buf_pin, + .unpin = vfio_pci_dma_buf_unpin, .attach = vfio_pci_dma_buf_attach, .map_dma_buf = vfio_pci_dma_buf_map, .unmap_dma_buf = vfio_pci_dma_buf_unmap, diff --git a/drivers/virt/coco/tsm-core.c b/drivers/virt/coco/tsm-core.c index f027876a2f1987..8712df8596a183 100644 --- a/drivers/virt/coco/tsm-core.c +++ b/drivers/virt/coco/tsm-core.c @@ -4,16 +4,12 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include -#include -#include #include #include #include #include -#include static struct class *tsm_class; -static DECLARE_RWSEM(tsm_rwsem); static DEFINE_IDA(tsm_ida); static int match_id(struct device *dev, const void *data) @@ -108,32 +104,6 @@ void tsm_unregister(struct tsm_dev *tsm_dev) } EXPORT_SYMBOL_GPL(tsm_unregister); -/* must be invoked between tsm_register / tsm_unregister */ -int tsm_ide_stream_register(struct pci_ide *ide) -{ - struct pci_dev *pdev = ide->pdev; - struct pci_tsm *tsm = pdev->tsm; - struct tsm_dev *tsm_dev = tsm->tsm_dev; - int rc; - - rc = sysfs_create_link(&tsm_dev->dev.kobj, &pdev->dev.kobj, ide->name); - if (rc) - return rc; - - ide->tsm_dev = tsm_dev; - return 0; -} -EXPORT_SYMBOL_GPL(tsm_ide_stream_register); - -void tsm_ide_stream_unregister(struct pci_ide *ide) -{ - struct tsm_dev *tsm_dev = ide->tsm_dev; - - ide->tsm_dev = NULL; - sysfs_remove_link(&tsm_dev->dev.kobj, ide->name); -} -EXPORT_SYMBOL_GPL(tsm_ide_stream_unregister); - static void tsm_release(struct device *dev) { struct tsm_dev *tsm_dev = container_of(dev, typeof(*tsm_dev), dev); diff --git a/fs/9p/vfs_dir.c b/fs/9p/vfs_dir.c index e0d34e4e9076e3..af7f72abbb76aa 100644 --- a/fs/9p/vfs_dir.c +++ b/fs/9p/vfs_dir.c @@ -242,6 +242,7 @@ const struct file_operations v9fs_dir_operations = { .iterate_shared = v9fs_dir_readdir, .open = v9fs_file_open, .release = v9fs_dir_release, + .setlease = simple_nosetlease, }; const struct file_operations v9fs_dir_operations_dotl = { @@ -251,4 +252,5 @@ const struct file_operations v9fs_dir_operations_dotl = { .open = v9fs_file_open, .release = v9fs_dir_release, .fsync = v9fs_file_fsync_dotl, + .setlease = simple_nosetlease, }; diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 89022e9f393b0a..2833b44f4b4f22 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -498,28 +498,6 @@ static int btree_migrate_folio(struct address_space *mapping, #define btree_migrate_folio NULL #endif -static int btree_writepages(struct address_space *mapping, - struct writeback_control *wbc) -{ - int ret; - - if (wbc->sync_mode == WB_SYNC_NONE) { - struct btrfs_fs_info *fs_info; - - if (wbc->for_kupdate) - return 0; - - fs_info = inode_to_fs_info(mapping->host); - /* this is a bit racy, but that's ok */ - ret = __percpu_counter_compare(&fs_info->dirty_metadata_bytes, - BTRFS_DIRTY_METADATA_THRESH, - fs_info->dirty_metadata_batch); - if (ret < 0) - return 0; - } - return btree_write_cache_pages(mapping, wbc); -} - static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags) { if (folio_test_writeback(folio) || folio_test_dirty(folio)) diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index a4b74023618d30..f6cca3c97166f7 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c @@ -2286,8 +2286,7 @@ 
void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start, } } -int btree_write_cache_pages(struct address_space *mapping, - struct writeback_control *wbc) +int btree_writepages(struct address_space *mapping, struct writeback_control *wbc) { struct btrfs_eb_write_context ctx = { .wbc = wbc }; struct btrfs_fs_info *fs_info = inode_to_fs_info(mapping->host); diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index 02ebb2f238afc1..73571d5d3d5ad9 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h @@ -237,8 +237,7 @@ void extent_write_locked_range(struct inode *inode, const struct folio *locked_f u64 start, u64 end, struct writeback_control *wbc, bool pages_dirty); int btrfs_writepages(struct address_space *mapping, struct writeback_control *wbc); -int btree_write_cache_pages(struct address_space *mapping, - struct writeback_control *wbc); +int btree_writepages(struct address_space *mapping, struct writeback_control *wbc); void btrfs_btree_wait_writeback_range(struct btrfs_fs_info *fs_info, u64 start, u64 end); void btrfs_readahead(struct readahead_control *rac); int set_folio_extent_mapped(struct folio *folio); diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c index f38d8305e46d7a..baadaaa189c05d 100644 --- a/fs/btrfs/raid56.c +++ b/fs/btrfs/raid56.c @@ -150,6 +150,7 @@ static void scrub_rbio_work_locked(struct work_struct *work); static void free_raid_bio_pointers(struct btrfs_raid_bio *rbio) { bitmap_free(rbio->error_bitmap); + bitmap_free(rbio->stripe_uptodate_bitmap); kfree(rbio->stripe_pages); kfree(rbio->bio_paddrs); kfree(rbio->stripe_paddrs); diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c index 6caba8be7c845c..10ed48d4a84665 100644 --- a/fs/btrfs/zlib.c +++ b/fs/btrfs/zlib.c @@ -139,6 +139,7 @@ static int copy_data_into_buffer(struct address_space *mapping, data_in = kmap_local_folio(folio, offset); memcpy(workspace->buf + cur - filepos, data_in, copy_length); kunmap_local(data_in); + folio_put(folio); cur += copy_length; } return 0; diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c index 86d7aa594ea993..804588524cd570 100644 --- a/fs/ceph/dir.c +++ b/fs/ceph/dir.c @@ -2214,6 +2214,7 @@ const struct file_operations ceph_dir_fops = { .fsync = ceph_fsync, .lock = ceph_lock, .flock = ceph_flock, + .setlease = simple_nosetlease, }; const struct file_operations ceph_snapdir_fops = { @@ -2221,6 +2222,7 @@ const struct file_operations ceph_snapdir_fops = { .llseek = ceph_dir_llseek, .open = ceph_open, .release = ceph_release, + .setlease = simple_nosetlease, }; const struct inode_operations ceph_dir_iops = { diff --git a/fs/dcache.c b/fs/dcache.c index dc2fff4811d153..66dd1bb830d1a8 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1104,6 +1104,16 @@ struct dentry *d_find_alias_rcu(struct inode *inode) return de; } +/** + * d_dispose_if_unused - move unreferenced dentries to shrink list + * @dentry: dentry in question + * @dispose: head of shrink list + * + * If dentry has no external references, move it to shrink list. + * + * NOTE!!! The caller is responsible for preventing eviction of the dentry by + * holding dentry->d_inode->i_lock or equivalent. 
+ */ void d_dispose_if_unused(struct dentry *dentry, struct list_head *dispose) { spin_lock(&dentry->d_lock); diff --git a/fs/efivarfs/vars.c b/fs/efivarfs/vars.c index 6edc10958ecf55..70e13db260dba0 100644 --- a/fs/efivarfs/vars.c +++ b/fs/efivarfs/vars.c @@ -552,7 +552,7 @@ int efivar_entry_get(struct efivar_entry *entry, u32 *attributes, err = __efivar_entry_get(entry, attributes, size, data); efivar_unlock(); - return 0; + return err; } /** diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c index baa2f214114661..5444fc706ac7d6 100644 --- a/fs/fs-writeback.c +++ b/fs/fs-writeback.c @@ -2492,7 +2492,9 @@ static void wakeup_dirtytime_writeback(struct work_struct *w) wb_wakeup(wb); } rcu_read_unlock(); - schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); + if (dirtytime_expire_interval) + schedule_delayed_work(&dirtytime_work, + round_jiffies_relative(dirtytime_expire_interval * HZ)); } static int dirtytime_interval_handler(const struct ctl_table *table, int write, @@ -2501,8 +2503,12 @@ static int dirtytime_interval_handler(const struct ctl_table *table, int write, int ret; ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); - if (ret == 0 && write) - mod_delayed_work(system_percpu_wq, &dirtytime_work, 0); + if (ret == 0 && write) { + if (dirtytime_expire_interval) + mod_delayed_work(system_percpu_wq, &dirtytime_work, 0); + else + cancel_delayed_work_sync(&dirtytime_work); + } return ret; } @@ -2519,7 +2525,9 @@ static const struct ctl_table vm_fs_writeback_table[] = { static int __init start_dirtytime_writeback(void) { - schedule_delayed_work(&dirtytime_work, dirtytime_expire_interval * HZ); + if (dirtytime_expire_interval) + schedule_delayed_work(&dirtytime_work, + round_jiffies_relative(dirtytime_expire_interval * HZ)); register_sysctl_init("vm", vm_fs_writeback_table); return 0; } diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 4b6b3d2758ff22..3927cb069236e9 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -32,9 +32,9 @@ struct dentry_bucket { spinlock_t lock; }; -#define HASH_BITS 5 -#define HASH_SIZE (1 << HASH_BITS) -static struct dentry_bucket dentry_hash[HASH_SIZE]; +#define FUSE_HASH_BITS 5 +#define FUSE_HASH_SIZE (1 << FUSE_HASH_BITS) +static struct dentry_bucket dentry_hash[FUSE_HASH_SIZE]; struct delayed_work dentry_tree_work; /* Minimum invalidation work queue frequency */ @@ -83,7 +83,7 @@ MODULE_PARM_DESC(inval_wq, static inline struct dentry_bucket *get_dentry_bucket(struct dentry *dentry) { - int i = hash_ptr(dentry, HASH_BITS); + int i = hash_ptr(dentry, FUSE_HASH_BITS); return &dentry_hash[i]; } @@ -164,25 +164,31 @@ static void fuse_dentry_tree_work(struct work_struct *work) struct rb_node *node; int i; - for (i = 0; i < HASH_SIZE; i++) { + for (i = 0; i < FUSE_HASH_SIZE; i++) { spin_lock(&dentry_hash[i].lock); node = rb_first(&dentry_hash[i].tree); while (node) { fd = rb_entry(node, struct fuse_dentry, node); - if (time_after64(get_jiffies_64(), fd->time)) { - rb_erase(&fd->node, &dentry_hash[i].tree); - RB_CLEAR_NODE(&fd->node); + if (!time_before64(fd->time, get_jiffies_64())) + break; + + rb_erase(&fd->node, &dentry_hash[i].tree); + RB_CLEAR_NODE(&fd->node); + spin_lock(&fd->dentry->d_lock); + /* If dentry is still referenced, let next dput release it */ + fd->dentry->d_flags |= DCACHE_OP_DELETE; + spin_unlock(&fd->dentry->d_lock); + d_dispose_if_unused(fd->dentry, &dispose); + if (need_resched()) { spin_unlock(&dentry_hash[i].lock); - d_dispose_if_unused(fd->dentry, &dispose); cond_resched(); spin_lock(&dentry_hash[i].lock); - } 
else - break; + } node = rb_first(&dentry_hash[i].tree); } spin_unlock(&dentry_hash[i].lock); - shrink_dentry_list(&dispose); } + shrink_dentry_list(&dispose); if (inval_wq) schedule_delayed_work(&dentry_tree_work, @@ -213,7 +219,7 @@ void fuse_dentry_tree_init(void) { int i; - for (i = 0; i < HASH_SIZE; i++) { + for (i = 0; i < FUSE_HASH_SIZE; i++) { spin_lock_init(&dentry_hash[i].lock); dentry_hash[i].tree = RB_ROOT; } @@ -227,7 +233,7 @@ void fuse_dentry_tree_cleanup(void) inval_wq = 0; cancel_delayed_work_sync(&dentry_tree_work); - for (i = 0; i < HASH_SIZE; i++) + for (i = 0; i < FUSE_HASH_SIZE; i++) WARN_ON_ONCE(!RB_EMPTY_ROOT(&dentry_hash[i].tree)); } @@ -479,18 +485,12 @@ static int fuse_dentry_init(struct dentry *dentry) return 0; } -static void fuse_dentry_prune(struct dentry *dentry) +static void fuse_dentry_release(struct dentry *dentry) { struct fuse_dentry *fd = dentry->d_fsdata; if (!RB_EMPTY_NODE(&fd->node)) fuse_dentry_tree_del_node(dentry); -} - -static void fuse_dentry_release(struct dentry *dentry) -{ - struct fuse_dentry *fd = dentry->d_fsdata; - kfree_rcu(fd, rcu); } @@ -527,7 +527,6 @@ const struct dentry_operations fuse_dentry_operations = { .d_revalidate = fuse_dentry_revalidate, .d_delete = fuse_dentry_delete, .d_init = fuse_dentry_init, - .d_prune = fuse_dentry_prune, .d_release = fuse_dentry_release, .d_automount = fuse_dentry_automount, }; @@ -1584,8 +1583,8 @@ int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid, { int err = -ENOTDIR; struct inode *parent; - struct dentry *dir; - struct dentry *entry; + struct dentry *dir = NULL; + struct dentry *entry = NULL; parent = fuse_ilookup(fc, parent_nodeid, NULL); if (!parent) @@ -1598,11 +1597,19 @@ int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid, dir = d_find_alias(parent); if (!dir) goto put_parent; - - entry = start_removing_noperm(dir, name); - dput(dir); - if (IS_ERR(entry)) - goto put_parent; + while (!entry) { + struct dentry *child = try_lookup_noperm(name, dir); + if (!child || IS_ERR(child)) + goto put_parent; + entry = start_removing_dentry(dir, child); + dput(child); + if (IS_ERR(entry)) + goto put_parent; + if (!d_same_name(entry, dir, name)) { + end_removing(entry); + entry = NULL; + } + } fuse_dir_changed(parent); if (!(flags & FUSE_EXPIRE_ONLY)) @@ -1640,6 +1647,7 @@ int fuse_reverse_inval_entry(struct fuse_conn *fc, u64 parent_nodeid, end_removing(entry); put_parent: + dput(dir); iput(parent); return err; } diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c index b2d23c98c99655..86376f0dbf3a55 100644 --- a/fs/gfs2/file.c +++ b/fs/gfs2/file.c @@ -1608,6 +1608,7 @@ const struct file_operations gfs2_dir_fops = { .lock = gfs2_lock, .flock = gfs2_flock, .llseek = default_llseek, + .setlease = simple_nosetlease, .fop_flags = FOP_ASYNC_LOCK, }; diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index fd9a2cf9562024..6beb876658c09b 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -851,6 +851,7 @@ static struct folio *__iomap_get_folio(struct iomap_iter *iter, } folio_get(folio); + folio_wait_stable(folio); return folio; } diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index 8f9ea79b788230..e3654f4a1b9aab 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -66,6 +66,7 @@ const struct file_operations nfs_dir_operations = { .open = nfs_opendir, .release = nfs_closedir, .fsync = nfs_fsync_dir, + .setlease = simple_nosetlease, }; const struct address_space_operations nfs_dir_aops = { diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c index 
7317f26892c578..7f43e890d3564a 100644 --- a/fs/nfs/nfs4file.c +++ b/fs/nfs/nfs4file.c @@ -431,8 +431,6 @@ void nfs42_ssc_unregister_ops(void) static int nfs4_setlease(struct file *file, int arg, struct file_lease **lease, void **priv) { - if (!S_ISREG(file_inode(file)->i_mode)) - return -EINVAL; return nfs4_proc_setlease(file, arg, lease, priv); } diff --git a/fs/readdir.c b/fs/readdir.c index 7764b863897888..73707b6816e9aa 100644 --- a/fs/readdir.c +++ b/fs/readdir.c @@ -316,6 +316,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd, struct getdents_callback buf = { .ctx.actor = filldir, .ctx.count = count, + .ctx.dt_flags_mask = FILLDIR_FLAG_NOINTR, .current_dir = dirent }; int error; @@ -400,6 +401,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd, struct getdents_callback64 buf = { .ctx.actor = filldir64, .ctx.count = count, + .ctx.dt_flags_mask = FILLDIR_FLAG_NOINTR, .current_dir = dirent }; int error; @@ -569,6 +571,7 @@ COMPAT_SYSCALL_DEFINE3(getdents, unsigned int, fd, struct compat_getdents_callback buf = { .ctx.actor = compat_filldir, .ctx.count = count, + .ctx.dt_flags_mask = FILLDIR_FLAG_NOINTR, .current_dir = dirent, }; int error; diff --git a/fs/romfs/super.c b/fs/romfs/super.c index 360b008541154e..ac55193bf39889 100644 --- a/fs/romfs/super.c +++ b/fs/romfs/super.c @@ -458,7 +458,10 @@ static int romfs_fill_super(struct super_block *sb, struct fs_context *fc) #ifdef CONFIG_BLOCK if (!sb->s_mtd) { - sb_set_blocksize(sb, ROMBSIZE); + if (!sb_set_blocksize(sb, ROMBSIZE)) { + errorf(fc, "romfs: unable to set blocksize\n"); + return -EINVAL; + } } else { sb->s_blocksize = ROMBSIZE; sb->s_blocksize_bits = blksize_bits(ROMBSIZE); diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c index d9664634144d3e..a3dc7cb1ab541d 100644 --- a/fs/smb/client/cifsfs.c +++ b/fs/smb/client/cifsfs.c @@ -1149,9 +1149,6 @@ cifs_setlease(struct file *file, int arg, struct file_lease **lease, void **priv struct inode *inode = file_inode(file); struct cifsFileInfo *cfile = file->private_data; - if (!S_ISREG(inode->i_mode)) - return -EINVAL; - /* Check if file is oplocked if this is request for new lease */ if (arg == F_UNLCK || ((arg == F_RDLCK) && CIFS_CACHE_READ(CIFS_I(inode))) || @@ -1712,6 +1709,7 @@ const struct file_operations cifs_dir_ops = { .remap_file_range = cifs_remap_file_range, .llseek = generic_file_llseek, .fsync = cifs_dir_fsync, + .setlease = simple_nosetlease, }; static void diff --git a/fs/smb/client/cifstransport.c b/fs/smb/client/cifstransport.c index 28d1cee9062505..98287132626e34 100644 --- a/fs/smb/client/cifstransport.c +++ b/fs/smb/client/cifstransport.c @@ -251,13 +251,15 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, rc = cifs_send_recv(xid, ses, ses->server, &rqst, &resp_buf_type, flags, &resp_iov); if (rc < 0) - return rc; + goto out; if (out_buf) { *pbytes_returned = resp_iov.iov_len; if (resp_iov.iov_len) memcpy(out_buf, resp_iov.iov_base, resp_iov.iov_len); } + +out: free_rsp_buf(resp_buf_type, resp_iov.iov_base); return rc; } diff --git a/fs/smb/client/smb2file.c b/fs/smb/client/smb2file.c index 7f11ae6bb785dd..2dd08388ea8733 100644 --- a/fs/smb/client/smb2file.c +++ b/fs/smb/client/smb2file.c @@ -178,6 +178,7 @@ int smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms, rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov, &err_buftype); if (rc == -EACCES && retry_without_read_attributes) { + free_rsp_buf(err_buftype, err_iov.iov_base); oparms->desired_access &= ~FILE_READ_ATTRIBUTES; rc = SMB2_open(xid, 
oparms, smb2_path, &smb2_oplock, smb2_data, NULL, &err_iov, &err_buftype); diff --git a/fs/vboxsf/dir.c b/fs/vboxsf/dir.c index 42bedc4ec7af77..230d7589d15cc9 100644 --- a/fs/vboxsf/dir.c +++ b/fs/vboxsf/dir.c @@ -186,6 +186,7 @@ const struct file_operations vboxsf_dir_fops = { .release = vboxsf_dir_release, .read = generic_read_dir, .llseek = generic_file_llseek, + .setlease = simple_nosetlease, }; /* diff --git a/include/linux/cma.h b/include/linux/cma.h index 62d9c1cf632652..2e6931735880bd 100644 --- a/include/linux/cma.h +++ b/include/linux/cma.h @@ -57,6 +57,15 @@ extern bool cma_intersects(struct cma *cma, unsigned long start, unsigned long e extern void cma_reserve_pages_on_error(struct cma *cma); +#ifdef CONFIG_DMA_CMA +extern bool cma_skip_dt_default_reserved_mem(void); +#else +static inline bool cma_skip_dt_default_reserved_mem(void) +{ + return false; +} +#endif + #ifdef CONFIG_CMA struct folio *cma_alloc_folio(struct cma *cma, int order, gfp_t gfp); bool cma_free_folio(struct cma *cma, const struct folio *folio); diff --git a/include/linux/fs.h b/include/linux/fs.h index f5c9cf28c4dcf9..a01621fa636a60 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1855,6 +1855,8 @@ struct dir_context { * INT_MAX unlimited */ int count; + /* @actor supports these flags in d_type high bits */ + unsigned int dt_flags_mask; }; /* If OR-ed with d_type, pending signals are not checked */ @@ -3524,7 +3526,9 @@ static inline bool dir_emit(struct dir_context *ctx, const char *name, int namelen, u64 ino, unsigned type) { - return ctx->actor(ctx, name, namelen, ctx->pos, ino, type); + unsigned int dt_mask = S_DT_MASK | ctx->dt_flags_mask; + + return ctx->actor(ctx, name, namelen, ctx->pos, ino, type & dt_mask); } static inline bool dir_emit_dot(struct file *file, struct dir_context *ctx) { diff --git a/include/linux/kasan.h b/include/linux/kasan.h index 9c6ac4b62eb995..338a1921a50ad4 100644 --- a/include/linux/kasan.h +++ b/include/linux/kasan.h @@ -641,6 +641,17 @@ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, __kasan_unpoison_vmap_areas(vms, nr_vms, flags); } +void __kasan_vrealloc(const void *start, unsigned long old_size, + unsigned long new_size); + +static __always_inline void kasan_vrealloc(const void *start, + unsigned long old_size, + unsigned long new_size) +{ + if (kasan_enabled()) + __kasan_vrealloc(start, old_size, new_size); +} + #else /* CONFIG_KASAN_VMALLOC */ static inline void kasan_populate_early_vm_area_shadow(void *start, @@ -670,6 +681,9 @@ kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, kasan_vmalloc_flags_t flags) { } +static inline void kasan_vrealloc(const void *start, unsigned long old_size, + unsigned long new_size) { } + #endif /* CONFIG_KASAN_VMALLOC */ #if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \ diff --git a/include/linux/memfd.h b/include/linux/memfd.h index cc74de3dbcfe93..c328a7b356d0e5 100644 --- a/include/linux/memfd.h +++ b/include/linux/memfd.h @@ -17,6 +17,7 @@ struct folio *memfd_alloc_folio(struct file *memfd, pgoff_t idx); * to by vm_flags_ptr. 
*/ int memfd_check_seals_mmap(struct file *file, vm_flags_t *vm_flags_ptr); +struct file *memfd_alloc_file(const char *name, unsigned int flags); #else static inline long memfd_fcntl(struct file *f, unsigned int c, unsigned int a) { @@ -31,6 +32,11 @@ static inline int memfd_check_seals_mmap(struct file *file, { return 0; } + +static inline struct file *memfd_alloc_file(const char *name, unsigned int flags) +{ + return ERR_PTR(-EINVAL); +} #endif #endif /* __LINUX_MEMFD_H */ diff --git a/include/linux/memremap.h b/include/linux/memremap.h index 713ec0435b48ad..e3c2ccf872a8af 100644 --- a/include/linux/memremap.h +++ b/include/linux/memremap.h @@ -224,7 +224,8 @@ static inline bool is_fsdax_page(const struct page *page) } #ifdef CONFIG_ZONE_DEVICE -void zone_device_page_init(struct page *page, unsigned int order); +void zone_device_page_init(struct page *page, struct dev_pagemap *pgmap, + unsigned int order); void *memremap_pages(struct dev_pagemap *pgmap, int nid); void memunmap_pages(struct dev_pagemap *pgmap); void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap); @@ -234,9 +235,11 @@ bool pgmap_pfn_valid(struct dev_pagemap *pgmap, unsigned long pfn); unsigned long memremap_compat_align(void); -static inline void zone_device_folio_init(struct folio *folio, unsigned int order) +static inline void zone_device_folio_init(struct folio *folio, + struct dev_pagemap *pgmap, + unsigned int order) { - zone_device_page_init(&folio->page, order); + zone_device_page_init(&folio->page, pgmap, order); if (order) folio_set_large_rmappable(folio); } diff --git a/include/linux/pci-ide.h b/include/linux/pci-ide.h index 37a1ad9501b05d..ae07d9f699c001 100644 --- a/include/linux/pci-ide.h +++ b/include/linux/pci-ide.h @@ -26,7 +26,7 @@ enum pci_ide_partner_select { /** * struct pci_ide_partner - Per port pair Selective IDE Stream settings * @rid_start: Partner Port Requester ID range start - * @rid_end: Partner Port Requester ID range end + * @rid_end: Partner Port Requester ID range end (inclusive) * @stream_index: Selective IDE Stream Register Block selection * @mem_assoc: PCI bus memory address association for targeting peer partner * @pref_assoc: PCI bus prefetchable memory address association for @@ -82,7 +82,6 @@ struct pci_ide_regs { * @host_bridge_stream: allocated from host bridge @ide_stream_ida pool * @stream_id: unique Stream ID (within Partner Port pairing) * @name: name of the established Selective IDE Stream in sysfs - * @tsm_dev: For TSM established IDE, the TSM device context * * Negative @stream_id values indicate "uninitialized" on the * expectation that with TSM established IDE the TSM owns the stream_id @@ -94,7 +93,6 @@ struct pci_ide { u8 host_bridge_stream; int stream_id; const char *name; - struct tsm_dev *tsm_dev; }; /* diff --git a/include/linux/sched.h b/include/linux/sched.h index da0133524d08c2..5f00b5ed0f3b7d 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -1776,6 +1776,11 @@ static __always_inline bool is_percpu_thread(void) (current->nr_cpus_allowed == 1); } +static __always_inline bool is_user_task(struct task_struct *task) +{ + return task->mm && !(task->flags & (PF_KTHREAD | PF_USER_WORKER)); +} + /* Per-process atomic flags. */ #define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. 
*/ #define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */ diff --git a/include/linux/tsm.h b/include/linux/tsm.h index a3b7ab668effff..22e05b2aac69cf 100644 --- a/include/linux/tsm.h +++ b/include/linux/tsm.h @@ -123,7 +123,4 @@ int tsm_report_unregister(const struct tsm_report_ops *ops); struct tsm_dev *tsm_register(struct device *parent, struct pci_tsm_ops *ops); void tsm_unregister(struct tsm_dev *tsm_dev); struct tsm_dev *find_tsm_dev(int id); -struct pci_ide; -int tsm_ide_stream_register(struct pci_ide *ide); -void tsm_ide_stream_unregister(struct pci_ide *ide); #endif /* __TSM_H */ diff --git a/include/net/bonding.h b/include/net/bonding.h index 49edc7da05867f..46207840355709 100644 --- a/include/net/bonding.h +++ b/include/net/bonding.h @@ -521,13 +521,14 @@ static inline int bond_is_ip6_target_ok(struct in6_addr *addr) static inline unsigned long slave_oldest_target_arp_rx(struct bonding *bond, struct slave *slave) { + unsigned long tmp, ret = READ_ONCE(slave->target_last_arp_rx[0]); int i = 1; - unsigned long ret = slave->target_last_arp_rx[0]; - - for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++) - if (time_before(slave->target_last_arp_rx[i], ret)) - ret = slave->target_last_arp_rx[i]; + for (; (i < BOND_MAX_ARP_TARGETS) && bond->params.arp_targets[i]; i++) { + tmp = READ_ONCE(slave->target_last_arp_rx[i]); + if (time_before(tmp, ret)) + ret = tmp; + } return ret; } @@ -537,7 +538,7 @@ static inline unsigned long slave_last_rx(struct bonding *bond, if (bond->params.arp_all_targets == BOND_ARP_TARGETS_ALL) return slave_oldest_target_arp_rx(bond, slave); - return slave->last_rx; + return READ_ONCE(slave->last_rx); } static inline void slave_update_last_tx(struct slave *slave) diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h index 127e6c7d910dcb..c54df042db6be2 100644 --- a/include/net/nfc/nfc.h +++ b/include/net/nfc/nfc.h @@ -219,6 +219,8 @@ static inline void nfc_free_device(struct nfc_dev *dev) int nfc_register_device(struct nfc_dev *dev); +void nfc_unregister_rfkill(struct nfc_dev *dev); +void nfc_remove_device(struct nfc_dev *dev); void nfc_unregister_device(struct nfc_dev *dev); /** diff --git a/kernel/cgroup/dmem.c b/kernel/cgroup/dmem.c index e12b946278b6c6..1ea6afffa985c4 100644 --- a/kernel/cgroup/dmem.c +++ b/kernel/cgroup/dmem.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include @@ -71,7 +72,9 @@ struct dmem_cgroup_pool_state { struct rcu_head rcu; struct page_counter cnt; + struct dmem_cgroup_pool_state *parent; + refcount_t ref; bool inited; }; @@ -88,6 +91,9 @@ struct dmem_cgroup_pool_state { static DEFINE_SPINLOCK(dmemcg_lock); static LIST_HEAD(dmem_cgroup_regions); +static void dmemcg_free_region(struct kref *ref); +static void dmemcg_pool_free_rcu(struct rcu_head *rcu); + static inline struct dmemcg_state * css_to_dmemcs(struct cgroup_subsys_state *css) { @@ -104,10 +110,38 @@ static struct dmemcg_state *parent_dmemcs(struct dmemcg_state *cg) return cg->css.parent ? 
css_to_dmemcs(cg->css.parent) : NULL; } +static void dmemcg_pool_get(struct dmem_cgroup_pool_state *pool) +{ + refcount_inc(&pool->ref); +} + +static bool dmemcg_pool_tryget(struct dmem_cgroup_pool_state *pool) +{ + return refcount_inc_not_zero(&pool->ref); +} + +static void dmemcg_pool_put(struct dmem_cgroup_pool_state *pool) +{ + if (!refcount_dec_and_test(&pool->ref)) + return; + + call_rcu(&pool->rcu, dmemcg_pool_free_rcu); +} + +static void dmemcg_pool_free_rcu(struct rcu_head *rcu) +{ + struct dmem_cgroup_pool_state *pool = container_of(rcu, typeof(*pool), rcu); + + if (pool->parent) + dmemcg_pool_put(pool->parent); + kref_put(&pool->region->ref, dmemcg_free_region); + kfree(pool); +} + static void free_cg_pool(struct dmem_cgroup_pool_state *pool) { list_del(&pool->region_node); - kfree(pool); + dmemcg_pool_put(pool); } static void @@ -342,6 +376,12 @@ alloc_pool_single(struct dmemcg_state *dmemcs, struct dmem_cgroup_region *region page_counter_init(&pool->cnt, ppool ? &ppool->cnt : NULL, true); reset_all_resource_limits(pool); + refcount_set(&pool->ref, 1); + kref_get(®ion->ref); + if (ppool && !pool->parent) { + pool->parent = ppool; + dmemcg_pool_get(ppool); + } list_add_tail_rcu(&pool->css_node, &dmemcs->pools); list_add_tail(&pool->region_node, ®ion->pools); @@ -389,6 +429,10 @@ get_cg_pool_locked(struct dmemcg_state *dmemcs, struct dmem_cgroup_region *regio /* Fix up parent links, mark as inited. */ pool->cnt.parent = &ppool->cnt; + if (ppool && !pool->parent) { + pool->parent = ppool; + dmemcg_pool_get(ppool); + } pool->inited = true; pool = ppool; @@ -423,7 +467,7 @@ static void dmemcg_free_region(struct kref *ref) */ void dmem_cgroup_unregister_region(struct dmem_cgroup_region *region) { - struct list_head *entry; + struct dmem_cgroup_pool_state *pool, *next; if (!region) return; @@ -433,11 +477,10 @@ void dmem_cgroup_unregister_region(struct dmem_cgroup_region *region) /* Remove from global region list */ list_del_rcu(®ion->region_node); - list_for_each_rcu(entry, ®ion->pools) { - struct dmem_cgroup_pool_state *pool = - container_of(entry, typeof(*pool), region_node); - + list_for_each_entry_safe(pool, next, ®ion->pools, region_node) { list_del_rcu(&pool->css_node); + list_del(&pool->region_node); + dmemcg_pool_put(pool); } /* @@ -518,8 +561,10 @@ static struct dmem_cgroup_region *dmemcg_get_region_by_name(const char *name) */ void dmem_cgroup_pool_state_put(struct dmem_cgroup_pool_state *pool) { - if (pool) + if (pool) { css_put(&pool->cs->css); + dmemcg_pool_put(pool); + } } EXPORT_SYMBOL_GPL(dmem_cgroup_pool_state_put); @@ -533,6 +578,8 @@ get_cg_pool_unlocked(struct dmemcg_state *cg, struct dmem_cgroup_region *region) pool = find_cg_pool_locked(cg, region); if (pool && !READ_ONCE(pool->inited)) pool = NULL; + if (pool && !dmemcg_pool_tryget(pool)) + pool = NULL; rcu_read_unlock(); while (!pool) { @@ -541,6 +588,8 @@ get_cg_pool_unlocked(struct dmemcg_state *cg, struct dmem_cgroup_region *region) pool = get_cg_pool_locked(cg, region, &allocpool); else pool = ERR_PTR(-ENODEV); + if (!IS_ERR(pool)) + dmemcg_pool_get(pool); spin_unlock(&dmemcg_lock); if (pool == ERR_PTR(-ENOMEM)) { @@ -576,6 +625,7 @@ void dmem_cgroup_uncharge(struct dmem_cgroup_pool_state *pool, u64 size) page_counter_uncharge(&pool->cnt, size); css_put(&pool->cs->css); + dmemcg_pool_put(pool); } EXPORT_SYMBOL_GPL(dmem_cgroup_uncharge); @@ -627,7 +677,9 @@ int dmem_cgroup_try_charge(struct dmem_cgroup_region *region, u64 size, if (ret_limit_pool) { *ret_limit_pool = container_of(fail, struct 
dmem_cgroup_pool_state, cnt); css_get(&(*ret_limit_pool)->cs->css); + dmemcg_pool_get(*ret_limit_pool); } + dmemcg_pool_put(pool); ret = -EAGAIN; goto err; } @@ -700,6 +752,9 @@ static ssize_t dmemcg_limit_write(struct kernfs_open_file *of, if (!region_name[0]) continue; + if (!options || !*options) + return -EINVAL; + rcu_read_lock(); region = dmemcg_get_region_by_name(region_name); rcu_read_unlock(); @@ -719,6 +774,7 @@ static ssize_t dmemcg_limit_write(struct kernfs_open_file *of, /* And commit */ apply(pool, new_limit); + dmemcg_pool_put(pool); out_put: kref_put(®ion->ref, dmemcg_free_region); diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c index d8fd6f779f797f..0e266979728b21 100644 --- a/kernel/dma/contiguous.c +++ b/kernel/dma/contiguous.c @@ -91,6 +91,16 @@ static int __init early_cma(char *p) } early_param("cma", early_cma); +/* + * cma_skip_dt_default_reserved_mem - This is called from the + * reserved_mem framework to detect if the default cma region is being + * set by the "cma=" kernel parameter. + */ +bool __init cma_skip_dt_default_reserved_mem(void) +{ + return size_cmdline != -1; +} + #ifdef CONFIG_DMA_NUMA_CMA static struct cma *dma_contiguous_numa_area[MAX_NUMNODES]; @@ -470,12 +480,6 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem) struct cma *cma; int err; - if (size_cmdline != -1 && default_cma) { - pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n", - rmem->name); - return -EBUSY; - } - if (!of_get_flat_dt_prop(node, "reusable", NULL) || of_get_flat_dt_prop(node, "no-map", NULL)) return -EINVAL; diff --git a/kernel/dma/pool.c b/kernel/dma/pool.c index c5da29ad010c4d..2b2fbb7092429a 100644 --- a/kernel/dma/pool.c +++ b/kernel/dma/pool.c @@ -277,15 +277,20 @@ struct page *dma_alloc_from_pool(struct device *dev, size_t size, { struct gen_pool *pool = NULL; struct page *page; + bool pool_found = false; while ((pool = dma_guess_pool(pool, gfp))) { + pool_found = true; page = __dma_alloc_from_pool(dev, size, pool, cpu_addr, phys_addr_ok); if (page) return page; } - WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev)); + if (pool_found) + WARN(!(gfp & __GFP_NOWARN), "DMA pool exhausted for %s\n", dev_name(dev)); + else + WARN(1, "Failed to get suitable pool for %s\n", dev_name(dev)); return NULL; } diff --git a/kernel/events/callchain.c b/kernel/events/callchain.c index 1f658957870391..9d24b6e0c91f1f 100644 --- a/kernel/events/callchain.c +++ b/kernel/events/callchain.c @@ -246,7 +246,7 @@ get_perf_callchain(struct pt_regs *regs, bool kernel, bool user, if (user && !crosstask) { if (!user_mode(regs)) { - if (current->flags & (PF_KTHREAD | PF_USER_WORKER)) + if (!is_user_task(current)) goto exit_put; regs = task_pt_regs(current); } diff --git a/kernel/events/core.c b/kernel/events/core.c index a0fa488bce8465..8cca8009462481 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -7460,7 +7460,7 @@ static void perf_sample_regs_user(struct perf_regs *regs_user, if (user_mode(regs)) { regs_user->abi = perf_reg_abi(current); regs_user->regs = regs; - } else if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER))) { + } else if (is_user_task(current)) { perf_get_regs_user(regs_user, regs); } else { regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; @@ -8100,7 +8100,7 @@ static u64 perf_virt_to_phys(u64 virt) * Try IRQ-safe get_user_page_fast_only first. * If failed, leave phys_addr as 0. 
*/ - if (!(current->flags & (PF_KTHREAD | PF_USER_WORKER))) { + if (is_user_task(current)) { struct page *p; pagefault_disable(); @@ -8215,7 +8215,7 @@ perf_callchain(struct perf_event *event, struct pt_regs *regs) { bool kernel = !event->attr.exclude_callchain_kernel; bool user = !event->attr.exclude_callchain_user && - !(current->flags & (PF_KTHREAD | PF_USER_WORKER)); + is_user_task(current); /* Disallow cross-task user callchains. */ bool crosstask = event->ctx->task && event->ctx->task != current; bool defer_user = IS_ENABLED(CONFIG_UNWIND_USER) && user && diff --git a/kernel/liveupdate/kexec_handover.c b/kernel/liveupdate/kexec_handover.c index d4482b6e3cae39..90d411a59f76d9 100644 --- a/kernel/liveupdate/kexec_handover.c +++ b/kernel/liveupdate/kexec_handover.c @@ -255,6 +255,14 @@ static struct page *kho_restore_page(phys_addr_t phys, bool is_folio) if (is_folio && info.order) prep_compound_page(page, info.order); + /* Always mark headpage's codetag as empty to avoid accounting mismatch */ + clear_page_tag_ref(page); + if (!is_folio) { + /* Also do that for the non-compound tail pages */ + for (unsigned int i = 1; i < nr_pages; i++) + clear_page_tag_ref(page + i); + } + adjust_managed_page_count(page, nr_pages); return page; } @@ -1006,8 +1014,10 @@ int kho_preserve_vmalloc(void *ptr, struct kho_vmalloc *preservation) chunk->phys[idx++] = phys; if (idx == ARRAY_SIZE(chunk->phys)) { chunk = new_vmalloc_chunk(chunk); - if (!chunk) + if (!chunk) { + err = -ENOMEM; goto err_free; + } idx = 0; } } diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index c509f2e7d69de2..7bcde7114f1b65 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -1034,6 +1034,12 @@ static void update_dl_entity(struct sched_dl_entity *dl_se) return; } + /* + * When [4] D->A is followed by [1] A->B, dl_defer_running + * needs to be cleared, otherwise it will fail to properly + * start the zero-laxity timer. + */ + dl_se->dl_defer_running = 0; replenish_dl_new_period(dl_se, rq); } else if (dl_server(dl_se) && dl_se->dl_defer) { /* @@ -1655,6 +1661,12 @@ void dl_server_update(struct sched_dl_entity *dl_se, s64 delta_exec) * dl_server_active = 1; * enqueue_dl_entity() * update_dl_entity(WAKEUP) + * if (dl_time_before() || dl_entity_overflow()) + * dl_defer_running = 0; + * replenish_dl_new_period(); + * // fwd period + * dl_throttled = 1; + * dl_defer_armed = 1; * if (!dl_defer_running) * dl_defer_armed = 1; * dl_throttled = 1; diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c index afe28c04d5aa73..0bb8fa927e9e9f 100644 --- a/kernel/sched/ext.c +++ b/kernel/sched/ext.c @@ -194,6 +194,7 @@ MODULE_PARM_DESC(bypass_lb_intv_us, "bypass load balance interval in microsecond #include static void process_ddsp_deferred_locals(struct rq *rq); +static bool task_dead_and_done(struct task_struct *p); static u32 reenq_local(struct rq *rq); static void scx_kick_cpu(struct scx_sched *sch, s32 cpu, u64 flags); static bool scx_vexit(struct scx_sched *sch, enum scx_exit_kind kind, @@ -2619,6 +2620,9 @@ static void set_cpus_allowed_scx(struct task_struct *p, set_cpus_allowed_common(p, ac); + if (task_dead_and_done(p)) + return; + /* * The effective cpumask is stored in @p->cpus_ptr which may temporarily * differ from the configured one in @p->cpus_mask. Always tell the bpf @@ -3034,10 +3038,45 @@ void scx_cancel_fork(struct task_struct *p) percpu_up_read(&scx_fork_rwsem); } +/** + * task_dead_and_done - Is a task dead and done running? 
+ * @p: target task + * + * Once sched_ext_dead() removes the dead task from scx_tasks and exits it, the + * task no longer exists from SCX's POV. However, certain sched_class ops may be + * invoked on these dead tasks leading to failures - e.g. sched_setscheduler() + * may try to switch a task which finished sched_ext_dead() back into SCX + * triggering invalid SCX task state transitions and worse. + * + * Once a task has finished the final switch, sched_ext_dead() is the only thing + * that needs to happen on the task. Use this test to short-circuit sched_class + * operations which may be called on dead tasks. + */ +static bool task_dead_and_done(struct task_struct *p) +{ + struct rq *rq = task_rq(p); + + lockdep_assert_rq_held(rq); + + /* + * In do_task_dead(), a dying task sets %TASK_DEAD with preemption + * disabled and __schedule(). If @p has %TASK_DEAD set and off CPU, @p + * won't ever run again. + */ + return unlikely(READ_ONCE(p->__state) == TASK_DEAD) && + !task_on_cpu(rq, p); +} + void sched_ext_dead(struct task_struct *p) { unsigned long flags; + /* + * By the time control reaches here, @p has %TASK_DEAD set, switched out + * for the last time and then dropped the rq lock - task_dead_and_done() + * should be returning %true nullifying the straggling sched_class ops. + * Remove from scx_tasks and exit @p. + */ raw_spin_lock_irqsave(&scx_tasks_lock, flags); list_del_init(&p->scx.tasks_node); raw_spin_unlock_irqrestore(&scx_tasks_lock, flags); @@ -3063,6 +3102,9 @@ static void reweight_task_scx(struct rq *rq, struct task_struct *p, lockdep_assert_rq_held(task_rq(p)); + if (task_dead_and_done(p)) + return; + p->scx.weight = sched_weight_to_cgroup(scale_load_down(lw->weight)); if (SCX_HAS_OP(sch, set_weight)) SCX_CALL_OP_TASK(sch, SCX_KF_REST, set_weight, rq, @@ -3077,6 +3119,9 @@ static void switching_to_scx(struct rq *rq, struct task_struct *p) { struct scx_sched *sch = scx_root; + if (task_dead_and_done(p)) + return; + scx_enable_task(p); /* @@ -3090,6 +3135,9 @@ static void switching_to_scx(struct rq *rq, struct task_struct *p) static void switched_from_scx(struct rq *rq, struct task_struct *p) { + if (task_dead_and_done(p)) + return; + scx_disable_task(p); } diff --git a/kernel/vmcore_info.c b/kernel/vmcore_info.c index fe9bf8db1922e6..e2784038bbed79 100644 --- a/kernel/vmcore_info.c +++ b/kernel/vmcore_info.c @@ -36,7 +36,11 @@ struct hwerr_info { time64_t timestamp; }; -static struct hwerr_info hwerr_data[HWERR_RECOV_MAX]; +/* + * The hwerr_data[] array is declared with global scope so that it remains + * accessible to vmcoreinfo even when Link Time Optimization (LTO) is enabled. + */ +struct hwerr_info hwerr_data[HWERR_RECOV_MAX]; Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type, void *data, size_t data_len) diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c index 84ecccddc77182..012d5614efb9ab 100644 --- a/lib/flex_proportions.c +++ b/lib/flex_proportions.c @@ -64,13 +64,14 @@ void fprop_global_destroy(struct fprop_global *p) bool fprop_new_period(struct fprop_global *p, int periods) { s64 events = percpu_counter_sum(&p->events); + unsigned long flags; /* * Don't do anything if there are no events. 
*/ if (events <= 1) return false; - preempt_disable_nested(); + local_irq_save(flags); write_seqcount_begin(&p->sequence); if (periods < 64) events -= events >> periods; @@ -78,7 +79,7 @@ bool fprop_new_period(struct fprop_global *p, int periods) percpu_counter_add(&p->events, -events); p->period += periods; write_seqcount_end(&p->sequence); - preempt_enable_nested(); + local_irq_restore(flags); return true; } diff --git a/lib/test_hmm.c b/lib/test_hmm.c index 8af169d3873ae4..455a6862ae503b 100644 --- a/lib/test_hmm.c +++ b/lib/test_hmm.c @@ -662,7 +662,9 @@ static struct page *dmirror_devmem_alloc_page(struct dmirror *dmirror, goto error; } - zone_device_folio_init(page_folio(dpage), order); + zone_device_folio_init(page_folio(dpage), + page_pgmap(folio_page(page_folio(dpage), 0)), + order); dpage->zone_device_data = rpage; return dpage; diff --git a/mm/kasan/common.c b/mm/kasan/common.c index ed489a14dddf74..b7d05c2a6d93dd 100644 --- a/mm/kasan/common.c +++ b/mm/kasan/common.c @@ -606,4 +606,25 @@ void __kasan_unpoison_vmap_areas(struct vm_struct **vms, int nr_vms, __kasan_unpoison_vmalloc(addr, size, flags | KASAN_VMALLOC_KEEP_TAG); } } + +void __kasan_vrealloc(const void *addr, unsigned long old_size, + unsigned long new_size) +{ + if (new_size < old_size) { + kasan_poison_last_granule(addr, new_size); + + new_size = round_up(new_size, KASAN_GRANULE_SIZE); + old_size = round_up(old_size, KASAN_GRANULE_SIZE); + if (new_size < old_size) + __kasan_poison_vmalloc(addr + new_size, + old_size - new_size); + } else if (new_size > old_size) { + old_size = round_down(old_size, KASAN_GRANULE_SIZE); + __kasan_unpoison_vmalloc(addr + old_size, + new_size - old_size, + KASAN_VMALLOC_PROT_NORMAL | + KASAN_VMALLOC_VM_ALLOC | + KASAN_VMALLOC_KEEP_TAG); + } +} #endif diff --git a/mm/kfence/core.c b/mm/kfence/core.c index da0f5b6f57446f..4f79ec72075254 100644 --- a/mm/kfence/core.c +++ b/mm/kfence/core.c @@ -596,7 +596,7 @@ static void rcu_guarded_free(struct rcu_head *h) static unsigned long kfence_init_pool(void) { unsigned long addr, start_pfn; - int i; + int i, rand; if (!arch_kfence_init_pool()) return (unsigned long)__kfence_pool; @@ -647,13 +647,27 @@ static unsigned long kfence_init_pool(void) INIT_LIST_HEAD(&meta->list); raw_spin_lock_init(&meta->lock); meta->state = KFENCE_OBJECT_UNUSED; - meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */ - list_add_tail(&meta->list, &kfence_freelist); + /* Use addr to randomize the freelist. */ + meta->addr = i; /* Protect the right redzone. 
*/ - if (unlikely(!kfence_protect(addr + PAGE_SIZE))) + if (unlikely(!kfence_protect(addr + 2 * i * PAGE_SIZE + PAGE_SIZE))) goto reset_slab; + } + + for (i = CONFIG_KFENCE_NUM_OBJECTS; i > 0; i--) { + rand = get_random_u32_below(i); + swap(kfence_metadata_init[i - 1].addr, kfence_metadata_init[rand].addr); + } + for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { + struct kfence_metadata *meta_1 = &kfence_metadata_init[i]; + struct kfence_metadata *meta_2 = &kfence_metadata_init[meta_1->addr]; + + list_add_tail(&meta_2->list, &kfence_freelist); + } + for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) { + kfence_metadata_init[i].addr = addr; addr += 2 * PAGE_SIZE; } @@ -666,6 +680,7 @@ static unsigned long kfence_init_pool(void) return 0; reset_slab: + addr += 2 * i * PAGE_SIZE; for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) { struct page *page; diff --git a/mm/memfd.c b/mm/memfd.c index ab5312aff14b97..f032c60529265c 100644 --- a/mm/memfd.c +++ b/mm/memfd.c @@ -456,7 +456,7 @@ static char *alloc_name(const char __user *uname) return ERR_PTR(error); } -static struct file *alloc_file(const char *name, unsigned int flags) +struct file *memfd_alloc_file(const char *name, unsigned int flags) { unsigned int *file_seals; struct file *file; @@ -520,5 +520,5 @@ SYSCALL_DEFINE2(memfd_create, return PTR_ERR(name); fd_flags = (flags & MFD_CLOEXEC) ? O_CLOEXEC : 0; - return FD_ADD(fd_flags, alloc_file(name, flags)); + return FD_ADD(fd_flags, memfd_alloc_file(name, flags)); } diff --git a/mm/memfd_luo.c b/mm/memfd_luo.c index 4f6ba63b43105e..a34fccc23b6a42 100644 --- a/mm/memfd_luo.c +++ b/mm/memfd_luo.c @@ -78,6 +78,7 @@ #include #include #include +#include #include "internal.h" static int memfd_luo_preserve_folios(struct file *file, @@ -443,11 +444,11 @@ static int memfd_luo_retrieve(struct liveupdate_file_op_args *args) if (!ser) return -EINVAL; - file = shmem_file_setup("", 0, VM_NORESERVE); - + file = memfd_alloc_file("", 0); if (IS_ERR(file)) { pr_err("failed to setup file: %pe\n", file); - return PTR_ERR(file); + err = PTR_ERR(file); + goto free_ser; } vfs_setpos(file, ser->pos, MAX_LFS_FILESIZE); @@ -473,7 +474,8 @@ static int memfd_luo_retrieve(struct liveupdate_file_op_args *args) put_file: fput(file); - +free_ser: + kho_restore_free(ser); return err; } diff --git a/mm/memory-failure.c b/mm/memory-failure.c index c80c2907da3332..cf0d526e6d418e 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -692,6 +692,8 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift, unsigned long poisoned_pfn, struct to_kill *tk) { unsigned long pfn = 0; + unsigned long hwpoison_vaddr; + unsigned long mask; if (pte_present(pte)) { pfn = pte_pfn(pte); @@ -702,10 +704,12 @@ static int check_hwpoisoned_entry(pte_t pte, unsigned long addr, short shift, pfn = softleaf_to_pfn(entry); } - if (!pfn || pfn != poisoned_pfn) + mask = ~((1UL << (shift - PAGE_SHIFT)) - 1); + if (!pfn || pfn != (poisoned_pfn & mask)) return 0; - set_to_kill(tk, addr, shift); + hwpoison_vaddr = addr + ((poisoned_pfn - pfn) << PAGE_SHIFT); + set_to_kill(tk, hwpoison_vaddr, shift); return 1; } @@ -1883,12 +1887,22 @@ static unsigned long __folio_free_raw_hwp(struct folio *folio, bool move_flag) return count; } -static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page) +#define MF_HUGETLB_FREED 0 /* freed hugepage */ +#define MF_HUGETLB_IN_USED 1 /* in-use hugepage */ +#define MF_HUGETLB_NON_HUGEPAGE 2 /* not a hugepage */ +#define MF_HUGETLB_FOLIO_PRE_POISONED 3 /* folio already poisoned */ 
+#define MF_HUGETLB_PAGE_PRE_POISONED 4 /* exact page already poisoned */ +#define MF_HUGETLB_RETRY 5 /* hugepage is busy, retry */ +/* + * Set hugetlb folio as hwpoisoned, update folio private raw hwpoison list + * to keep track of the poisoned pages. + */ +static int hugetlb_update_hwpoison(struct folio *folio, struct page *page) { struct llist_head *head; struct raw_hwp_page *raw_hwp; struct raw_hwp_page *p; - int ret = folio_test_set_hwpoison(folio) ? -EHWPOISON : 0; + int ret = folio_test_set_hwpoison(folio) ? MF_HUGETLB_FOLIO_PRE_POISONED : 0; /* * Once the hwpoison hugepage has lost reliable raw error info, @@ -1896,20 +1910,17 @@ static int folio_set_hugetlb_hwpoison(struct folio *folio, struct page *page) * so skip to add additional raw error info. */ if (folio_test_hugetlb_raw_hwp_unreliable(folio)) - return -EHWPOISON; + return MF_HUGETLB_FOLIO_PRE_POISONED; head = raw_hwp_list_head(folio); llist_for_each_entry(p, head->first, node) { if (p->page == page) - return -EHWPOISON; + return MF_HUGETLB_PAGE_PRE_POISONED; } raw_hwp = kmalloc(sizeof(struct raw_hwp_page), GFP_ATOMIC); if (raw_hwp) { raw_hwp->page = page; llist_add(&raw_hwp->node, head); - /* the first error event will be counted in action_result(). */ - if (ret) - num_poisoned_pages_inc(page_to_pfn(page)); } else { /* * Failed to save raw error info. We no longer trace all @@ -1957,42 +1968,39 @@ void folio_clear_hugetlb_hwpoison(struct folio *folio) /* * Called from hugetlb code with hugetlb_lock held. - * - * Return values: - * 0 - free hugepage - * 1 - in-use hugepage - * 2 - not a hugepage - * -EBUSY - the hugepage is busy (try to retry) - * -EHWPOISON - the hugepage is already hwpoisoned */ int __get_huge_page_for_hwpoison(unsigned long pfn, int flags, bool *migratable_cleared) { struct page *page = pfn_to_page(pfn); struct folio *folio = page_folio(page); - int ret = 2; /* fallback to normal page handling */ bool count_increased = false; + int ret, rc; - if (!folio_test_hugetlb(folio)) + if (!folio_test_hugetlb(folio)) { + ret = MF_HUGETLB_NON_HUGEPAGE; goto out; - - if (flags & MF_COUNT_INCREASED) { - ret = 1; + } else if (flags & MF_COUNT_INCREASED) { + ret = MF_HUGETLB_IN_USED; count_increased = true; } else if (folio_test_hugetlb_freed(folio)) { - ret = 0; + ret = MF_HUGETLB_FREED; } else if (folio_test_hugetlb_migratable(folio)) { - ret = folio_try_get(folio); - if (ret) + if (folio_try_get(folio)) { + ret = MF_HUGETLB_IN_USED; count_increased = true; + } else { + ret = MF_HUGETLB_FREED; + } } else { - ret = -EBUSY; + ret = MF_HUGETLB_RETRY; if (!(flags & MF_NO_RETRY)) goto out; } - if (folio_set_hugetlb_hwpoison(folio, page)) { - ret = -EHWPOISON; + rc = hugetlb_update_hwpoison(folio, page); + if (rc >= MF_HUGETLB_FOLIO_PRE_POISONED) { + ret = rc; goto out; } @@ -2017,10 +2025,16 @@ int __get_huge_page_for_hwpoison(unsigned long pfn, int flags, * with basic operations like hugepage allocation/free/demotion. * So some of prechecks for hwpoison (pinning, and testing/setting * PageHWPoison) should be done in single hugetlb_lock range. 
+ * Returns: + * 0 - not hugetlb, or recovered + * -EBUSY - not recovered + * -EOPNOTSUPP - hwpoison_filter'ed + * -EHWPOISON - folio or exact page already poisoned + * -EFAULT - kill_accessing_process finds current->mm null */ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb) { - int res; + int res, rv; struct page *p = pfn_to_page(pfn); struct folio *folio; unsigned long page_flags; @@ -2029,22 +2043,29 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb *hugetlb = 1; retry: res = get_huge_page_for_hwpoison(pfn, flags, &migratable_cleared); - if (res == 2) { /* fallback to normal page handling */ + switch (res) { + case MF_HUGETLB_NON_HUGEPAGE: /* fallback to normal page handling */ *hugetlb = 0; return 0; - } else if (res == -EHWPOISON) { - if (flags & MF_ACTION_REQUIRED) { - folio = page_folio(p); - res = kill_accessing_process(current, folio_pfn(folio), flags); - } - action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED); - return res; - } else if (res == -EBUSY) { + case MF_HUGETLB_RETRY: if (!(flags & MF_NO_RETRY)) { flags |= MF_NO_RETRY; goto retry; } return action_result(pfn, MF_MSG_GET_HWPOISON, MF_IGNORED); + case MF_HUGETLB_FOLIO_PRE_POISONED: + case MF_HUGETLB_PAGE_PRE_POISONED: + rv = -EHWPOISON; + if (flags & MF_ACTION_REQUIRED) + rv = kill_accessing_process(current, pfn, flags); + if (res == MF_HUGETLB_PAGE_PRE_POISONED) + action_result(pfn, MF_MSG_ALREADY_POISONED, MF_FAILED); + else + action_result(pfn, MF_MSG_HUGE, MF_FAILED); + return rv; + default: + WARN_ON((res != MF_HUGETLB_FREED) && (res != MF_HUGETLB_IN_USED)); + break; } folio = page_folio(p); @@ -2055,7 +2076,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb if (migratable_cleared) folio_set_hugetlb_migratable(folio); folio_unlock(folio); - if (res == 1) + if (res == MF_HUGETLB_IN_USED) folio_put(folio); return -EOPNOTSUPP; } @@ -2064,7 +2085,7 @@ static int try_memory_failure_hugetlb(unsigned long pfn, int flags, int *hugetlb * Handling free hugepage. The possible race with hugepage allocation * or demotion can be prevented by PageHWPoison flag. */ - if (res == 0) { + if (res == MF_HUGETLB_FREED) { folio_unlock(folio); if (__page_handle_poison(p) > 0) { page_ref_inc(p); diff --git a/mm/memremap.c b/mm/memremap.c index 63c6ab4fdf082c..ac7be07e3361ae 100644 --- a/mm/memremap.c +++ b/mm/memremap.c @@ -477,10 +477,43 @@ void free_zone_device_folio(struct folio *folio) } } -void zone_device_page_init(struct page *page, unsigned int order) +void zone_device_page_init(struct page *page, struct dev_pagemap *pgmap, + unsigned int order) { + struct page *new_page = page; + unsigned int i; + VM_WARN_ON_ONCE(order > MAX_ORDER_NR_PAGES); + for (i = 0; i < (1UL << order); ++i, ++new_page) { + struct folio *new_folio = (struct folio *)new_page; + + /* + * new_page could have been part of previous higher order folio + * which encodes the order, in page + 1, in the flags bits. We + * blindly clear bits which could have set my order field here, + * including page head. + */ + new_page->flags.f &= ~0xffUL; /* Clear possible order, page head */ + +#ifdef NR_PAGES_IN_LARGE_FOLIO + /* + * This pointer math looks odd, but new_page could have been + * part of a previous higher order folio, which sets _nr_pages + * in page + 1 (new_page). Therefore, we use pointer casting to + * correctly locate the _nr_pages bits within new_page which + * could have modified by previous higher order folio. 
+ */ + ((struct folio *)(new_page - 1))->_nr_pages = 0; +#endif + + new_folio->mapping = NULL; + new_folio->pgmap = pgmap; /* Also clear compound head */ + new_folio->share = 0; /* fsdax only, unused for device private */ + VM_WARN_ON_FOLIO(folio_ref_count(new_folio), new_folio); + VM_WARN_ON_FOLIO(!folio_is_zone_device(new_folio), new_folio); + } + /* * Drivers shouldn't be allocating pages after calling * memunmap_pages(). diff --git a/mm/mm_init.c b/mm/mm_init.c index fc2a6f1e518f1a..2a809cd8e7fae0 100644 --- a/mm/mm_init.c +++ b/mm/mm_init.c @@ -2059,7 +2059,7 @@ static unsigned long __init deferred_init_pages(struct zone *zone, */ static unsigned long __init deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, - struct zone *zone) + struct zone *zone, bool can_resched) { int nid = zone_to_nid(zone); unsigned long nr_pages = 0; @@ -2085,10 +2085,10 @@ deferred_init_memmap_chunk(unsigned long start_pfn, unsigned long end_pfn, spfn = chunk_end; - if (irqs_disabled()) - touch_nmi_watchdog(); - else + if (can_resched) cond_resched(); + else + touch_nmi_watchdog(); } } @@ -2101,7 +2101,7 @@ deferred_init_memmap_job(unsigned long start_pfn, unsigned long end_pfn, { struct zone *zone = arg; - deferred_init_memmap_chunk(start_pfn, end_pfn, zone); + deferred_init_memmap_chunk(start_pfn, end_pfn, zone, true); } static unsigned int __init @@ -2216,7 +2216,7 @@ bool __init deferred_grow_zone(struct zone *zone, unsigned int order) for (spfn = first_deferred_pfn, epfn = SECTION_ALIGN_UP(spfn + 1); nr_pages < nr_pages_needed && spfn < zone_end_pfn(zone); spfn = epfn, epfn += PAGES_PER_SECTION) { - nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone); + nr_pages += deferred_init_memmap_chunk(spfn, epfn, zone, false); } /* diff --git a/mm/shmem.c b/mm/shmem.c index ec6c01378e9d2b..6c3485d24d66f8 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -962,17 +962,29 @@ static void shmem_delete_from_page_cache(struct folio *folio, void *radswap) * being freed). 
*/ static long shmem_free_swap(struct address_space *mapping, - pgoff_t index, void *radswap) + pgoff_t index, pgoff_t end, void *radswap) { - int order = xa_get_order(&mapping->i_pages, index); - void *old; + XA_STATE(xas, &mapping->i_pages, index); + unsigned int nr_pages = 0; + pgoff_t base; + void *entry; - old = xa_cmpxchg_irq(&mapping->i_pages, index, radswap, NULL, 0); - if (old != radswap) - return 0; - free_swap_and_cache_nr(radix_to_swp_entry(radswap), 1 << order); + xas_lock_irq(&xas); + entry = xas_load(&xas); + if (entry == radswap) { + nr_pages = 1 << xas_get_order(&xas); + base = round_down(xas.xa_index, nr_pages); + if (base < index || base + nr_pages - 1 > end) + nr_pages = 0; + else + xas_store(&xas, NULL); + } + xas_unlock_irq(&xas); + + if (nr_pages) + free_swap_and_cache_nr(radix_to_swp_entry(radswap), nr_pages); - return 1 << order; + return nr_pages; } /* @@ -1124,8 +1136,8 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, if (xa_is_value(folio)) { if (unfalloc) continue; - nr_swaps_freed += shmem_free_swap(mapping, - indices[i], folio); + nr_swaps_freed += shmem_free_swap(mapping, indices[i], + end - 1, folio); continue; } @@ -1191,12 +1203,23 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend, folio = fbatch.folios[i]; if (xa_is_value(folio)) { + int order; long swaps_freed; if (unfalloc) continue; - swaps_freed = shmem_free_swap(mapping, indices[i], folio); + swaps_freed = shmem_free_swap(mapping, indices[i], + end - 1, folio); if (!swaps_freed) { + /* + * If we found a large swap entry crossing the end border, + * skip it, as the truncate_inode_partial_folio above + * should have at least zeroed its content once. + */ + order = shmem_confirm_swap(mapping, indices[i], + radix_to_swp_entry(folio)); + if (order > 0 && indices[i] + (1 << order) > end) + continue; /* Swap was replaced by page: retry */ index = indices[i]; break; diff --git a/mm/swap.h b/mm/swap.h index d034c13d8dd260..1bd466da303939 100644 --- a/mm/swap.h +++ b/mm/swap.h @@ -198,7 +198,7 @@ int swap_writeout(struct folio *folio, struct swap_iocb **swap_plug); void __swap_writepage(struct folio *folio, struct swap_iocb **swap_plug); /* linux/mm/swap_state.c */ -extern struct address_space swap_space __ro_after_init; +extern struct address_space swap_space __read_mostly; static inline struct address_space *swap_address_space(swp_entry_t entry) { return &swap_space; } diff --git a/mm/swap_state.c b/mm/swap_state.c index 5f97c6ae70a2f7..44d228982521ea 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -37,8 +37,7 @@ static const struct address_space_operations swap_aops = { #endif }; -/* Set swap_space as read only as swap cache is handled by swap table */ -struct address_space swap_space __ro_after_init = { +struct address_space swap_space __read_mostly = { .a_ops = &swap_aops, }; diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 628f96e83b1187..e286c2d2068cbd 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -4322,7 +4322,7 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align if (want_init_on_free() || want_init_on_alloc(flags)) memset((void *)p + size, 0, old_size - size); vm->requested_size = size; - kasan_poison_vmalloc(p + size, old_size - size); + kasan_vrealloc(p, old_size, size); return (void *)p; } @@ -4330,16 +4330,13 @@ void *vrealloc_node_align_noprof(const void *p, size_t size, unsigned long align * We already have the bytes available in the allocation; use them. 
*/ if (size <= alloced_size) { - kasan_unpoison_vmalloc(p + old_size, size - old_size, - KASAN_VMALLOC_PROT_NORMAL | - KASAN_VMALLOC_VM_ALLOC | - KASAN_VMALLOC_KEEP_TAG); /* * No need to zero memory here, as unused memory will have * already been zeroed at initial allocation time or during * realloc shrink time. */ vm->requested_size = size; + kasan_vrealloc(p, old_size, size); return (void *)p; } diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 5be9b8c919490d..0e46f9e08b1067 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -1966,6 +1966,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) } mgmt_cmd_status(cmd->sk, cmd->hdev->id, cmd->opcode, mgmt_err); + mgmt_pending_free(cmd); return; } @@ -1984,6 +1985,7 @@ static void set_ssp_complete(struct hci_dev *hdev, void *data, int err) sock_put(match.sk); hci_update_eir_sync(hdev); + mgmt_pending_free(cmd); } static int set_ssp_sync(struct hci_dev *hdev, void *data) @@ -6438,6 +6440,7 @@ static void set_advertising_complete(struct hci_dev *hdev, void *data, int err) hci_dev_clear_flag(hdev, HCI_ADVERTISING); settings_rsp(cmd, &match); + mgmt_pending_free(cmd); new_settings(hdev, match.sk); diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index e355a15bf5ab13..1405f1061a5493 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -274,7 +274,7 @@ static int nf_hook_bridge_pre(struct sk_buff *skb, struct sk_buff **pskb) int ret; net = dev_net(skb->dev); -#ifdef HAVE_JUMP_LABEL +#ifdef CONFIG_JUMP_LABEL if (!static_key_false(&nf_hooks_needed[NFPROTO_BRIDGE][NF_BR_PRE_ROUTING])) goto frame_finish; #endif diff --git a/net/core/filter.c b/net/core/filter.c index 616e0520a0bb81..bcd73d9bd76491 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -3353,6 +3353,7 @@ static int bpf_skb_proto_4_to_6(struct sk_buff *skb) shinfo->gso_type &= ~SKB_GSO_TCPV4; shinfo->gso_type |= SKB_GSO_TCPV6; } + shinfo->gso_type |= SKB_GSO_DODGY; } bpf_skb_change_protocol(skb, ETH_P_IPV6); @@ -3383,6 +3384,7 @@ static int bpf_skb_proto_6_to_4(struct sk_buff *skb) shinfo->gso_type &= ~SKB_GSO_TCPV6; shinfo->gso_type |= SKB_GSO_TCPV4; } + shinfo->gso_type |= SKB_GSO_DODGY; } bpf_skb_change_protocol(skb, ETH_P_IP); diff --git a/net/ipv4/tcp_offload.c b/net/ipv4/tcp_offload.c index fdda18b1abda0f..942a948f1a3109 100644 --- a/net/ipv4/tcp_offload.c +++ b/net/ipv4/tcp_offload.c @@ -107,7 +107,8 @@ static struct sk_buff *tcp4_gso_segment(struct sk_buff *skb, if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) { struct tcphdr *th = tcp_hdr(skb); - if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) + if ((skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) && + !(skb_shinfo(skb)->gso_type & SKB_GSO_DODGY)) return __tcp4_gso_segment_list(skb, features); skb->ip_summed = CHECKSUM_NONE; diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c index 19d0b5b09ffae6..589456bd8b5f1b 100644 --- a/net/ipv4/udp_offload.c +++ b/net/ipv4/udp_offload.c @@ -514,7 +514,8 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb, if (skb_shinfo(gso_skb)->gso_type & SKB_GSO_FRAGLIST) { /* Detect modified geometry and pass those to skb_segment. 
*/ - if (skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size) + if ((skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size) && + !(skb_shinfo(gso_skb)->gso_type & SKB_GSO_DODGY)) return __udp_gso_segment_list(gso_skb, features, is_ipv6); ret = __skb_linearize(gso_skb); diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 5d2f90babaa5f1..9d37e7711bc2b8 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -965,7 +965,9 @@ static enum skb_drop_reason icmpv6_echo_reply(struct sk_buff *skb) fl6.daddr = ipv6_hdr(skb)->saddr; if (saddr) fl6.saddr = *saddr; - fl6.flowi6_oif = icmp6_iif(skb); + fl6.flowi6_oif = ipv6_addr_loopback(&fl6.daddr) ? + skb->dev->ifindex : + icmp6_iif(skb); fl6.fl6_icmp_type = type; fl6.flowi6_mark = mark; fl6.flowi6_uid = sock_net_uid(net, NULL); diff --git a/net/ipv6/tcpv6_offload.c b/net/ipv6/tcpv6_offload.c index effeba58630b5a..5670d32c27f80e 100644 --- a/net/ipv6/tcpv6_offload.c +++ b/net/ipv6/tcpv6_offload.c @@ -170,7 +170,8 @@ static struct sk_buff *tcp6_gso_segment(struct sk_buff *skb, if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST) { struct tcphdr *th = tcp_hdr(skb); - if (skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) + if ((skb_pagelen(skb) - th->doff * 4 == skb_shinfo(skb)->gso_size) && + !(skb_shinfo(skb)->gso_type & SKB_GSO_DODGY)) return __tcp6_gso_segment_list(skb, features); skb->ip_summed = CHECKSUM_NONE; diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index b72345c779c0f3..73f57b9e0ebf76 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -8,7 +8,7 @@ * Copyright 2007, Michael Wu * Copyright 2013-2014 Intel Mobile Communications GmbH * Copyright (C) 2015 - 2017 Intel Deutschland GmbH - * Copyright (C) 2018 - 2025 Intel Corporation + * Copyright (C) 2018 - 2026 Intel Corporation */ #include @@ -6190,8 +6190,10 @@ ieee80211_parse_adv_t2l(struct ieee80211_sub_if_data *sdata, return -EINVAL; } - link_map_presence = *pos; - pos++; + if (!(control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP)) { + link_map_presence = *pos; + pos++; + } if (control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT) { ttlm_info->switch_time = get_unaligned_le16(pos); diff --git a/net/mptcp/pm_kernel.c b/net/mptcp/pm_kernel.c index 57570a44e41853..b26675054b0dc7 100644 --- a/net/mptcp/pm_kernel.c +++ b/net/mptcp/pm_kernel.c @@ -1294,16 +1294,26 @@ static void __reset_counters(struct pm_nl_pernet *pernet) int mptcp_pm_nl_flush_addrs_doit(struct sk_buff *skb, struct genl_info *info) { struct pm_nl_pernet *pernet = genl_info_pm_nl(info); - LIST_HEAD(free_list); + struct list_head free_list; spin_lock_bh(&pernet->lock); - list_splice_init(&pernet->endp_list, &free_list); + free_list = pernet->endp_list; + INIT_LIST_HEAD_RCU(&pernet->endp_list); __reset_counters(pernet); pernet->next_id = 1; bitmap_zero(pernet->id_bitmap, MPTCP_PM_MAX_ADDR_ID + 1); spin_unlock_bh(&pernet->lock); - mptcp_nl_flush_addrs_list(sock_net(skb->sk), &free_list); + + if (free_list.next == &pernet->endp_list) + return 0; + synchronize_rcu(); + + /* Adjust the pointers to free_list instead of pernet->endp_list */ + free_list.prev->next = &free_list; + free_list.next->prev = &free_list; + + mptcp_nl_flush_addrs_list(sock_net(skb->sk), &free_list); __flush_addrs(&free_list); return 0; } diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index f505b780f7139b..8d323366741810 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -821,11 +821,8 @@ static bool __mptcp_ofo_queue(struct mptcp_sock *msk) static bool __mptcp_subflow_error_report(struct 
sock *sk, struct sock *ssk) { - int err = sock_error(ssk); int ssk_state; - - if (!err) - return false; + int err; /* only propagate errors on fallen-back sockets or * on MPC connect @@ -833,6 +830,10 @@ static bool __mptcp_subflow_error_report(struct sock *sk, struct sock *ssk) if (sk->sk_state != TCP_SYN_SENT && !__mptcp_check_fallback(mptcp_sk(sk))) return false; + err = sock_error(ssk); + if (!err) + return false; + /* We need to propagate only transition to CLOSE state. * Orphaned socket will see such state change via * subflow_sched_work_if_closed() and that path will properly @@ -2598,8 +2599,8 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk, struct mptcp_sock *msk = mptcp_sk(sk); struct sk_buff *skb; - /* The first subflow can already be closed and still in the list */ - if (subflow->close_event_done) + /* The first subflow can already be closed or disconnected */ + if (subflow->close_event_done || READ_ONCE(subflow->local_id) < 0) return; subflow->close_event_done = true; diff --git a/net/nfc/core.c b/net/nfc/core.c index 82f023f377541b..f50e5bab35d8ec 100644 --- a/net/nfc/core.c +++ b/net/nfc/core.c @@ -1147,14 +1147,14 @@ int nfc_register_device(struct nfc_dev *dev) EXPORT_SYMBOL(nfc_register_device); /** - * nfc_unregister_device - unregister a nfc device in the nfc subsystem + * nfc_unregister_rfkill - unregister a nfc device in the rfkill subsystem * * @dev: The nfc device to unregister */ -void nfc_unregister_device(struct nfc_dev *dev) +void nfc_unregister_rfkill(struct nfc_dev *dev) { - int rc; struct rfkill *rfk = NULL; + int rc; pr_debug("dev_name=%s\n", dev_name(&dev->dev)); @@ -1175,7 +1175,16 @@ void nfc_unregister_device(struct nfc_dev *dev) rfkill_unregister(rfk); rfkill_destroy(rfk); } +} +EXPORT_SYMBOL(nfc_unregister_rfkill); +/** + * nfc_remove_device - remove a nfc device in the nfc subsystem + * + * @dev: The nfc device to remove + */ +void nfc_remove_device(struct nfc_dev *dev) +{ if (dev->ops->check_presence) { timer_delete_sync(&dev->check_pres_timer); cancel_work_sync(&dev->check_pres_work); @@ -1188,6 +1197,18 @@ void nfc_unregister_device(struct nfc_dev *dev) device_del(&dev->dev); mutex_unlock(&nfc_devlist_mutex); } +EXPORT_SYMBOL(nfc_remove_device); + +/** + * nfc_unregister_device - unregister a nfc device in the nfc subsystem + * + * @dev: The nfc device to unregister + */ +void nfc_unregister_device(struct nfc_dev *dev) +{ + nfc_unregister_rfkill(dev); + nfc_remove_device(dev); +} EXPORT_SYMBOL(nfc_unregister_device); static int __init nfc_init(void) diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c index e2680a3bef7995..b652323bc2c12b 100644 --- a/net/nfc/llcp_commands.c +++ b/net/nfc/llcp_commands.c @@ -778,8 +778,23 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap, if (likely(frag_len > 0)) skb_put_data(pdu, msg_ptr, frag_len); + spin_lock(&local->tx_queue.lock); + + if (list_empty(&local->list)) { + spin_unlock(&local->tx_queue.lock); + + kfree_skb(pdu); + + len -= remaining_len; + if (len == 0) + len = -ENXIO; + break; + } + /* No need to check for the peer RW for UI frames */ - skb_queue_tail(&local->tx_queue, pdu); + __skb_queue_tail(&local->tx_queue, pdu); + + spin_unlock(&local->tx_queue.lock); remaining_len -= frag_len; msg_ptr += frag_len; diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c index beeb3b4d28caba..444a3774c8e806 100644 --- a/net/nfc/llcp_core.c +++ b/net/nfc/llcp_core.c @@ -316,7 +316,9 @@ static struct nfc_llcp_local *nfc_llcp_remove_local(struct nfc_dev *dev) 
spin_lock(&llcp_devices_lock); list_for_each_entry_safe(local, tmp, &llcp_devices, list) if (local->dev == dev) { - list_del(&local->list); + spin_lock(&local->tx_queue.lock); + list_del_init(&local->list); + spin_unlock(&local->tx_queue.lock); spin_unlock(&llcp_devices_lock); return local; } diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c index fc921cd2cdffa5..e419e020a70a33 100644 --- a/net/nfc/nci/core.c +++ b/net/nfc/nci/core.c @@ -1303,6 +1303,8 @@ void nci_unregister_device(struct nci_dev *ndev) { struct nci_conn_info *conn_info, *n; + nfc_unregister_rfkill(ndev->nfc_dev); + /* This set_bit is not protected with specialized barrier, * However, it is fine because the mutex_lock(&ndev->req_lock); * in nci_close_device() will help to emit one. @@ -1320,7 +1322,7 @@ void nci_unregister_device(struct nci_dev *ndev) /* conn_info is allocated with devm_kzalloc */ } - nfc_unregister_device(ndev->nfc_dev); + nfc_remove_device(ndev->nfc_dev); } EXPORT_SYMBOL(nci_unregister_device); diff --git a/rust/Makefile b/rust/Makefile index 5d357dce1704d1..4dcc2eff51cb26 100644 --- a/rust/Makefile +++ b/rust/Makefile @@ -383,6 +383,7 @@ bindgen_skip_c_flags := -mno-fp-ret-in-387 -mpreferred-stack-boundary=% \ -fno-inline-functions-called-once -fsanitize=bounds-strict \ -fstrict-flex-arrays=% -fmin-function-alignment=% \ -fzero-init-padding-bits=% -mno-fdpic \ + -fdiagnostics-show-context -fdiagnostics-show-context=% \ --param=% --param asan-% -fno-isolate-erroneous-paths-dereference # Derived from `scripts/Makefile.clang`. diff --git a/rust/kernel/bits.rs b/rust/kernel/bits.rs index 553d5026588329..2daead125626ae 100644 --- a/rust/kernel/bits.rs +++ b/rust/kernel/bits.rs @@ -27,7 +27,8 @@ macro_rules! impl_bit_fn { /// /// This version is the default and should be used if `n` is known at /// compile time. - #[inline] + // Always inline to optimize out error path of `build_assert`. + #[inline(always)] pub const fn [](n: u32) -> $ty { build_assert!(n < <$ty>::BITS); (1 as $ty) << n @@ -75,7 +76,8 @@ macro_rules! impl_genmask_fn { /// This version is the default and should be used if the range is known /// at compile time. $(#[$genmask_ex])* - #[inline] + // Always inline to optimize out error path of `build_assert`. + #[inline(always)] pub const fn [](range: RangeInclusive) -> $ty { let start = *range.start(); let end = *range.end(); diff --git a/rust/kernel/fmt.rs b/rust/kernel/fmt.rs index 84d634201d90a7..1e8725eb44ed7d 100644 --- a/rust/kernel/fmt.rs +++ b/rust/kernel/fmt.rs @@ -6,7 +6,7 @@ pub use core::fmt::{Arguments, Debug, Error, Formatter, Result, Write}; -/// Internal adapter used to route allow implementations of formatting traits for foreign types. +/// Internal adapter used to route and allow implementations of formatting traits for foreign types. /// /// It is inserted automatically by the [`fmt!`] macro and is not meant to be used directly. /// diff --git a/rust/kernel/num/bounded.rs b/rust/kernel/num/bounded.rs index f870080af8ac0a..fa81acbdc8c258 100644 --- a/rust/kernel/num/bounded.rs +++ b/rust/kernel/num/bounded.rs @@ -40,11 +40,11 @@ fn fits_within(value: T, num_bits: u32) -> bool { fits_within!(value, T, num_bits) } -/// An integer value that requires only the `N` less significant bits of the wrapped type to be +/// An integer value that requires only the `N` least significant bits of the wrapped type to be /// encoded. 
/// /// This limits the number of usable bits in the wrapped integer type, and thus the stored value to -/// a narrower range, which provides guarantees that can be useful when working with in e.g. +/// a narrower range, which provides guarantees that can be useful when working within e.g. /// bitfields. /// /// # Invariants @@ -56,7 +56,7 @@ fn fits_within(value: T, num_bits: u32) -> bool { /// # Examples /// /// The preferred way to create values is through constants and the [`Bounded::new`] family of -/// constructors, as they trigger a build error if the type invariants cannot be withheld. +/// constructors, as they trigger a build error if the type invariants cannot be upheld. /// /// ``` /// use kernel::num::Bounded; @@ -82,7 +82,7 @@ fn fits_within(value: T, num_bits: u32) -> bool { /// ``` /// use kernel::num::Bounded; /// -/// // This succeeds because `15` can be represented with 4 unsigned bits. +/// // This succeeds because `15` can be represented with 4 unsigned bits. /// assert!(Bounded::::try_new(15).is_some()); /// /// // This fails because `16` cannot be represented with 4 unsigned bits. @@ -221,7 +221,7 @@ fn fits_within(value: T, num_bits: u32) -> bool { /// let v: Option> = 128u32.try_into_bounded(); /// assert_eq!(v.as_deref().copied(), Some(128)); /// -/// // Fails because `128` doesn't fits into 6 bits. +/// // Fails because `128` doesn't fit into 6 bits. /// let v: Option> = 128u32.try_into_bounded(); /// assert_eq!(v, None); /// ``` @@ -259,9 +259,9 @@ macro_rules! impl_const_new { assert!(fits_within!(VALUE, $type, N)); } - // INVARIANT: `fits_within` confirmed that `VALUE` can be represented within + // SAFETY: `fits_within` confirmed that `VALUE` can be represented within // `N` bits. - Self::__new(VALUE) + unsafe { Self::__new(VALUE) } } } )* @@ -282,9 +282,10 @@ where /// All instances of [`Bounded`] must be created through this method as it enforces most of the /// type invariants. /// - /// The caller remains responsible for checking, either statically or dynamically, that `value` - /// can be represented as a `T` using at most `N` bits. - const fn __new(value: T) -> Self { + /// # Safety + /// + /// The caller must ensure that `value` can be represented within `N` bits. + const unsafe fn __new(value: T) -> Self { // Enforce the type invariants. const { // `N` cannot be zero. @@ -293,6 +294,7 @@ where assert!(N <= T::BITS); } + // INVARIANT: The caller ensures `value` fits within `N` bits. Self(value) } @@ -328,8 +330,8 @@ where /// ``` pub fn try_new(value: T) -> Option { fits_within(value, N).then(|| { - // INVARIANT: `fits_within` confirmed that `value` can be represented within `N` bits. - Self::__new(value) + // SAFETY: `fits_within` confirmed that `value` can be represented within `N` bits. + unsafe { Self::__new(value) } }) } @@ -363,6 +365,7 @@ where /// assert_eq!(Bounded::::from_expr(1).get(), 1); /// assert_eq!(Bounded::::from_expr(0xff).get(), 0xff); /// ``` + // Always inline to optimize out error path of `build_assert`. #[inline(always)] pub fn from_expr(expr: T) -> Self { crate::build_assert!( @@ -370,8 +373,8 @@ where "Requested value larger than maximal representable value." ); - // INVARIANT: `fits_within` confirmed that `expr` can be represented within `N` bits. - Self::__new(expr) + // SAFETY: `fits_within` confirmed that `expr` can be represented within `N` bits. + unsafe { Self::__new(expr) } } /// Returns the wrapped value as the backing type. 
@@ -410,9 +413,9 @@ where ); } - // INVARIANT: The value did fit within `N` bits, so it will all the more fit within + // SAFETY: The value did fit within `N` bits, so it will all the more fit within // the larger `M` bits. - Bounded::__new(self.0) + unsafe { Bounded::__new(self.0) } } /// Attempts to shrink the number of bits usable for `self`. @@ -466,9 +469,9 @@ where // `U` and `T` have the same sign, hence this conversion cannot fail. let value = unsafe { U::try_from(self.get()).unwrap_unchecked() }; - // INVARIANT: Although the backing type has changed, the value is still represented within + // SAFETY: Although the backing type has changed, the value is still represented within // `N` bits, and with the same signedness. - Bounded::__new(value) + unsafe { Bounded::__new(value) } } } @@ -501,7 +504,7 @@ where /// let v: Option> = 128u32.try_into_bounded(); /// assert_eq!(v.as_deref().copied(), Some(128)); /// -/// // Fails because `128` doesn't fits into 6 bits. +/// // Fails because `128` doesn't fit into 6 bits. /// let v: Option> = 128u32.try_into_bounded(); /// assert_eq!(v, None); /// ``` @@ -944,9 +947,9 @@ macro_rules! impl_from_primitive { Self: AtLeastXBits<{ <$type as Integer>::BITS as usize }>, { fn from(value: $type) -> Self { - // INVARIANT: The trait bound on `Self` guarantees that `N` bits is + // SAFETY: The trait bound on `Self` guarantees that `N` bits is // enough to hold any value of the source type. - Self::__new(T::from(value)) + unsafe { Self::__new(T::from(value)) } } } )* @@ -1051,8 +1054,8 @@ where T: Integer + From, { fn from(value: bool) -> Self { - // INVARIANT: A boolean can be represented using a single bit, and thus fits within any + // SAFETY: A boolean can be represented using a single bit, and thus fits within any // integer type for any `N` > 0. - Self::__new(T::from(value)) + unsafe { Self::__new(T::from(value)) } } } diff --git a/rust/kernel/rbtree.rs b/rust/kernel/rbtree.rs index 4729eb56827a12..312cecab72e780 100644 --- a/rust/kernel/rbtree.rs +++ b/rust/kernel/rbtree.rs @@ -985,7 +985,7 @@ impl<'a, K, V> CursorMut<'a, K, V> { self.peek(Direction::Prev) } - /// Access the previous node without moving the cursor. + /// Access the next node without moving the cursor. pub fn peek_next(&self) -> Option<(&K, &V)> { self.peek(Direction::Next) } @@ -1130,7 +1130,7 @@ pub struct IterMut<'a, K, V> { } // SAFETY: The [`IterMut`] has exclusive access to both `K` and `V`, so it is sufficient to require them to be `Send`. -// The iterator only gives out immutable references to the keys, but since the iterator has excusive access to those same +// The iterator only gives out immutable references to the keys, but since the iterator has exclusive access to those same // keys, `Send` is sufficient. `Sync` would be okay, but it is more restrictive to the user. unsafe impl<'a, K: Send, V: Send> Send for IterMut<'a, K, V> {} diff --git a/rust/kernel/sync/atomic/predefine.rs b/rust/kernel/sync/atomic/predefine.rs index 45a17985cda45e..0fca1ba3c2dbd1 100644 --- a/rust/kernel/sync/atomic/predefine.rs +++ b/rust/kernel/sync/atomic/predefine.rs @@ -35,12 +35,23 @@ unsafe impl super::AtomicAdd for i64 { // as `isize` and `usize`, and `isize` and `usize` are always bi-directional transmutable to // `isize_atomic_repr`, which also always implements `AtomicImpl`. 
#[allow(non_camel_case_types)] +#[cfg(not(testlib))] #[cfg(not(CONFIG_64BIT))] type isize_atomic_repr = i32; #[allow(non_camel_case_types)] +#[cfg(not(testlib))] #[cfg(CONFIG_64BIT)] type isize_atomic_repr = i64; +#[allow(non_camel_case_types)] +#[cfg(testlib)] +#[cfg(target_pointer_width = "32")] +type isize_atomic_repr = i32; +#[allow(non_camel_case_types)] +#[cfg(testlib)] +#[cfg(target_pointer_width = "64")] +type isize_atomic_repr = i64; + // Ensure size and alignment requirements are checked. static_assert!(size_of::() == size_of::()); static_assert!(align_of::() == align_of::()); diff --git a/rust/kernel/sync/refcount.rs b/rust/kernel/sync/refcount.rs index 19236a5bccdeb0..6c7ae8b05a0b85 100644 --- a/rust/kernel/sync/refcount.rs +++ b/rust/kernel/sync/refcount.rs @@ -23,7 +23,8 @@ impl Refcount { /// Construct a new [`Refcount`] from an initial value. /// /// The initial value should be non-saturated. - #[inline] + // Always inline to optimize out error path of `build_assert`. + #[inline(always)] pub fn new(value: i32) -> Self { build_assert!(value >= 0, "initial value saturated"); // SAFETY: There are no safety requirements for this FFI call. diff --git a/rust/macros/fmt.rs b/rust/macros/fmt.rs index 2f4b9f6e221104..8354abd5450222 100644 --- a/rust/macros/fmt.rs +++ b/rust/macros/fmt.rs @@ -67,7 +67,7 @@ pub(crate) fn fmt(input: TokenStream) -> TokenStream { } (None, acc) })(); - args.extend(quote_spanned!(first_span => #lhs #adapter(&#rhs))); + args.extend(quote_spanned!(first_span => #lhs #adapter(&(#rhs)))); } }; diff --git a/rust/macros/lib.rs b/rust/macros/lib.rs index b38002151871a3..33f66e86418ab8 100644 --- a/rust/macros/lib.rs +++ b/rust/macros/lib.rs @@ -59,7 +59,7 @@ use proc_macro::TokenStream; /// /// # Examples /// -/// ``` +/// ```ignore /// use kernel::prelude::*; /// /// module!{ diff --git a/rust/proc-macro2/lib.rs b/rust/proc-macro2/lib.rs index 7b78d065d51ce8..5d408943fa0d4e 100644 --- a/rust/proc-macro2/lib.rs +++ b/rust/proc-macro2/lib.rs @@ -1,5 +1,9 @@ // SPDX-License-Identifier: Apache-2.0 OR MIT +// When fixdep scans this, it will find this string `CONFIG_RUSTC_VERSION_TEXT` +// and thus add a dependency on `include/config/RUSTC_VERSION_TEXT`, which is +// touched by Kconfig when the version string from the compiler changes. + //! [![github]](https://github.com/dtolnay/proc-macro2) [![crates-io]](https://crates.io/crates/proc-macro2) [![docs-rs]](crate) //! //! 
[github]: https://img.shields.io/badge/github-8da0cb?style=for-the-badge&labelColor=555555&logo=github diff --git a/scripts/Makefile.build b/scripts/Makefile.build index 5037f4715d7491..32e209bc7985cb 100644 --- a/scripts/Makefile.build +++ b/scripts/Makefile.build @@ -166,11 +166,13 @@ else ifeq ($(KBUILD_CHECKSRC),2) cmd_force_checksrc = $(CHECK) $(CHECKFLAGS) $(c_flags) $< endif +ifeq ($(KBUILD_EXTMOD),) ifneq ($(KBUILD_EXTRA_WARN),) cmd_checkdoc = PYTHONDONTWRITEBYTECODE=1 $(PYTHON3) $(KERNELDOC) -none $(KDOCFLAGS) \ $(if $(findstring 2, $(KBUILD_EXTRA_WARN)), -Wall) \ $< endif +endif # Compile C sources (.c) # --------------------------------------------------------------------------- @@ -356,7 +358,7 @@ $(obj)/%.o: $(obj)/%.rs FORCE quiet_cmd_rustc_rsi_rs = $(RUSTC_OR_CLIPPY_QUIET) $(quiet_modtag) $@ cmd_rustc_rsi_rs = \ $(rust_common_cmd) -Zunpretty=expanded $< >$@; \ - command -v $(RUSTFMT) >/dev/null && $(RUSTFMT) $@ + command -v $(RUSTFMT) >/dev/null && $(RUSTFMT) --config-path $(srctree)/.rustfmt.toml $@ $(obj)/%.rsi: $(obj)/%.rs FORCE +$(call if_changed_dep,rustc_rsi_rs) diff --git a/scripts/Makefile.vmlinux b/scripts/Makefile.vmlinux index cd788cac9d91da..276c3134a563ad 100644 --- a/scripts/Makefile.vmlinux +++ b/scripts/Makefile.vmlinux @@ -113,7 +113,8 @@ vmlinux: vmlinux.unstripped FORCE # what kmod expects to parse. quiet_cmd_modules_builtin_modinfo = GEN $@ cmd_modules_builtin_modinfo = $(cmd_objcopy); \ - sed -i 's/\x00\+$$/\x00/g' $@ + sed -i 's/\x00\+$$/\x00/g' $@; \ + chmod -x $@ OBJCOPYFLAGS_modules.builtin.modinfo := -j .modinfo -O binary diff --git a/scripts/generate_rust_analyzer.py b/scripts/generate_rust_analyzer.py index 147d0cc9406814..766c2d91cd8111 100755 --- a/scripts/generate_rust_analyzer.py +++ b/scripts/generate_rust_analyzer.py @@ -61,7 +61,6 @@ def append_sysroot_crate( display_name, deps, cfg=[], - edition="2021", ): append_crate( display_name, @@ -69,13 +68,37 @@ def append_sysroot_crate( deps, cfg, is_workspace_member=False, - edition=edition, + # Miguel Ojeda writes: + # + # > ... in principle even the sysroot crates may have different + # > editions. + # > + # > For instance, in the move to 2024, it seems all happened at once + # > in 1.87.0 in these upstream commits: + # > + # > 0e071c2c6a58 ("Migrate core to Rust 2024") + # > f505d4e8e380 ("Migrate alloc to Rust 2024") + # > 0b2489c226c3 ("Migrate proc_macro to Rust 2024") + # > 993359e70112 ("Migrate std to Rust 2024") + # > + # > But in the previous move to 2021, `std` moved in 1.59.0, while + # > the others in 1.60.0: + # > + # > b656384d8398 ("Update stdlib to the 2021 edition") + # > 06a1c14d52a8 ("Switch all libraries to the 2021 edition") + # + # Link: https://lore.kernel.org/all/CANiq72kd9bHdKaAm=8xCUhSHMy2csyVed69bOc4dXyFAW4sfuw@mail.gmail.com/ + # + # At the time of writing all rust versions we support build the + # sysroot crates with the same edition. We may need to relax this + # assumption if future edition moves span multiple rust versions. + edition=core_edition, ) # NB: sysroot crates reexport items from one another so setting up our transitive dependencies # here is important for ensuring that rust-analyzer can resolve symbols. The sources of truth # for this dependency graph are `(sysroot_src / crate / "Cargo.toml" for crate in crates)`. 
- append_sysroot_crate("core", [], cfg=crates_cfgs.get("core", []), edition=core_edition) + append_sysroot_crate("core", [], cfg=crates_cfgs.get("core", [])) append_sysroot_crate("alloc", ["core"]) append_sysroot_crate("std", ["alloc", "core"]) append_sysroot_crate("proc_macro", ["core", "std"]) @@ -83,7 +106,7 @@ def append_sysroot_crate( append_crate( "compiler_builtins", srctree / "rust" / "compiler_builtins.rs", - [], + ["core"], ) append_crate( @@ -96,14 +119,15 @@ def append_sysroot_crate( append_crate( "quote", srctree / "rust" / "quote" / "lib.rs", - ["alloc", "proc_macro", "proc_macro2"], + ["core", "alloc", "std", "proc_macro", "proc_macro2"], cfg=crates_cfgs["quote"], + edition="2018", ) append_crate( "syn", srctree / "rust" / "syn" / "lib.rs", - ["proc_macro", "proc_macro2", "quote"], + ["std", "proc_macro", "proc_macro2", "quote"], cfg=crates_cfgs["syn"], ) @@ -123,7 +147,7 @@ def append_sysroot_crate( append_crate( "pin_init_internal", srctree / "rust" / "pin-init" / "internal" / "src" / "lib.rs", - [], + ["std", "proc_macro"], cfg=["kernel"], is_proc_macro=True, ) @@ -131,7 +155,7 @@ def append_sysroot_crate( append_crate( "pin_init", srctree / "rust" / "pin-init" / "src" / "lib.rs", - ["core", "pin_init_internal", "macros"], + ["core", "compiler_builtins", "pin_init_internal", "macros"], cfg=["kernel"], ) @@ -190,7 +214,7 @@ def is_root_crate(build_file, target): append_crate( name, path, - ["core", "kernel"], + ["core", "kernel", "pin_init"], cfg=cfg, ) @@ -213,9 +237,6 @@ def main(): level=logging.INFO if args.verbose else logging.WARNING ) - # Making sure that the `sysroot` and `sysroot_src` belong to the same toolchain. - assert args.sysroot in args.sysroot_src.parents - rust_project = { "crates": generate_crates(args.srctree, args.objtree, args.sysroot_src, args.exttree, args.cfgs, args.core_edition), "sysroot": str(args.sysroot), diff --git a/scripts/livepatch/klp-build b/scripts/livepatch/klp-build index 882272120c9ef1..a73515a82272ec 100755 --- a/scripts/livepatch/klp-build +++ b/scripts/livepatch/klp-build @@ -555,13 +555,11 @@ copy_orig_objects() { local file_dir="$(dirname "$file")" local orig_file="$ORIG_DIR/$rel_file" local orig_dir="$(dirname "$orig_file")" - local cmd_file="$file_dir/.$(basename "$file").cmd" [[ ! -f "$file" ]] && die "missing $(basename "$file") for $_file" mkdir -p "$orig_dir" cp -f "$file" "$orig_dir" - [[ -e "$cmd_file" ]] && cp -f "$cmd_file" "$orig_dir" done xtrace_restore @@ -740,15 +738,17 @@ build_patch_module() { local orig_dir="$(dirname "$orig_file")" local kmod_file="$KMOD_DIR/$rel_file" local kmod_dir="$(dirname "$kmod_file")" - local cmd_file="$orig_dir/.$(basename "$file").cmd" + local cmd_file="$kmod_dir/.$(basename "$file").cmd" mkdir -p "$kmod_dir" cp -f "$file" "$kmod_dir" - [[ -e "$cmd_file" ]] && cp -f "$cmd_file" "$kmod_dir" # Tell kbuild this is a prebuilt object cp -f "$file" "${kmod_file}_shipped" + # Make modpost happy + touch "$cmd_file" + echo -n " $rel_file" >> "$makefile" done diff --git a/scripts/package/kernel.spec b/scripts/package/kernel.spec index 98f206cb7c6079..0f1c8de1bd95f8 100644 --- a/scripts/package/kernel.spec +++ b/scripts/package/kernel.spec @@ -2,6 +2,8 @@ %{!?_arch: %define _arch dummy} %{!?make: %define make make} %define makeflags %{?_smp_mflags} ARCH=%{ARCH} +%define __spec_install_post /usr/lib/rpm/brp-compress || : +%define debug_package %{nil} Name: kernel Summary: The Linux Kernel @@ -46,34 +48,12 @@ against the %{version} kernel package. 
%endif %if %{with_debuginfo} -# list of debuginfo-related options taken from distribution kernel.spec -# files -%undefine _include_minidebuginfo -%undefine _find_debuginfo_dwz_opts -%undefine _unique_build_ids -%undefine _unique_debug_names -%undefine _unique_debug_srcs -%undefine _debugsource_packages -%undefine _debuginfo_subpackages -%global _find_debuginfo_opts -r -%global _missing_build_ids_terminate_build 1 -%global _no_recompute_build_ids 1 -%{debug_package} +%package debuginfo +Summary: Debug information package for the Linux kernel +%description debuginfo +This package provides debug information for the kernel image and modules from the +%{version} package. %endif -# some (but not all) versions of rpmbuild emit %%debug_package with -# %%install. since we've already emitted it manually, that would cause -# a package redefinition error. ensure that doesn't happen -%define debug_package %{nil} - -# later, we make all modules executable so that find-debuginfo.sh strips -# them up. but they don't actually need to be executable, so remove the -# executable bit, taking care to do it _after_ find-debuginfo.sh has run -%define __spec_install_post \ - %{?__debug_package:%{__debug_install_post}} \ - %{__arch_install_post} \ - %{__os_install_post} \ - find %{buildroot}/lib/modules/%{KERNELRELEASE} -name "*.ko" -type f \\\ - | xargs --no-run-if-empty chmod u-x %prep %setup -q -n linux @@ -87,7 +67,7 @@ patch -p1 < %{SOURCE2} mkdir -p %{buildroot}/lib/modules/%{KERNELRELEASE} cp $(%{make} %{makeflags} -s image_name) %{buildroot}/lib/modules/%{KERNELRELEASE}/vmlinuz # DEPMOD=true makes depmod no-op. We do not package depmod-generated files. -%{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} DEPMOD=true modules_install +%{make} %{makeflags} INSTALL_MOD_PATH=%{buildroot} INSTALL_MOD_STRIP=1 DEPMOD=true modules_install %{make} %{makeflags} INSTALL_HDR_PATH=%{buildroot}/usr headers_install cp System.map %{buildroot}/lib/modules/%{KERNELRELEASE} cp .config %{buildroot}/lib/modules/%{KERNELRELEASE}/config @@ -118,22 +98,31 @@ ln -fns /usr/src/kernels/%{KERNELRELEASE} %{buildroot}/lib/modules/%{KERNELRELEA echo "%exclude /lib/modules/%{KERNELRELEASE}/build" } > %{buildroot}/kernel.list -# make modules executable so that find-debuginfo.sh strips them. 
this -# will be undone later in %%__spec_install_post -find %{buildroot}/lib/modules/%{KERNELRELEASE} -name "*.ko" -type f \ - | xargs --no-run-if-empty chmod u+x - %if %{with_debuginfo} # copying vmlinux directly to the debug directory means it will not get # stripped (but its source paths will still be collected + fixed up) mkdir -p %{buildroot}/usr/lib/debug/lib/modules/%{KERNELRELEASE} cp vmlinux %{buildroot}/usr/lib/debug/lib/modules/%{KERNELRELEASE} + +echo /usr/lib/debug/lib/modules/%{KERNELRELEASE}/vmlinux > %{buildroot}/debuginfo.list + +while read -r mod; do + mod="${mod%.o}.ko" + dbg="%{buildroot}/usr/lib/debug/lib/modules/%{KERNELRELEASE}/kernel/${mod}" + buildid=$("${READELF}" -n "${mod}" | sed -n 's@^.*Build ID: \(..\)\(.*\)@\1/\2@p') + link="%{buildroot}/usr/lib/debug/.build-id/${buildid}.debug" + + mkdir -p "${dbg%/*}" "${link%/*}" + "${OBJCOPY}" --only-keep-debug "${mod}" "${dbg}" + ln -sf --relative "${dbg}" "${link}" + + echo "${dbg#%{buildroot}}" >> %{buildroot}/debuginfo.list + echo "${link#%{buildroot}}" >> %{buildroot}/debuginfo.list +done < modules.order %endif %clean rm -rf %{buildroot} -rm -f debugfiles.list debuglinks.list debugsourcefiles.list debugsources.list \ - elfbins.list %post if [ -x /usr/bin/kernel-install ]; then @@ -172,3 +161,9 @@ fi /usr/src/kernels/%{KERNELRELEASE} /lib/modules/%{KERNELRELEASE}/build %endif + +%if %{with_debuginfo} +%files -f %{buildroot}/debuginfo.list debuginfo +%defattr (-, root, root) +%exclude /debuginfo.list +%endif diff --git a/scripts/rustdoc_test_gen.rs b/scripts/rustdoc_test_gen.rs index be05610496605b..6fd9f5c84e2e4c 100644 --- a/scripts/rustdoc_test_gen.rs +++ b/scripts/rustdoc_test_gen.rs @@ -206,7 +206,7 @@ pub extern "C" fn {kunit_name}(__kunit_test: *mut ::kernel::bindings::kunit) {{ /// The anchor where the test code body starts. #[allow(unused)] - static __DOCTEST_ANCHOR: i32 = ::core::line!() as i32 + {body_offset} + 1; + static __DOCTEST_ANCHOR: i32 = ::core::line!() as i32 + {body_offset} + 2; {{ #![allow(unreachable_pub, clippy::disallowed_names)] {body} diff --git a/security/lsm.h b/security/lsm.h index 81aadbc61685cd..db77cc83e1582b 100644 --- a/security/lsm.h +++ b/security/lsm.h @@ -37,15 +37,6 @@ int lsm_task_alloc(struct task_struct *task); /* LSM framework initializers */ -#ifdef CONFIG_MMU -int min_addr_init(void); -#else -static inline int min_addr_init(void) -{ - return 0; -} -#endif /* CONFIG_MMU */ - #ifdef CONFIG_SECURITYFS int securityfs_init(void); #else diff --git a/security/lsm_init.c b/security/lsm_init.c index 05bd52e6b1f25d..573e2a7250c416 100644 --- a/security/lsm_init.c +++ b/security/lsm_init.c @@ -489,12 +489,7 @@ int __init security_init(void) */ static int __init security_initcall_pure(void) { - int rc_adr, rc_lsm; - - rc_adr = min_addr_init(); - rc_lsm = lsm_initcall(pure); - - return (rc_adr ? 
rc_adr : rc_lsm); + return lsm_initcall(pure); } pure_initcall(security_initcall_pure); diff --git a/security/min_addr.c b/security/min_addr.c index 0fde5ec9abc8a0..56e4f9d25929b0 100644 --- a/security/min_addr.c +++ b/security/min_addr.c @@ -5,8 +5,6 @@ #include #include -#include "lsm.h" - /* amount of vm to protect from userspace access by both DAC and the LSM*/ unsigned long mmap_min_addr; /* amount of vm to protect from userspace using CAP_SYS_RAWIO (DAC) */ @@ -54,10 +52,11 @@ static const struct ctl_table min_addr_sysctl_table[] = { }, }; -int __init min_addr_init(void) +static int __init mmap_min_addr_init(void) { register_sysctl_init("vm", min_addr_sysctl_table); update_mmap_min_addr(); return 0; } +pure_initcall(mmap_min_addr_init); diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c index cafa48b5aceb52..0a0496deb9c881 100644 --- a/sound/hda/codecs/realtek/alc269.c +++ b/sound/hda/codecs/realtek/alc269.c @@ -3383,11 +3383,22 @@ static void alc287_alc1318_playback_pcm_hook(struct hda_pcm_stream *hinfo, struct snd_pcm_substream *substream, int action) { + static const struct coef_fw dis_coefs[] = { + WRITE_COEF(0x24, 0x0013), WRITE_COEF(0x25, 0x0000), WRITE_COEF(0x26, 0xC203), + WRITE_COEF(0x28, 0x0004), WRITE_COEF(0x29, 0xb023), + }; /* Disable AMP silence detection */ + static const struct coef_fw en_coefs[] = { + WRITE_COEF(0x24, 0x0013), WRITE_COEF(0x25, 0x0000), WRITE_COEF(0x26, 0xC203), + WRITE_COEF(0x28, 0x0084), WRITE_COEF(0x29, 0xb023), + }; /* Enable AMP silence detection */ + switch (action) { case HDA_GEN_PCM_ACT_OPEN: + alc_process_coef_fw(codec, dis_coefs); alc_write_coefex_idx(codec, 0x5a, 0x00, 0x954f); /* write gpio3 to high */ break; case HDA_GEN_PCM_ACT_CLOSE: + alc_process_coef_fw(codec, en_coefs); alc_write_coefex_idx(codec, 0x5a, 0x00, 0x554f); /* write gpio3 as default value */ break; } @@ -6739,6 +6750,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8c8c, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c8d, "HP ProBook 440 G11", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c8e, "HP ProBook 460 G11", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8c8f, "HP EliteBook 630 G11", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c90, "HP EliteBook 640", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c91, "HP EliteBook 660", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8c96, "HP", ALC236_FIXUP_HP_MUTE_LED_MICMUTE_VREF), @@ -7336,6 +7348,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1d05, 0x1409, "TongFang GMxIXxx", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1d05, 0x300f, "TongFang X6AR5xxY", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1d05, 0x3019, "TongFang X6FR5xxY", ALC2XX_FIXUP_HEADSET_MIC), + SND_PCI_QUIRK(0x1d05, 0x3031, "TongFang X6AR55xU", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1d17, 0x3288, "Haier Boyue G42", ALC269VC_FIXUP_ACER_VCOPPERBOX_PINS), SND_PCI_QUIRK(0x1d72, 0x1602, "RedmiBook", ALC255_FIXUP_XIAOMI_HEADSET_MIC), SND_PCI_QUIRK(0x1d72, 0x1701, "XiaomiNotebook Pro", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE), @@ -7346,6 +7359,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1ee7, 0x2078, "HONOR BRB-X M1010", ALC2XX_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1f66, 0x0105, "Ayaneo Portable Game Player", ALC287_FIXUP_CS35L41_I2C_2), SND_PCI_QUIRK(0x2014, 0x800a, "Positivo ARN50", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), + SND_PCI_QUIRK(0x2039, 0x0001, "Inspur S14-G1", ALC295_FIXUP_CHROME_BOOK), 
SND_PCI_QUIRK(0x2782, 0x0214, "VAIO VJFE-CL", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), SND_PCI_QUIRK(0x2782, 0x0228, "Infinix ZERO BOOK 13", ALC269VB_FIXUP_INFINIX_ZERO_BOOK_13), SND_PCI_QUIRK(0x2782, 0x0232, "CHUWI CoreBook XPro", ALC269VB_FIXUP_CHUWI_COREBOOK_XPRO), @@ -7805,10 +7819,6 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { {0x12, 0x90a60140}, {0x19, 0x04a11030}, {0x21, 0x04211020}), - SND_HDA_PIN_QUIRK(0x10ec0274, 0x1d05, "TongFang", ALC274_FIXUP_HP_HEADSET_MIC, - {0x17, 0x90170110}, - {0x19, 0x03a11030}, - {0x21, 0x03211020}), SND_HDA_PIN_QUIRK(0x10ec0282, 0x1025, "Acer", ALC282_FIXUP_ACER_DISABLE_LINEOUT, ALC282_STANDARD_PINS, {0x12, 0x90a609c0}, diff --git a/sound/soc/amd/yc/acp6x-mach.c b/sound/soc/amd/yc/acp6x-mach.c index 0294177acc6637..c18da0915baad8 100644 --- a/sound/soc/amd/yc/acp6x-mach.c +++ b/sound/soc/amd/yc/acp6x-mach.c @@ -542,6 +542,13 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "15NBC1011"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "ASUS EXPERTBOOK PM1503CDA"), + } + }, { .driver_data = &acp6x_card, .matches = { @@ -675,6 +682,14 @@ static const struct dmi_system_id yc_acp_quirk_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "GOH-X"), } }, + { + .driver_data = &acp6x_card, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "RB"), + DMI_MATCH(DMI_BOARD_NAME, "XyloD5_RBU"), + } + }, + {} }; diff --git a/sound/soc/codecs/cs35l45.c b/sound/soc/codecs/cs35l45.c index e33f1143598045..7aa558d6362f01 100644 --- a/sound/soc/codecs/cs35l45.c +++ b/sound/soc/codecs/cs35l45.c @@ -453,7 +453,7 @@ static const struct snd_soc_dapm_widget cs35l45_dapm_widgets[] = { SND_SOC_DAPM_AIF_OUT("ASP_TX2", NULL, 1, CS35L45_ASP_ENABLES1, CS35L45_ASP_TX2_EN_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("ASP_TX3", NULL, 2, CS35L45_ASP_ENABLES1, CS35L45_ASP_TX3_EN_SHIFT, 0), SND_SOC_DAPM_AIF_OUT("ASP_TX4", NULL, 3, CS35L45_ASP_ENABLES1, CS35L45_ASP_TX4_EN_SHIFT, 0), - SND_SOC_DAPM_AIF_OUT("ASP_TX5", NULL, 3, CS35L45_ASP_ENABLES1, CS35L45_ASP_TX5_EN_SHIFT, 0), + SND_SOC_DAPM_AIF_OUT("ASP_TX5", NULL, 4, CS35L45_ASP_ENABLES1, CS35L45_ASP_TX5_EN_SHIFT, 0), SND_SOC_DAPM_MUX("ASP_TX1 Source", SND_SOC_NOPM, 0, 0, &cs35l45_asp_muxes[0]), SND_SOC_DAPM_MUX("ASP_TX2 Source", SND_SOC_NOPM, 0, 0, &cs35l45_asp_muxes[1]), diff --git a/sound/soc/fsl/imx-card.c b/sound/soc/fsl/imx-card.c index 28699d7b75ca04..05b4e971a36618 100644 --- a/sound/soc/fsl/imx-card.c +++ b/sound/soc/fsl/imx-card.c @@ -346,7 +346,6 @@ static int imx_aif_hw_params(struct snd_pcm_substream *substream, SND_SOC_DAIFMT_PDM; } else { slots = 2; - slot_width = params_physical_width(params); fmt = (rtd->dai_link->dai_fmt & ~SND_SOC_DAIFMT_FORMAT_MASK) | SND_SOC_DAIFMT_I2S; } diff --git a/sound/soc/intel/boards/sof_es8336.c b/sound/soc/intel/boards/sof_es8336.c index 774fff58d51b93..fce50fd9f09389 100644 --- a/sound/soc/intel/boards/sof_es8336.c +++ b/sound/soc/intel/boards/sof_es8336.c @@ -120,7 +120,7 @@ static void pcm_pop_work_events(struct work_struct *work) gpiod_set_value_cansleep(priv->gpio_speakers, priv->speaker_en); if (quirk & SOF_ES8336_HEADPHONE_GPIO) - gpiod_set_value_cansleep(priv->gpio_headphone, priv->speaker_en); + gpiod_set_value_cansleep(priv->gpio_headphone, !priv->speaker_en); } diff --git a/sound/soc/intel/boards/sof_sdw.c b/sound/soc/intel/boards/sof_sdw.c index 8721a098d53f15..50b838be24e95c 100644 --- a/sound/soc/intel/boards/sof_sdw.c +++ b/sound/soc/intel/boards/sof_sdw.c @@ 
-838,6 +838,7 @@ static const struct snd_pci_quirk sof_sdw_ssid_quirk_table[] = { SND_PCI_QUIRK(0x17aa, 0x2347, "Lenovo P16", SOC_SDW_CODEC_MIC), SND_PCI_QUIRK(0x17aa, 0x2348, "Lenovo P16", SOC_SDW_CODEC_MIC), SND_PCI_QUIRK(0x17aa, 0x2349, "Lenovo P1", SOC_SDW_CODEC_MIC), + SND_PCI_QUIRK(0x17aa, 0x3821, "Lenovo 0x3821", SOC_SDW_SIDECAR_AMPS), {} }; diff --git a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c index 060955825fe00a..e297c8ecedb726 100644 --- a/sound/soc/intel/common/soc-acpi-intel-ptl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-ptl-match.c @@ -442,7 +442,7 @@ static const struct snd_soc_acpi_adr_device rt1320_2_group2_adr[] = { .adr = 0x000230025D132001ull, .num_endpoints = 1, .endpoints = &spk_r_endpoint, - .name_prefix = "rt1320-1" + .name_prefix = "rt1320-2" } }; diff --git a/tools/objtool/check.c b/tools/objtool/check.c index 3f7999317f4dfa..719ec727efd483 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -197,7 +197,8 @@ static bool is_rust_noreturn(const struct symbol *func) * as well as changes to the source code itself between versions (since * these come from the Rust standard library). */ - return str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") || + return str_ends_with(func->name, "_4core3num22from_ascii_radix_panic") || + str_ends_with(func->name, "_4core5sliceSp15copy_from_slice17len_mismatch_fail") || str_ends_with(func->name, "_4core6option13expect_failed") || str_ends_with(func->name, "_4core6option13unwrap_failed") || str_ends_with(func->name, "_4core6result13unwrap_failed") || diff --git a/tools/objtool/disas.c b/tools/objtool/disas.c index 2b5059f55e4008..26f08d41f2b133 100644 --- a/tools/objtool/disas.c +++ b/tools/objtool/disas.c @@ -108,6 +108,8 @@ static int sprint_name(char *str, const char *name, unsigned long offset) #define DINFO_FPRINTF(dinfo, ...) \ ((*(dinfo)->fprintf_func)((dinfo)->stream, __VA_ARGS__)) +#define bfd_vma_fmt \ + __builtin_choose_expr(sizeof(bfd_vma) == sizeof(unsigned long), "%#lx <%s>", "%#llx <%s>") static int disas_result_fprintf(struct disas_context *dctx, const char *fmt, va_list ap) @@ -170,10 +172,10 @@ static void disas_print_addr_sym(struct section *sec, struct symbol *sym, if (sym) { sprint_name(symstr, sym->name, addr - sym->offset); - DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, symstr); + DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, symstr); } else { str = offstr(sec, addr); - DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, str); + DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, str); free(str); } } @@ -252,7 +254,7 @@ static void disas_print_addr_reloc(bfd_vma addr, struct disassemble_info *dinfo) * example: "lea 0x0(%rip),%rdi". The kernel can reference * the next IP with _THIS_IP_ macro. 
*/ - DINFO_FPRINTF(dinfo, "0x%lx <_THIS_IP_>", addr); + DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, "_THIS_IP_"); return; } @@ -264,11 +266,11 @@ static void disas_print_addr_reloc(bfd_vma addr, struct disassemble_info *dinfo) */ if (reloc->sym->type == STT_SECTION) { str = offstr(reloc->sym->sec, reloc->sym->offset + offset); - DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, str); + DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, str); free(str); } else { sprint_name(symstr, reloc->sym->name, offset); - DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, symstr); + DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, symstr); } } @@ -311,7 +313,7 @@ static void disas_print_address(bfd_vma addr, struct disassemble_info *dinfo) */ sym = insn_call_dest(insn); if (sym && (sym->offset == addr || (sym->offset == 0 && is_reloc))) { - DINFO_FPRINTF(dinfo, "0x%lx <%s>", addr, sym->name); + DINFO_FPRINTF(dinfo, bfd_vma_fmt, addr, sym->name); return; } diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c index 6a8ed9c62323ea..2c02c7b492658c 100644 --- a/tools/objtool/elf.c +++ b/tools/objtool/elf.c @@ -18,15 +18,14 @@ #include #include #include +#include +#include #include +#include #include #include #include -#define ALIGN_UP(x, align_to) (((x) + ((align_to)-1)) & ~((align_to)-1)) -#define ALIGN_UP_POW2(x) (1U << ((8 * sizeof(x)) - __builtin_clz((x) - 1U))) -#define MAX(a, b) ((a) > (b) ? (a) : (b)) - static inline u32 str_hash(const char *str) { return jhash(str, strlen(str), 0); @@ -1336,7 +1335,7 @@ unsigned int elf_add_string(struct elf *elf, struct section *strtab, const char return -1; } - offset = ALIGN_UP(strtab->sh.sh_size, strtab->sh.sh_addralign); + offset = ALIGN(strtab->sh.sh_size, strtab->sh.sh_addralign); if (!elf_add_data(elf, strtab, str, strlen(str) + 1)) return -1; @@ -1378,7 +1377,7 @@ void *elf_add_data(struct elf *elf, struct section *sec, const void *data, size_ sec->data->d_size = size; sec->data->d_align = 1; - offset = ALIGN_UP(sec->sh.sh_size, sec->sh.sh_addralign); + offset = ALIGN(sec->sh.sh_size, sec->sh.sh_addralign); sec->sh.sh_size = offset + size; mark_sec_changed(elf, sec, true); @@ -1502,7 +1501,7 @@ static int elf_alloc_reloc(struct elf *elf, struct section *rsec) rsec->data->d_size = nr_relocs_new * elf_rela_size(elf); rsec->sh.sh_size = rsec->data->d_size; - nr_alloc = MAX(64, ALIGN_UP_POW2(nr_relocs_new)); + nr_alloc = max(64UL, roundup_pow_of_two(nr_relocs_new)); if (nr_alloc <= rsec->nr_alloc_relocs) return 0; diff --git a/tools/objtool/klp-diff.c b/tools/objtool/klp-diff.c index 4d1f9e9977eb94..d94531e3f64e3d 100644 --- a/tools/objtool/klp-diff.c +++ b/tools/objtool/klp-diff.c @@ -1425,9 +1425,6 @@ static int clone_special_sections(struct elfs *e) { struct section *patched_sec; - if (create_fake_symbols(e->patched)) - return -1; - for_each_sec(e->patched, patched_sec) { if (is_special_section(patched_sec)) { if (clone_special_section(e, patched_sec)) @@ -1704,6 +1701,17 @@ int cmd_klp_diff(int argc, const char **argv) if (!e.out) return -1; + /* + * Special section fake symbols are needed so that individual special + * section entries can be extracted by clone_special_sections(). + * + * Note the fake symbols are also needed by clone_included_functions() + * because __WARN_printf() call sites add references to bug table + * entries in the calling functions. 
+ */ + if (create_fake_symbols(e.patched)) + return -1; + if (clone_included_functions(&e)) return -1; diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm index ba5c2b643efaa2..d45bf4ccb3bf07 100644 --- a/tools/testing/selftests/kvm/Makefile.kvm +++ b/tools/testing/selftests/kvm/Makefile.kvm @@ -251,6 +251,7 @@ LINUX_TOOL_INCLUDE = $(top_srcdir)/tools/include LINUX_TOOL_ARCH_INCLUDE = $(top_srcdir)/tools/arch/$(ARCH)/include CFLAGS += -Wall -Wstrict-prototypes -Wuninitialized -O2 -g -std=gnu99 \ -Wno-gnu-variable-sized-type-not-at-end -MD -MP -DCONFIG_64BIT \ + -U_FORTIFY_SOURCE \ -fno-builtin-memcmp -fno-builtin-memcpy \ -fno-builtin-memset -fno-builtin-strnlen \ -fno-stack-protector -fno-PIE -fno-strict-aliasing \ diff --git a/tools/testing/selftests/net/fcnal-test.sh b/tools/testing/selftests/net/fcnal-test.sh index 844a580ae74ef8..890c3f8e51bb8b 100755 --- a/tools/testing/selftests/net/fcnal-test.sh +++ b/tools/testing/selftests/net/fcnal-test.sh @@ -2327,6 +2327,13 @@ ipv6_ping_novrf() log_test_addr ${a} $? 2 "ping local, device bind" done + for a in ${NSA_LO_IP6} ${NSA_LINKIP6}%${NSA_DEV} ${NSA_IP6} + do + log_start + run_cmd ${ping6} -c1 -w1 -I ::1 ${a} + log_test_addr ${a} $? 0 "ping local, from localhost" + done + # # ip rule blocks address # diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh index b2e6e548f796d3..e70d3420954fc1 100755 --- a/tools/testing/selftests/net/mptcp/mptcp_join.sh +++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh @@ -2329,17 +2329,16 @@ signal_address_tests() ip netns exec $ns1 sysctl -q net.mptcp.add_addr_timeout=1 speed=slow \ run_tests $ns1 $ns2 10.0.1.1 + chk_join_nr 3 3 3 # It is not directly linked to the commit introducing this # symbol but for the parent one which is linked anyway. - if ! mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then - chk_join_nr 3 3 2 - chk_add_nr 4 4 - else - chk_join_nr 3 3 3 + if mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then # the server will not signal the address terminating # the MPC subflow chk_add_nr 3 3 + else + chk_add_nr 4 4 fi fi } @@ -3847,21 +3846,28 @@ userspace_pm_chk_get_addr() fi } -# $1: ns ; $2: event type ; $3: count +# $1: ns ; $2: event type ; $3: count ; [ $4: attr ; $5: attr count ] chk_evt_nr() { local ns=${1} local evt_name="${2}" local exp="${3}" + local attr="${4}" + local attr_exp="${5}" local evts="${evts_ns1}" local evt="${!evt_name}" + local attr_name local count + if [ -n "${attr}" ]; then + attr_name=", ${attr}: ${attr_exp}" + fi + evt_name="${evt_name:16}" # without MPTCP_LIB_EVENT_ [ "${ns}" == "ns2" ] && evts="${evts_ns2}" - print_check "event ${ns} ${evt_name} (${exp})" + print_check "event ${ns} ${evt_name} (${exp}${attr_name})" if [[ "${evt_name}" = "LISTENER_"* ]] && ! 
mptcp_lib_kallsyms_has "mptcp_event_pm_listener$"; then @@ -3872,11 +3878,42 @@ chk_evt_nr() count=$(grep -cw "type:${evt}" "${evts}") if [ "${count}" != "${exp}" ]; then fail_test "got ${count} events, expected ${exp}" + cat "${evts}" + return + elif [ -z "${attr}" ]; then + print_ok + return + fi + + count=$(grep -w "type:${evt}" "${evts}" | grep -c ",${attr}:") + if [ "${count}" != "${attr_exp}" ]; then + fail_test "got ${count} event attributes, expected ${attr_exp}" + grep -w "type:${evt}" "${evts}" else print_ok fi } +# $1: ns ; $2: event type ; $3: expected count +wait_event() +{ + local ns="${1}" + local evt_name="${2}" + local exp="${3}" + + local evt="${!evt_name}" + local evts="${evts_ns1}" + local count + + [ "${ns}" == "ns2" ] && evts="${evts_ns2}" + + for _ in $(seq 100); do + count=$(grep -cw "type:${evt}" "${evts}") + [ "${count}" -ge "${exp}" ] && break + sleep 0.1 + done +} + userspace_tests() { # userspace pm type prevents add_addr @@ -4085,6 +4122,36 @@ userspace_tests() kill_events_pids mptcp_lib_kill_group_wait $tests_pid fi + + # userspace pm no duplicated spurious close events after an error + if reset_with_events "userspace pm no dup close events after error" && + continue_if mptcp_lib_has_file '/proc/sys/net/mptcp/pm_type'; then + set_userspace_pm $ns2 + pm_nl_set_limits $ns1 0 2 + { timeout_test=120 test_linkfail=128 speed=slow \ + run_tests $ns1 $ns2 10.0.1.1 & } 2>/dev/null + local tests_pid=$! + wait_event ns2 MPTCP_LIB_EVENT_ESTABLISHED 1 + userspace_pm_add_sf $ns2 10.0.3.2 20 + chk_mptcp_info subflows 1 subflows 1 + chk_subflows_total 2 2 + + # force quick loss + ip netns exec $ns2 sysctl -q net.ipv4.tcp_syn_retries=1 + if ip netns exec "${ns1}" ${iptables} -A INPUT -s "10.0.1.2" \ + -p tcp --tcp-option 30 -j REJECT --reject-with tcp-reset && + ip netns exec "${ns2}" ${iptables} -A INPUT -d "10.0.1.2" \ + -p tcp --tcp-option 30 -j REJECT --reject-with tcp-reset; then + wait_event ns2 MPTCP_LIB_EVENT_SUB_CLOSED 1 + wait_event ns1 MPTCP_LIB_EVENT_SUB_CLOSED 1 + chk_subflows_total 1 1 + userspace_pm_add_sf $ns2 10.0.1.2 0 + wait_event ns2 MPTCP_LIB_EVENT_SUB_CLOSED 2 + chk_evt_nr ns2 MPTCP_LIB_EVENT_SUB_CLOSED 2 error 2 + fi + kill_events_pids + mptcp_lib_kill_group_wait $tests_pid + fi } endpoint_tests() diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c index 0e8b5277be3bf8..a369b20d47f0fb 100644 --- a/virt/kvm/eventfd.c +++ b/virt/kvm/eventfd.c @@ -157,21 +157,28 @@ irqfd_shutdown(struct work_struct *work) } -/* assumes kvm->irqfds.lock is held */ -static bool -irqfd_is_active(struct kvm_kernel_irqfd *irqfd) +static bool irqfd_is_active(struct kvm_kernel_irqfd *irqfd) { + /* + * Assert that either irqfds.lock or SRCU is held, as irqfds.lock must + * be held to prevent false positives (on the irqfd being active), and + * while false negatives are impossible as irqfds are never added back + * to the list once they're deactivated, the caller must at least hold + * SRCU to guard against routing changes if the irqfd is deactivated. + */ + lockdep_assert_once(lockdep_is_held(&irqfd->kvm->irqfds.lock) || + srcu_read_lock_held(&irqfd->kvm->irq_srcu)); + return list_empty(&irqfd->list) ? 
false : true; } /* * Mark the irqfd as inactive and schedule it for removal - * - * assumes kvm->irqfds.lock is held */ -static void -irqfd_deactivate(struct kvm_kernel_irqfd *irqfd) +static void irqfd_deactivate(struct kvm_kernel_irqfd *irqfd) { + lockdep_assert_held(&irqfd->kvm->irqfds.lock); + BUG_ON(!irqfd_is_active(irqfd)); list_del_init(&irqfd->list); @@ -217,8 +224,15 @@ irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key) seq = read_seqcount_begin(&irqfd->irq_entry_sc); irq = irqfd->irq_entry; } while (read_seqcount_retry(&irqfd->irq_entry_sc, seq)); - /* An event has been signaled, inject an interrupt */ - if (kvm_arch_set_irq_inatomic(&irq, kvm, + + /* + * An event has been signaled, inject an interrupt unless the + * irqfd is being deassigned (isn't active), in which case the + * routing information may be stale (once the irqfd is removed + * from the list, it will stop receiving routing updates). + */ + if (unlikely(!irqfd_is_active(irqfd)) || + kvm_arch_set_irq_inatomic(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false) == -EWOULDBLOCK) schedule_work(&irqfd->inject); @@ -585,18 +599,8 @@ kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args) spin_lock_irq(&kvm->irqfds.lock); list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) { - if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) { - /* - * This clearing of irq_entry.type is needed for when - * another thread calls kvm_irq_routing_update before - * we flush workqueue below (we synchronize with - * kvm_irq_routing_update using irqfds.lock). - */ - write_seqcount_begin(&irqfd->irq_entry_sc); - irqfd->irq_entry.type = 0; - write_seqcount_end(&irqfd->irq_entry_sc); + if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) irqfd_deactivate(irqfd); - } } spin_unlock_irq(&kvm->irqfds.lock);