summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.mailmap1
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt3
-rw-r--r--Documentation/dev-tools/kunit/run_wrapper.rst2
-rw-r--r--Documentation/devicetree/bindings/display/msm/dp-controller.yaml21
-rw-r--r--Documentation/devicetree/bindings/display/msm/qcom,glymur-mdss.yaml16
-rw-r--r--Documentation/devicetree/bindings/display/msm/qcom,sm8750-mdss.yaml2
-rw-r--r--Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml2
-rw-r--r--Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml29
-rw-r--r--Documentation/scheduler/sched-ext.rst30
-rw-r--r--Documentation/virt/kvm/api.rst226
-rw-r--r--Documentation/virt/kvm/locking.rst2
-rw-r--r--MAINTAINERS13
-rw-r--r--Makefile6
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm64/boot/dts/renesas/r8a78000.dtsi16
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g057.dtsi30
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g077.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/r9a09g087.dtsi4
-rw-r--r--arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi2
-rw-r--r--arch/arm64/boot/dts/renesas/rzt2h-n2h-evk-common.dtsi1
-rw-r--r--arch/arm64/boot/dts/renesas/rzv2-evk-cn15-sd.dtso1
-rw-r--r--arch/arm64/crypto/aes-neonbs-glue.c37
-rw-r--r--arch/arm64/include/asm/kvm_host.h3
-rw-r--r--arch/arm64/kernel/cpufeature.c9
-rw-r--r--arch/arm64/kvm/at.c2
-rw-r--r--arch/arm64/kvm/guest.c4
-rw-r--r--arch/arm64/kvm/hyp/nvhe/mem_protect.c2
-rw-r--r--arch/arm64/kvm/mmu.c14
-rw-r--r--arch/arm64/kvm/nested.c27
-rw-r--r--arch/arm64/kvm/vgic/vgic-init.c32
-rw-r--r--arch/arm64/kvm/vgic/vgic-v2.c4
-rw-r--r--arch/arm64/kvm/vgic/vgic-v3.c12
-rw-r--r--arch/arm64/kvm/vgic/vgic.c6
-rw-r--r--arch/loongarch/Kconfig3
-rw-r--r--arch/loongarch/include/asm/cmpxchg.h5
-rw-r--r--arch/loongarch/include/asm/uaccess.h14
-rw-r--r--arch/loongarch/kernel/inst.c31
-rw-r--r--arch/loongarch/kvm/vcpu.c2
-rw-r--r--arch/loongarch/kvm/vm.c6
-rw-r--r--arch/loongarch/net/bpf_jit.c11
-rw-r--r--arch/mips/kvm/mips.c4
-rw-r--r--arch/powerpc/include/asm/uaccess.h65
-rw-r--r--arch/powerpc/kernel/iommu.c2
-rw-r--r--arch/powerpc/kernel/setup-common.c10
-rw-r--r--arch/powerpc/kvm/book3s.c4
-rw-r--r--arch/powerpc/kvm/booke.c4
-rw-r--r--arch/powerpc/kvm/e500.h6
-rw-r--r--arch/powerpc/kvm/e500_mmu.c4
-rw-r--r--arch/powerpc/kvm/e500_mmu_host.c91
-rw-r--r--arch/powerpc/lib/copyuser_64.S1
-rw-r--r--arch/powerpc/lib/copyuser_power7.S45
-rw-r--r--arch/powerpc/lib/vmx-helper.c2
-rw-r--r--arch/powerpc/mm/mem.c14
-rw-r--r--arch/powerpc/perf/callchain.c5
-rw-r--r--arch/powerpc/perf/callchain_32.c1
-rw-r--r--arch/powerpc/perf/callchain_64.c1
-rw-r--r--arch/riscv/boot/dts/microchip/mpfs.dtsi2
-rw-r--r--arch/riscv/kvm/aia.c15
-rw-r--r--arch/riscv/kvm/aia_aplic.c23
-rw-r--r--arch/riscv/kvm/aia_device.c18
-rw-r--r--arch/riscv/kvm/aia_imsic.c4
-rw-r--r--arch/riscv/kvm/mmu.c6
-rw-r--r--arch/riscv/kvm/vcpu.c2
-rw-r--r--arch/riscv/kvm/vcpu_fp.c17
-rw-r--r--arch/riscv/kvm/vcpu_onereg.c54
-rw-r--r--arch/riscv/kvm/vcpu_pmu.c16
-rw-r--r--arch/riscv/kvm/vm.c2
-rw-r--r--arch/s390/kernel/irq.c14
-rw-r--r--arch/s390/kvm/kvm-s390.c4
-rw-r--r--arch/x86/include/asm/kvm_host.h3
-rw-r--r--arch/x86/include/uapi/asm/kvm.h1
-rw-r--r--arch/x86/kernel/apic/apic.c6
-rw-r--r--arch/x86/kvm/cpuid.c5
-rw-r--r--arch/x86/kvm/hyperv.c9
-rw-r--r--arch/x86/kvm/ioapic.c3
-rw-r--r--arch/x86/kvm/svm/avic.c9
-rw-r--r--arch/x86/kvm/svm/nested.c12
-rw-r--r--arch/x86/kvm/svm/svm.c17
-rw-r--r--arch/x86/kvm/svm/svm.h1
-rw-r--r--arch/x86/kvm/vmx/nested.c61
-rw-r--r--arch/x86/kvm/vmx/nested.h1
-rw-r--r--arch/x86/kvm/vmx/vmx.c10
-rw-r--r--arch/x86/kvm/x86.c4
-rw-r--r--drivers/accel/amdxdna/aie2_ctx.c14
-rw-r--r--drivers/accel/amdxdna/amdxdna_ctx.c10
-rw-r--r--drivers/accel/ivpu/ivpu_hw_40xx_reg.h6
-rw-r--r--drivers/accel/ivpu/ivpu_hw_ip.c1
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/acpi_platform.c2
-rw-r--r--drivers/acpi/acpi_processor.c15
-rw-r--r--drivers/acpi/acpi_video.c45
-rw-r--r--drivers/acpi/acpica/acpredef.h2
-rw-r--r--drivers/acpi/bus.c3
-rw-r--r--drivers/acpi/osl.c2
-rw-r--r--drivers/acpi/scan.c45
-rw-r--r--drivers/android/binder/page_range.rs84
-rw-r--r--drivers/android/binder/process.rs3
-rw-r--r--drivers/android/binder/range_alloc/array.rs35
-rw-r--r--drivers/android/binder/range_alloc/mod.rs4
-rw-r--r--drivers/android/binder/range_alloc/tree.rs18
-rw-r--r--drivers/android/binder/thread.rs17
-rw-r--r--drivers/base/power/runtime.c1
-rw-r--r--drivers/block/ublk_drv.c16
-rw-r--r--drivers/cache/ax45mp_cache.c4
-rw-r--r--drivers/cache/starfive_starlink_cache.c4
-rw-r--r--drivers/cpuidle/cpuidle.c10
-rw-r--r--drivers/crypto/ccp/sev-dev.c4
-rw-r--r--drivers/crypto/padlock-sha.c7
-rw-r--r--drivers/firmware/arm_ffa/driver.c8
-rw-r--r--drivers/firmware/arm_scmi/notify.c4
-rw-r--r--drivers/firmware/arm_scmi/protocols.h4
-rw-r--r--drivers/firmware/arm_scpi.c5
-rw-r--r--drivers/firmware/cirrus/cs_dsp.c24
-rw-r--r--drivers/firmware/stratix10-rsu.c2
-rw-r--r--drivers/firmware/stratix10-svc.c228
-rw-r--r--drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/mes_v12_0.c5
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h6
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c21
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h5
-rw-r--r--drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h5
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c3
-rw-r--r--drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c3
-rw-r--r--drivers/gpu/drm/bridge/ti-sn65dsi83.c13
-rw-r--r--drivers/gpu/drm/gud/gud_drv.c9
-rw-r--r--drivers/gpu/drm/gud/gud_internal.h4
-rw-r--r--drivers/gpu/drm/gud/gud_pipe.c54
-rw-r--r--drivers/gpu/drm/i915/display/intel_alpm.c6
-rw-r--r--drivers/gpu/drm/i915/display/intel_display.c1
-rw-r--r--drivers/gpu/drm/i915/display/intel_dp.c7
-rw-r--r--drivers/gpu/drm/i915/display/intel_psr.c60
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.c23
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc.h3
-rw-r--r--drivers/gpu/drm/i915/display/intel_vdsc_regs.h12
-rw-r--r--drivers/gpu/drm/i915/display/intel_vrr.c14
-rw-r--r--drivers/gpu/drm/i915/gem/i915_gem_shmem.c12
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx_gpummu.c2
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_catalog.c3
-rw-r--r--drivers/gpu/drm/msm/adreno/a8xx_gpu.c14
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h12
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp_v13.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c52
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c43
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c22
-rw-r--r--drivers/gpu/drm/sitronix/st7586.c15
-rw-r--r--drivers/gpu/nova-core/gsp.rs46
-rw-r--r--drivers/gpu/nova-core/gsp/boot.rs2
-rw-r--r--drivers/gpu/nova-core/gsp/cmdq.rs93
-rw-r--r--drivers/gpu/nova-core/gsp/fw.rs101
-rw-r--r--drivers/hid/bpf/hid_bpf_dispatch.c2
-rw-r--r--drivers/hid/hid-appletb-kbd.c5
-rw-r--r--drivers/hid/hid-asus.c3
-rw-r--r--drivers/hid/hid-core.c7
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c18
-rw-r--r--drivers/hid/hid-logitech-hidpp.c6
-rw-r--r--drivers/hid/hid-multitouch.c7
-rw-r--r--drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c1
-rw-r--r--drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c1
-rw-r--r--drivers/hid/wacom_wac.c10
-rw-r--r--drivers/hwmon/Kconfig6
-rw-r--r--drivers/i3c/Kconfig12
-rw-r--r--drivers/i3c/master/dw-i3c-master.c6
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd.h1
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v1.c8
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/cmd_v2.c8
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/core.c143
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/dma.c158
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/hci.h5
-rw-r--r--drivers/i3c/master/mipi-i3c-hci/pio.c16
-rw-r--r--drivers/iio/adc/ad7768-1.c13
-rw-r--r--drivers/iio/chemical/bme680_core.c2
-rw-r--r--drivers/iio/chemical/sps30_i2c.c2
-rw-r--r--drivers/iio/chemical/sps30_serial.c2
-rw-r--r--drivers/iio/dac/ds4424.c2
-rw-r--r--drivers/iio/frequency/adf4377.c2
-rw-r--r--drivers/iio/gyro/mpu3050-core.c18
-rw-r--r--drivers/iio/gyro/mpu3050-i2c.c3
-rw-r--r--drivers/iio/imu/adis.c2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c2
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c4
-rw-r--r--drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c2
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600.h2
-rw-r--r--drivers/iio/imu/inv_icm45600/inv_icm45600_core.c11
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c8
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c5
-rw-r--r--drivers/iio/industrialio-buffer.c6
-rw-r--r--drivers/iio/light/bh1780.c2
-rw-r--r--drivers/iio/magnetometer/Kconfig3
-rw-r--r--drivers/iio/magnetometer/tlv493d.c2
-rw-r--r--drivers/iio/potentiometer/mcp4131.c2
-rw-r--r--drivers/iio/proximity/hx9023s.c6
-rw-r--r--drivers/irqchip/irq-riscv-aplic-main.c21
-rw-r--r--drivers/misc/amd-sbi/Kconfig3
-rw-r--r--drivers/nvdimm/bus.c5
-rw-r--r--drivers/nvme/host/core.c3
-rw-r--r--drivers/nvme/host/pci.c8
-rw-r--r--drivers/nvme/target/admin-cmd.c2
-rw-r--r--drivers/nvme/target/core.c14
-rw-r--r--drivers/nvme/target/nvmet.h1
-rw-r--r--drivers/nvme/target/rdma.c1
-rw-r--r--drivers/power/sequencing/pwrseq-pcie-m2.c2
-rw-r--r--drivers/regulator/pca9450-regulator.c14
-rw-r--r--drivers/reset/reset-rzg2l-usbphy-ctrl.c3
-rw-r--r--drivers/s390/block/dasd_eckd.c16
-rw-r--r--drivers/s390/crypto/zcrypt_ccamisc.c12
-rw-r--r--drivers/s390/crypto/zcrypt_cex4.c3
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_main.c2
-rw-r--r--drivers/scsi/hisi_sas/hisi_sas_v3_hw.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c2
-rw-r--r--drivers/scsi/scsi_scan.c8
-rw-r--r--drivers/soc/fsl/qbman/qman.c24
-rw-r--r--drivers/soc/fsl/qe/qmc.c4
-rw-r--r--drivers/soc/microchip/mpfs-sys-controller.c13
-rw-r--r--drivers/soc/rockchip/grf.c1
-rw-r--r--drivers/spi/spi-amlogic-spifc-a4.c5
-rw-r--r--drivers/spi/spi-atcspi200.c38
-rw-r--r--drivers/spi/spi-cadence-quadspi.c6
-rw-r--r--drivers/spi/spi-intel-pci.c1
-rw-r--r--drivers/spi/spi-rockchip-sfc.c2
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ieee80211.c15
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_mlme.c5
-rw-r--r--drivers/staging/sm750fb/sm750.c1
-rw-r--r--drivers/staging/sm750fb/sm750_hw.c22
-rw-r--r--drivers/tee/tee_shm.c27
-rw-r--r--drivers/ufs/core/ufshcd.c2
-rw-r--r--drivers/usb/class/cdc-acm.c5
-rw-r--r--drivers/usb/class/cdc-acm.h1
-rw-r--r--drivers/usb/class/cdc-wdm.c4
-rw-r--r--drivers/usb/class/usbtmc.c6
-rw-r--r--drivers/usb/core/config.c6
-rw-r--r--drivers/usb/core/message.c100
-rw-r--r--drivers/usb/core/phy.c8
-rw-r--r--drivers/usb/core/quirks.c21
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/gadget/function/f_hid.c4
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c12
-rw-r--r--drivers/usb/gadget/function/f_ncm.c146
-rw-r--r--drivers/usb/gadget/function/f_tcm.c14
-rw-r--r--drivers/usb/gadget/function/u_ether.c67
-rw-r--r--drivers/usb/gadget/function/u_ether.h56
-rw-r--r--drivers/usb/gadget/function/u_ether_configfs.h177
-rw-r--r--drivers/usb/gadget/function/u_ncm.h4
-rw-r--r--drivers/usb/gadget/function/uvc_video.c2
-rw-r--r--drivers/usb/host/xhci-debugfs.c10
-rw-r--r--drivers/usb/host/xhci-ring.c1
-rw-r--r--drivers/usb/host/xhci.c4
-rw-r--r--drivers/usb/image/mdc800.c6
-rw-r--r--drivers/usb/misc/uss720.c2
-rw-r--r--drivers/usb/misc/yurex.c2
-rw-r--r--drivers/usb/renesas_usbhs/common.c9
-rw-r--r--drivers/usb/roles/class.c7
-rw-r--r--drivers/usb/typec/altmodes/displayport.c7
-rw-r--r--drivers/usb/typec/tcpm/tcpm.c2
-rw-r--r--fs/btrfs/disk-io.c7
-rw-r--r--fs/btrfs/extent_io.c1
-rw-r--r--fs/btrfs/inode.c19
-rw-r--r--fs/btrfs/ioctl.c32
-rw-r--r--fs/btrfs/messages.h3
-rw-r--r--fs/btrfs/print-tree.c10
-rw-r--r--fs/btrfs/relocation.c2
-rw-r--r--fs/btrfs/space-info.c5
-rw-r--r--fs/btrfs/transaction.c16
-rw-r--r--fs/btrfs/tree-checker.c2
-rw-r--r--fs/btrfs/tree-log.c6
-rw-r--r--fs/btrfs/uuid-tree.c38
-rw-r--r--fs/btrfs/uuid-tree.h2
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/btrfs/zoned.c6
-rw-r--r--fs/ceph/addr.c1
-rw-r--r--fs/ceph/debugfs.c4
-rw-r--r--fs/ceph/dir.c17
-rw-r--r--fs/ceph/file.c4
-rw-r--r--fs/ceph/inode.c2
-rw-r--r--fs/ceph/mds_client.c3
-rw-r--r--fs/nfs/Kconfig3
-rw-r--r--fs/nfs/nfs3proc.c7
-rw-r--r--fs/nfsd/export.c63
-rw-r--r--fs/nfsd/export.h7
-rw-r--r--fs/nfsd/nfs4xdr.c9
-rw-r--r--fs/nfsd/nfsctl.c22
-rw-r--r--fs/nfsd/state.h17
-rw-r--r--fs/smb/client/cifsacl.c2
-rw-r--r--fs/smb/client/cifsfs.c2
-rw-r--r--fs/smb/client/cifsglob.h23
-rw-r--r--fs/smb/client/cifsproto.h26
-rw-r--r--fs/smb/client/dir.c4
-rw-r--r--fs/smb/client/file.c131
-rw-r--r--fs/smb/client/fs_context.c2
-rw-r--r--fs/smb/client/inode.c6
-rw-r--r--fs/smb/client/smb1ops.c2
-rw-r--r--fs/smb/client/smb2inode.c22
-rw-r--r--fs/smb/client/smb2maperror.c9
-rw-r--r--fs/smb/client/smb2ops.c18
-rw-r--r--fs/smb/client/smb2pdu.c5
-rw-r--r--fs/xfs/libxfs/xfs_da_btree.c8
-rw-r--r--fs/xfs/libxfs/xfs_defer.c2
-rw-r--r--fs/xfs/xfs_bmap_item.c2
-rw-r--r--fs/xfs/xfs_dquot.c8
-rw-r--r--fs/xfs/xfs_healthmon.c17
-rw-r--r--fs/xfs/xfs_icache.c1
-rw-r--r--fs/xfs/xfs_log.c2
-rw-r--r--fs/xfs/xfs_zone_gc.c2
-rw-r--r--include/linux/build_bug.h4
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h8
-rw-r--r--include/linux/hid.h1
-rw-r--r--include/linux/io_uring_types.h1
-rw-r--r--include/linux/kvm_host.h83
-rw-r--r--include/linux/nvme-auth.h2
-rw-r--r--include/linux/rseq_types.h6
-rw-r--r--include/linux/sched.h2
-rw-r--r--include/linux/usb.h8
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/uapi/linux/kvm.h8
-rw-r--r--io_uring/bpf_filter.c2
-rw-r--r--io_uring/eventfd.c10
-rw-r--r--io_uring/io_uring.c4
-rw-r--r--io_uring/kbuf.c13
-rw-r--r--io_uring/register.c15
-rw-r--r--io_uring/tw.c22
-rw-r--r--kernel/cgroup/cgroup.c6
-rw-r--r--kernel/cgroup/cpuset.c59
-rw-r--r--kernel/crash_dump_dm_crypt.c4
-rw-r--r--kernel/fork.c3
-rw-r--r--kernel/kprobes.c8
-rw-r--r--kernel/sched/core.c79
-rw-r--r--kernel/sched/ext.c22
-rw-r--r--kernel/sched/ext_internal.h114
-rw-r--r--kernel/sched/idle.c39
-rw-r--r--kernel/time/time.c2
-rw-r--r--kernel/workqueue.c55
-rw-r--r--kernel/workqueue_internal.h1
-rw-r--r--lib/bootconfig.c6
-rw-r--r--lib/crypto/Makefile3
-rw-r--r--mm/huge_memory.c3
-rw-r--r--mm/rmap.c21
-rw-r--r--mm/slub.c11
-rw-r--r--net/ceph/auth.c6
-rw-r--r--net/ceph/messenger_v2.c31
-rw-r--r--net/ceph/mon_client.c6
-rw-r--r--net/sunrpc/cache.c26
-rw-r--r--net/sunrpc/xprtrdma/verbs.c7
-rw-r--r--rust/Makefile11
-rw-r--r--rust/kernel/cpufreq.rs1
-rw-r--r--rust/kernel/dma.rs114
-rw-r--r--rust/kernel/lib.rs4
-rw-r--r--rust/kernel/ptr.rs30
-rw-r--r--rust/kernel/ptr/projection.rs305
-rw-r--r--rust/kernel/str.rs4
-rw-r--r--rust/pin-init/internal/src/init.rs69
-rw-r--r--rust/pin-init/src/__internal.rs28
-rw-r--r--samples/rust/rust_dma.rs30
-rw-r--r--samples/workqueue/stall_detector/Makefile1
-rw-r--r--samples/workqueue/stall_detector/wq_stall.c98
-rw-r--r--scripts/Makefile.build4
-rw-r--r--sound/core/pcm_native.c19
-rw-r--r--sound/hda/codecs/realtek/alc269.c3
-rw-r--r--sound/hda/codecs/realtek/alc662.c9
-rw-r--r--sound/soc/amd/acp/acp-mach-common.c18
-rw-r--r--sound/soc/amd/acp3x-rt5682-max9836.c9
-rw-r--r--sound/soc/codecs/rt1011.c2
-rw-r--r--sound/soc/generic/simple-card-utils.c12
-rw-r--r--sound/soc/qcom/qdsp6/q6apm-dai.c1
-rw-r--r--sound/soc/qcom/qdsp6/q6apm-lpass-dais.c1
-rw-r--r--sound/soc/qcom/qdsp6/q6apm.c1
-rw-r--r--sound/soc/soc-core.c11
-rw-r--r--sound/soc/tegra/tegra_audio_graph_card.c11
-rw-r--r--sound/usb/mixer_scarlett2.c2
-rw-r--r--sound/usb/quirks.c2
-rw-r--r--tools/arch/x86/include/asm/amd/ibs.h2
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h4
-rw-r--r--tools/arch/x86/include/asm/msr-index.h6
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h8
-rw-r--r--tools/bootconfig/samples/bad-non-closed-brace.bconf4
-rw-r--r--tools/bootconfig/samples/bad-over-max-brace.bconf19
-rw-r--r--tools/bootconfig/samples/exp-good-nested-brace.bconf1
-rw-r--r--tools/bootconfig/samples/good-nested-brace.bconf18
-rwxr-xr-xtools/bootconfig/test-bootconfig.sh9
-rw-r--r--tools/build/Build.include9
-rw-r--r--tools/build/Makefile.build6
-rw-r--r--tools/include/linux/coresight-pmu.h24
-rw-r--r--tools/include/linux/gfp_types.h9
-rw-r--r--tools/include/uapi/asm-generic/unistd.h5
-rw-r--r--tools/include/uapi/linux/kvm.h24
-rw-r--r--tools/include/uapi/linux/perf_event.h2
-rw-r--r--tools/objtool/Makefile2
-rw-r--r--tools/objtool/arch/x86/decode.c62
-rw-r--r--tools/objtool/check.c24
-rw-r--r--tools/objtool/elf.c2
-rw-r--r--tools/objtool/include/objtool/warn.h2
-rw-r--r--tools/objtool/klp-diff.c39
-rw-r--r--tools/perf/Makefile.config18
-rw-r--r--tools/perf/Makefile.perf2
-rw-r--r--tools/perf/arch/arm/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c14
-rw-r--r--tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl1
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/s390/entry/syscalls/syscall.tbl859
-rw-r--r--tools/perf/arch/sh/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/arch/sparc/entry/syscalls/syscall.tbl3
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_32.tbl1
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl1
-rw-r--r--tools/perf/arch/xtensa/entry/syscalls/syscall.tbl1
-rw-r--r--tools/perf/builtin-ftrace.c9
-rw-r--r--tools/perf/pmu-events/Build3
-rw-r--r--tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h1
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/fs.h1
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/mount.h13
-rw-r--r--tools/perf/trace/beauty/include/uapi/linux/prctl.h37
-rw-r--r--tools/perf/util/annotate-arch/annotate-loongarch.c2
-rw-r--r--tools/perf/util/annotate.c5
-rw-r--r--tools/perf/util/cs-etm-decoder/cs-etm-decoder.c2
-rw-r--r--tools/perf/util/cs-etm.c36
-rw-r--r--tools/perf/util/cs-etm.h15
-rw-r--r--tools/perf/util/disasm.c2
-rw-r--r--tools/perf/util/synthetic-events.c5
-rw-r--r--tools/power/cpupower/cpupower-service.conf5
-rw-r--r--tools/power/cpupower/cpupower.sh6
-rw-r--r--tools/power/cpupower/utils/cpupower-set.c6
-rw-r--r--tools/power/cpupower/utils/helpers/helpers.h5
-rw-r--r--tools/power/cpupower/utils/helpers/misc.c41
-rw-r--r--tools/power/cpupower/utils/powercap-info.c4
-rw-r--r--tools/scripts/syscall.tbl1
-rw-r--r--tools/testing/selftests/hid/progs/hid_bpf_helpers.h12
-rw-r--r--tools/testing/selftests/kvm/Makefile.kvm1
-rw-r--r--tools/testing/selftests/kvm/guest_memfd_test.c2
-rw-r--r--tools/testing/selftests/kvm/include/x86/processor.h23
-rw-r--r--tools/testing/selftests/kvm/include/x86/smm.h17
-rw-r--r--tools/testing/selftests/kvm/lib/x86/processor.c26
-rw-r--r--tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c150
-rw-r--r--tools/testing/selftests/kvm/x86/sev_smoke_test.c30
-rw-r--r--tools/testing/selftests/kvm/x86/smm_test.c27
-rw-r--r--tools/testing/selftests/powerpc/copyloops/.gitignore4
-rw-r--r--tools/testing/selftests/powerpc/copyloops/Makefile11
-rw-r--r--tools/testing/selftests/powerpc/copyloops/stubs.S8
-rw-r--r--tools/testing/selftests/powerpc/copyloops/validate.c15
-rw-r--r--tools/testing/selftests/sched_ext/util.c4
-rw-r--r--virt/kvm/binary_stats.c2
-rw-r--r--virt/kvm/kvm_main.c20
453 files changed, 5045 insertions, 2923 deletions
diff --git a/.mailmap b/.mailmap
index 63c11ea7e35d..40b4db2b2d60 100644
--- a/.mailmap
+++ b/.mailmap
@@ -327,6 +327,7 @@ Henrik Rydberg <rydberg@bitmath.org>
Herbert Xu <herbert@gondor.apana.org.au>
Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
+Ignat Korchagin <ignat@linux.win> <ignat@cloudflare.com>
Ike Panhc <ikepanhc@gmail.com> <ike.pan@canonical.com>
J. Bruce Fields <bfields@fieldses.org> <bfields@redhat.com>
J. Bruce Fields <bfields@fieldses.org> <bfields@citi.umich.edu>
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 55ffc0f8858a..03a550630644 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -8196,6 +8196,9 @@ Kernel parameters
p = USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT
(Reduce timeout of the SET_ADDRESS
request from 5000 ms to 500 ms);
+ q = USB_QUIRK_FORCE_ONE_CONFIG (Device
+ claims zero configurations,
+ forcing to 1);
Example: quirks=0781:5580:bk,0a5c:5834:gij
usbhid.mousepoll=
diff --git a/Documentation/dev-tools/kunit/run_wrapper.rst b/Documentation/dev-tools/kunit/run_wrapper.rst
index 3c0b585dcfff..770bb09a475a 100644
--- a/Documentation/dev-tools/kunit/run_wrapper.rst
+++ b/Documentation/dev-tools/kunit/run_wrapper.rst
@@ -336,6 +336,8 @@ command line arguments:
- ``--list_tests_attr``: If set, lists all tests that will be run and all of their
attributes.
+- ``--list_suites``: If set, lists all suites that will be run.
+
Command-line completion
==============================
diff --git a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
index ebda78db87a6..02ddfaab5f56 100644
--- a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
@@ -253,7 +253,6 @@ allOf:
enum:
# these platforms support 2 streams MST on some interfaces,
# others are SST only
- - qcom,glymur-dp
- qcom,sc8280xp-dp
- qcom,x1e80100-dp
then:
@@ -310,6 +309,26 @@ allOf:
minItems: 6
maxItems: 8
+ - if:
+ properties:
+ compatible:
+ contains:
+ enum:
+ # these platforms support 2 streams MST on some interfaces,
+ # others are SST only, but all controllers have 4 ports
+ - qcom,glymur-dp
+ then:
+ properties:
+ reg:
+ minItems: 9
+ maxItems: 9
+ clocks:
+ minItems: 5
+ maxItems: 6
+ clocks-names:
+ minItems: 5
+ maxItems: 6
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,glymur-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,glymur-mdss.yaml
index 2329ed96e6cb..64dde43373ac 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,glymur-mdss.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,glymur-mdss.yaml
@@ -176,13 +176,17 @@ examples:
};
};
- displayport-controller@ae90000 {
+ displayport-controller@af54000 {
compatible = "qcom,glymur-dp";
- reg = <0xae90000 0x200>,
- <0xae90200 0x200>,
- <0xae90400 0x600>,
- <0xae91000 0x400>,
- <0xae91400 0x400>;
+ reg = <0xaf54000 0x200>,
+ <0xaf54200 0x200>,
+ <0xaf55000 0xc00>,
+ <0xaf56000 0x400>,
+ <0xaf57000 0x400>,
+ <0xaf58000 0x400>,
+ <0xaf59000 0x400>,
+ <0xaf5a000 0x600>,
+ <0xaf5b000 0x600>;
interrupt-parent = <&mdss>;
interrupts = <12>;
diff --git a/Documentation/devicetree/bindings/display/msm/qcom,sm8750-mdss.yaml b/Documentation/devicetree/bindings/display/msm/qcom,sm8750-mdss.yaml
index d55fda9a523e..a38c2261ef1a 100644
--- a/Documentation/devicetree/bindings/display/msm/qcom,sm8750-mdss.yaml
+++ b/Documentation/devicetree/bindings/display/msm/qcom,sm8750-mdss.yaml
@@ -10,7 +10,7 @@ maintainers:
- Krzysztof Kozlowski <krzk@kernel.org>
description:
- SM8650 MSM Mobile Display Subsystem(MDSS), which encapsulates sub-blocks like
+ SM8750 MSM Mobile Display Subsystem(MDSS), which encapsulates sub-blocks like
DPU display controller, DSI and DP interfaces etc.
$ref: /schemas/display/msm/mdss-common.yaml#
diff --git a/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml
index 914200188809..082fdc2e69ea 100644
--- a/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml
+++ b/Documentation/devicetree/bindings/i2c/snps,designware-i2c.yaml
@@ -7,7 +7,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Synopsys DesignWare APB I2C Controller
maintainers:
- - Jarkko Nikula <jarkko.nikula@linux.intel.com>
+ - Mika Westerberg <mika.westerberg@linux.intel.com>
allOf:
- $ref: /schemas/i2c/i2c-controller.yaml#
diff --git a/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml b/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
index a6067030c5ed..6af4ff233158 100644
--- a/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/allwinner,sun6i-a31-spi.yaml
@@ -6,9 +6,6 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
title: Allwinner A31 SPI Controller
-allOf:
- - $ref: spi-controller.yaml
-
maintainers:
- Chen-Yu Tsai <wens@csie.org>
- Maxime Ripard <mripard@kernel.org>
@@ -82,11 +79,11 @@ patternProperties:
spi-rx-bus-width:
items:
- - const: 1
+ enum: [0, 1, 2, 4]
spi-tx-bus-width:
items:
- - const: 1
+ enum: [0, 1, 2, 4]
required:
- compatible
@@ -95,6 +92,28 @@ required:
- clocks
- clock-names
+allOf:
+ - $ref: spi-controller.yaml
+ - if:
+ not:
+ properties:
+ compatible:
+ contains:
+ enum:
+ - allwinner,sun50i-r329-spi
+ - allwinner,sun55i-a523-spi
+ then:
+ patternProperties:
+ "^.*@[0-9a-f]+":
+ properties:
+ spi-rx-bus-width:
+ items:
+ enum: [0, 1]
+
+ spi-tx-bus-width:
+ items:
+ enum: [0, 1]
+
unevaluatedProperties: false
examples:
diff --git a/Documentation/scheduler/sched-ext.rst b/Documentation/scheduler/sched-ext.rst
index 9e2882d937b4..d74c2c2b9ef3 100644
--- a/Documentation/scheduler/sched-ext.rst
+++ b/Documentation/scheduler/sched-ext.rst
@@ -43,7 +43,6 @@ options should be enabled to use sched_ext:
CONFIG_DEBUG_INFO_BTF=y
CONFIG_BPF_JIT_ALWAYS_ON=y
CONFIG_BPF_JIT_DEFAULT_ON=y
- CONFIG_PAHOLE_HAS_BTF_TAG=y
sched_ext is used only when the BPF scheduler is loaded and running.
@@ -58,7 +57,8 @@ in ``ops->flags``, all ``SCHED_NORMAL``, ``SCHED_BATCH``, ``SCHED_IDLE``, and
However, when the BPF scheduler is loaded and ``SCX_OPS_SWITCH_PARTIAL`` is
set in ``ops->flags``, only tasks with the ``SCHED_EXT`` policy are scheduled
by sched_ext, while tasks with ``SCHED_NORMAL``, ``SCHED_BATCH`` and
-``SCHED_IDLE`` policies are scheduled by the fair-class scheduler.
+``SCHED_IDLE`` policies are scheduled by the fair-class scheduler which has
+higher sched_class precedence than ``SCHED_EXT``.
Terminating the sched_ext scheduler program, triggering `SysRq-S`, or
detection of any internal error including stalled runnable tasks aborts the
@@ -345,6 +345,8 @@ Where to Look
The functions prefixed with ``scx_bpf_`` can be called from the BPF
scheduler.
+* ``kernel/sched/ext_idle.c`` contains the built-in idle CPU selection policy.
+
* ``tools/sched_ext/`` hosts example BPF scheduler implementations.
* ``scx_simple[.bpf].c``: Minimal global FIFO scheduler example using a
@@ -353,13 +355,35 @@ Where to Look
* ``scx_qmap[.bpf].c``: A multi-level FIFO scheduler supporting five
levels of priority implemented with ``BPF_MAP_TYPE_QUEUE``.
+ * ``scx_central[.bpf].c``: A central FIFO scheduler where all scheduling
+ decisions are made on one CPU, demonstrating ``LOCAL_ON`` dispatching,
+ tickless operation, and kthread preemption.
+
+ * ``scx_cpu0[.bpf].c``: A scheduler that queues all tasks to a shared DSQ
+ and only dispatches them on CPU0 in FIFO order. Useful for testing bypass
+ behavior.
+
+ * ``scx_flatcg[.bpf].c``: A flattened cgroup hierarchy scheduler
+ implementing hierarchical weight-based cgroup CPU control by compounding
+ each cgroup's share at every level into a single flat scheduling layer.
+
+ * ``scx_pair[.bpf].c``: A core-scheduling example that always makes
+ sibling CPU pairs execute tasks from the same CPU cgroup.
+
+ * ``scx_sdt[.bpf].c``: A variation of ``scx_simple`` demonstrating BPF
+ arena memory management for per-task data.
+
+ * ``scx_userland[.bpf].c``: A minimal scheduler demonstrating user space
+ scheduling. Tasks with CPU affinity are direct-dispatched in FIFO order;
+ all others are scheduled in user space by a simple vruntime scheduler.
+
ABI Instability
===============
The APIs provided by sched_ext to BPF schedulers programs have no stability
guarantees. This includes the ops table callbacks and constants defined in
``include/linux/sched/ext.h``, as well as the ``scx_bpf_`` kfuncs defined in
-``kernel/sched/ext.c``.
+``kernel/sched/ext.c`` and ``kernel/sched/ext_idle.c``.
While we will attempt to provide a relatively stable API surface when
possible, they are subject to change without warning between kernel
diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst
index 6f85e1b321dd..032516783e96 100644
--- a/Documentation/virt/kvm/api.rst
+++ b/Documentation/virt/kvm/api.rst
@@ -8435,115 +8435,123 @@ KVM_CHECK_EXTENSION.
The valid bits in cap.args[0] are:
-=================================== ============================================
- KVM_X86_QUIRK_LINT0_REENABLED By default, the reset value for the LVT
- LINT0 register is 0x700 (APIC_MODE_EXTINT).
- When this quirk is disabled, the reset value
- is 0x10000 (APIC_LVT_MASKED).
-
- KVM_X86_QUIRK_CD_NW_CLEARED By default, KVM clears CR0.CD and CR0.NW on
- AMD CPUs to workaround buggy guest firmware
- that runs in perpetuity with CR0.CD, i.e.
- with caches in "no fill" mode.
-
- When this quirk is disabled, KVM does not
- change the value of CR0.CD and CR0.NW.
-
- KVM_X86_QUIRK_LAPIC_MMIO_HOLE By default, the MMIO LAPIC interface is
- available even when configured for x2APIC
- mode. When this quirk is disabled, KVM
- disables the MMIO LAPIC interface if the
- LAPIC is in x2APIC mode.
-
- KVM_X86_QUIRK_OUT_7E_INC_RIP By default, KVM pre-increments %rip before
- exiting to userspace for an OUT instruction
- to port 0x7e. When this quirk is disabled,
- KVM does not pre-increment %rip before
- exiting to userspace.
-
- KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT When this quirk is disabled, KVM sets
- CPUID.01H:ECX[bit 3] (MONITOR/MWAIT) if
- IA32_MISC_ENABLE[bit 18] (MWAIT) is set.
- Additionally, when this quirk is disabled,
- KVM clears CPUID.01H:ECX[bit 3] if
- IA32_MISC_ENABLE[bit 18] is cleared.
-
- KVM_X86_QUIRK_FIX_HYPERCALL_INSN By default, KVM rewrites guest
- VMMCALL/VMCALL instructions to match the
- vendor's hypercall instruction for the
- system. When this quirk is disabled, KVM
- will no longer rewrite invalid guest
- hypercall instructions. Executing the
- incorrect hypercall instruction will
- generate a #UD within the guest.
-
-KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS By default, KVM emulates MONITOR/MWAIT (if
- they are intercepted) as NOPs regardless of
- whether or not MONITOR/MWAIT are supported
- according to guest CPUID. When this quirk
- is disabled and KVM_X86_DISABLE_EXITS_MWAIT
- is not set (MONITOR/MWAIT are intercepted),
- KVM will inject a #UD on MONITOR/MWAIT if
- they're unsupported per guest CPUID. Note,
- KVM will modify MONITOR/MWAIT support in
- guest CPUID on writes to MISC_ENABLE if
- KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT is
- disabled.
-
-KVM_X86_QUIRK_SLOT_ZAP_ALL By default, for KVM_X86_DEFAULT_VM VMs, KVM
- invalidates all SPTEs in all memslots and
- address spaces when a memslot is deleted or
- moved. When this quirk is disabled (or the
- VM type isn't KVM_X86_DEFAULT_VM), KVM only
- ensures the backing memory of the deleted
- or moved memslot isn't reachable, i.e KVM
- _may_ invalidate only SPTEs related to the
- memslot.
-
-KVM_X86_QUIRK_STUFF_FEATURE_MSRS By default, at vCPU creation, KVM sets the
- vCPU's MSR_IA32_PERF_CAPABILITIES (0x345),
- MSR_IA32_ARCH_CAPABILITIES (0x10a),
- MSR_PLATFORM_INFO (0xce), and all VMX MSRs
- (0x480..0x492) to the maximal capabilities
- supported by KVM. KVM also sets
- MSR_IA32_UCODE_REV (0x8b) to an arbitrary
- value (which is different for Intel vs.
- AMD). Lastly, when guest CPUID is set (by
- userspace), KVM modifies select VMX MSR
- fields to force consistency between guest
- CPUID and L2's effective ISA. When this
- quirk is disabled, KVM zeroes the vCPU's MSR
- values (with two exceptions, see below),
- i.e. treats the feature MSRs like CPUID
- leaves and gives userspace full control of
- the vCPU model definition. This quirk does
- not affect VMX MSRs CR0/CR4_FIXED1 (0x487
- and 0x489), as KVM does now allow them to
- be set by userspace (KVM sets them based on
- guest CPUID, for safety purposes).
-
-KVM_X86_QUIRK_IGNORE_GUEST_PAT By default, on Intel platforms, KVM ignores
- guest PAT and forces the effective memory
- type to WB in EPT. The quirk is not available
- on Intel platforms which are incapable of
- safely honoring guest PAT (i.e., without CPU
- self-snoop, KVM always ignores guest PAT and
- forces effective memory type to WB). It is
- also ignored on AMD platforms or, on Intel,
- when a VM has non-coherent DMA devices
- assigned; KVM always honors guest PAT in
- such case. The quirk is needed to avoid
- slowdowns on certain Intel Xeon platforms
- (e.g. ICX, SPR) where self-snoop feature is
- supported but UC is slow enough to cause
- issues with some older guests that use
- UC instead of WC to map the video RAM.
- Userspace can disable the quirk to honor
- guest PAT if it knows that there is no such
- guest software, for example if it does not
- expose a bochs graphics device (which is
- known to have had a buggy driver).
-=================================== ============================================
+======================================== ================================================
+KVM_X86_QUIRK_LINT0_REENABLED By default, the reset value for the LVT
+ LINT0 register is 0x700 (APIC_MODE_EXTINT).
+ When this quirk is disabled, the reset value
+ is 0x10000 (APIC_LVT_MASKED).
+
+KVM_X86_QUIRK_CD_NW_CLEARED By default, KVM clears CR0.CD and CR0.NW on
+ AMD CPUs to workaround buggy guest firmware
+ that runs in perpetuity with CR0.CD, i.e.
+ with caches in "no fill" mode.
+
+ When this quirk is disabled, KVM does not
+ change the value of CR0.CD and CR0.NW.
+
+KVM_X86_QUIRK_LAPIC_MMIO_HOLE By default, the MMIO LAPIC interface is
+ available even when configured for x2APIC
+ mode. When this quirk is disabled, KVM
+ disables the MMIO LAPIC interface if the
+ LAPIC is in x2APIC mode.
+
+KVM_X86_QUIRK_OUT_7E_INC_RIP By default, KVM pre-increments %rip before
+ exiting to userspace for an OUT instruction
+ to port 0x7e. When this quirk is disabled,
+ KVM does not pre-increment %rip before
+ exiting to userspace.
+
+KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT When this quirk is disabled, KVM sets
+ CPUID.01H:ECX[bit 3] (MONITOR/MWAIT) if
+ IA32_MISC_ENABLE[bit 18] (MWAIT) is set.
+ Additionally, when this quirk is disabled,
+ KVM clears CPUID.01H:ECX[bit 3] if
+ IA32_MISC_ENABLE[bit 18] is cleared.
+
+KVM_X86_QUIRK_FIX_HYPERCALL_INSN By default, KVM rewrites guest
+ VMMCALL/VMCALL instructions to match the
+ vendor's hypercall instruction for the
+ system. When this quirk is disabled, KVM
+ will no longer rewrite invalid guest
+ hypercall instructions. Executing the
+ incorrect hypercall instruction will
+ generate a #UD within the guest.
+
+KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS By default, KVM emulates MONITOR/MWAIT (if
+ they are intercepted) as NOPs regardless of
+ whether or not MONITOR/MWAIT are supported
+ according to guest CPUID. When this quirk
+ is disabled and KVM_X86_DISABLE_EXITS_MWAIT
+ is not set (MONITOR/MWAIT are intercepted),
+ KVM will inject a #UD on MONITOR/MWAIT if
+ they're unsupported per guest CPUID. Note,
+ KVM will modify MONITOR/MWAIT support in
+ guest CPUID on writes to MISC_ENABLE if
+ KVM_X86_QUIRK_MISC_ENABLE_NO_MWAIT is
+ disabled.
+
+KVM_X86_QUIRK_SLOT_ZAP_ALL By default, for KVM_X86_DEFAULT_VM VMs, KVM
+ invalidates all SPTEs in all memslots and
+ address spaces when a memslot is deleted or
+ moved. When this quirk is disabled (or the
+ VM type isn't KVM_X86_DEFAULT_VM), KVM only
+ ensures the backing memory of the deleted
+                                         or moved memslot isn't reachable, i.e. KVM
+ _may_ invalidate only SPTEs related to the
+ memslot.
+
+KVM_X86_QUIRK_STUFF_FEATURE_MSRS By default, at vCPU creation, KVM sets the
+ vCPU's MSR_IA32_PERF_CAPABILITIES (0x345),
+ MSR_IA32_ARCH_CAPABILITIES (0x10a),
+ MSR_PLATFORM_INFO (0xce), and all VMX MSRs
+ (0x480..0x492) to the maximal capabilities
+ supported by KVM. KVM also sets
+ MSR_IA32_UCODE_REV (0x8b) to an arbitrary
+ value (which is different for Intel vs.
+ AMD). Lastly, when guest CPUID is set (by
+ userspace), KVM modifies select VMX MSR
+ fields to force consistency between guest
+ CPUID and L2's effective ISA. When this
+ quirk is disabled, KVM zeroes the vCPU's MSR
+ values (with two exceptions, see below),
+ i.e. treats the feature MSRs like CPUID
+ leaves and gives userspace full control of
+ the vCPU model definition. This quirk does
+ not affect VMX MSRs CR0/CR4_FIXED1 (0x487
+                                         and 0x489), as KVM does not allow them to
+ be set by userspace (KVM sets them based on
+ guest CPUID, for safety purposes).
+
+KVM_X86_QUIRK_IGNORE_GUEST_PAT By default, on Intel platforms, KVM ignores
+ guest PAT and forces the effective memory
+ type to WB in EPT. The quirk is not available
+ on Intel platforms which are incapable of
+ safely honoring guest PAT (i.e., without CPU
+ self-snoop, KVM always ignores guest PAT and
+ forces effective memory type to WB). It is
+ also ignored on AMD platforms or, on Intel,
+ when a VM has non-coherent DMA devices
+ assigned; KVM always honors guest PAT in
+ such case. The quirk is needed to avoid
+ slowdowns on certain Intel Xeon platforms
+ (e.g. ICX, SPR) where self-snoop feature is
+ supported but UC is slow enough to cause
+ issues with some older guests that use
+ UC instead of WC to map the video RAM.
+ Userspace can disable the quirk to honor
+ guest PAT if it knows that there is no such
+ guest software, for example if it does not
+ expose a bochs graphics device (which is
+ known to have had a buggy driver).
+
+KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM By default, KVM relaxes the consistency
+ check for GUEST_IA32_DEBUGCTL in vmcs12
+ to allow FREEZE_IN_SMM to be set. When
+ this quirk is disabled, KVM requires this
+ bit to be cleared. Note that the vmcs02
+ bit is still completely controlled by the
+ host, regardless of the quirk setting.
+======================================== ================================================
7.32 KVM_CAP_MAX_VCPU_ID
------------------------
diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
index ae8bce7fecbe..662231e958a0 100644
--- a/Documentation/virt/kvm/locking.rst
+++ b/Documentation/virt/kvm/locking.rst
@@ -17,6 +17,8 @@ The acquisition orders for mutexes are as follows:
- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
+- vcpu->mutex is taken outside kvm->slots_lock and kvm->slots_arch_lock
+
- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
them together is quite rare.
diff --git a/MAINTAINERS b/MAINTAINERS
index bc3bcc641663..d7241695df96 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4022,7 +4022,7 @@ F: drivers/hwmon/asus_wmi_sensors.c
ASYMMETRIC KEYS
M: David Howells <dhowells@redhat.com>
M: Lukas Wunner <lukas@wunner.de>
-M: Ignat Korchagin <ignat@cloudflare.com>
+M: Ignat Korchagin <ignat@linux.win>
L: keyrings@vger.kernel.org
L: linux-crypto@vger.kernel.org
S: Maintained
@@ -4035,7 +4035,7 @@ F: include/linux/verification.h
ASYMMETRIC KEYS - ECDSA
M: Lukas Wunner <lukas@wunner.de>
-M: Ignat Korchagin <ignat@cloudflare.com>
+M: Ignat Korchagin <ignat@linux.win>
R: Stefan Berger <stefanb@linux.ibm.com>
L: linux-crypto@vger.kernel.org
S: Maintained
@@ -4045,14 +4045,14 @@ F: include/crypto/ecc*
ASYMMETRIC KEYS - GOST
M: Lukas Wunner <lukas@wunner.de>
-M: Ignat Korchagin <ignat@cloudflare.com>
+M: Ignat Korchagin <ignat@linux.win>
L: linux-crypto@vger.kernel.org
S: Odd fixes
F: crypto/ecrdsa*
ASYMMETRIC KEYS - RSA
M: Lukas Wunner <lukas@wunner.de>
-M: Ignat Korchagin <ignat@cloudflare.com>
+M: Ignat Korchagin <ignat@linux.win>
L: linux-crypto@vger.kernel.org
S: Maintained
F: crypto/rsa*
@@ -8626,9 +8626,8 @@ F: drivers/gpu/drm/lima/
F: include/uapi/drm/lima_drm.h
DRM DRIVERS FOR LOONGSON
-M: Sui Jingfeng <suijingfeng@loongson.cn>
L: dri-devel@lists.freedesktop.org
-S: Supported
+S: Orphan
T: git https://gitlab.freedesktop.org/drm/misc/kernel.git
F: drivers/gpu/drm/loongson/
@@ -21937,7 +21936,7 @@ F: drivers/media/radio/radio-tea5777.c
RADOS BLOCK DEVICE (RBD)
M: Ilya Dryomov <idryomov@gmail.com>
-R: Dongsheng Yang <dongsheng.yang@easystack.cn>
+R: Dongsheng Yang <dongsheng.yang@linux.dev>
L: ceph-devel@vger.kernel.org
S: Supported
W: http://ceph.com/
diff --git a/Makefile b/Makefile
index 2b15f0b4a0cb..c9b7bee102e8 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 7
PATCHLEVEL = 0
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc4
NAME = Baby Opossum Posse
# *DOCUMENTATION*
@@ -476,6 +476,7 @@ KBUILD_USERLDFLAGS := $(USERLDFLAGS)
export rust_common_flags := --edition=2021 \
-Zbinary_dep_depinfo=y \
-Astable_features \
+ -Aunused_features \
-Dnon_ascii_idents \
-Dunsafe_op_in_unsafe_fn \
-Wmissing_docs \
@@ -1113,6 +1114,9 @@ KBUILD_CFLAGS += -fno-builtin-wcslen
# change __FILE__ to the relative path to the source directory
ifdef building_out_of_srctree
KBUILD_CPPFLAGS += -fmacro-prefix-map=$(srcroot)/=
+ifeq ($(call rustc-option-yn, --remap-path-scope=macro),y)
+KBUILD_RUSTFLAGS += --remap-path-prefix=$(srcroot)/= --remap-path-scope=macro
+endif
endif
# include additional Makefiles when needed
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index f75d75cf91c8..70d05f74049e 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -279,7 +279,6 @@ CONFIG_TI_CPSW_SWITCHDEV=y
CONFIG_TI_CPTS=y
CONFIG_TI_KEYSTONE_NETCP=y
CONFIG_TI_KEYSTONE_NETCP_ETHSS=y
-CONFIG_TI_PRUSS=m
CONFIG_TI_PRUETH=m
CONFIG_XILINX_EMACLITE=y
CONFIG_SFP=m
diff --git a/arch/arm64/boot/dts/renesas/r8a78000.dtsi b/arch/arm64/boot/dts/renesas/r8a78000.dtsi
index 4c97298fa763..3e1c98903cea 100644
--- a/arch/arm64/boot/dts/renesas/r8a78000.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a78000.dtsi
@@ -698,7 +698,7 @@
compatible = "renesas,scif-r8a78000",
"renesas,rcar-gen5-scif", "renesas,scif";
reg = <0 0xc0700000 0 0x40>;
- interrupts = <GIC_SPI 4074 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_ESPI 10 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&dummy_clk_sgasyncd16>, <&dummy_clk_sgasyncd16>, <&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
status = "disabled";
@@ -708,7 +708,7 @@
compatible = "renesas,scif-r8a78000",
"renesas,rcar-gen5-scif", "renesas,scif";
reg = <0 0xc0704000 0 0x40>;
- interrupts = <GIC_SPI 4075 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_ESPI 11 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&dummy_clk_sgasyncd16>, <&dummy_clk_sgasyncd16>, <&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
status = "disabled";
@@ -718,7 +718,7 @@
compatible = "renesas,scif-r8a78000",
"renesas,rcar-gen5-scif", "renesas,scif";
reg = <0 0xc0708000 0 0x40>;
- interrupts = <GIC_SPI 4076 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_ESPI 12 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&dummy_clk_sgasyncd16>, <&dummy_clk_sgasyncd16>, <&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
status = "disabled";
@@ -728,7 +728,7 @@
compatible = "renesas,scif-r8a78000",
"renesas,rcar-gen5-scif", "renesas,scif";
reg = <0 0xc070c000 0 0x40>;
- interrupts = <GIC_SPI 4077 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_ESPI 13 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&dummy_clk_sgasyncd16>, <&dummy_clk_sgasyncd16>, <&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
status = "disabled";
@@ -738,7 +738,7 @@
compatible = "renesas,hscif-r8a78000",
"renesas,rcar-gen5-hscif", "renesas,hscif";
reg = <0 0xc0710000 0 0x60>;
- interrupts = <GIC_SPI 4078 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_ESPI 14 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&dummy_clk_sgasyncd4>, <&dummy_clk_sgasyncd4>, <&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
status = "disabled";
@@ -748,7 +748,7 @@
compatible = "renesas,hscif-r8a78000",
"renesas,rcar-gen5-hscif", "renesas,hscif";
reg = <0 0xc0714000 0 0x60>;
- interrupts = <GIC_SPI 4079 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_ESPI 15 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&dummy_clk_sgasyncd4>, <&dummy_clk_sgasyncd4>, <&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
status = "disabled";
@@ -758,7 +758,7 @@
compatible = "renesas,hscif-r8a78000",
"renesas,rcar-gen5-hscif", "renesas,hscif";
reg = <0 0xc0718000 0 0x60>;
- interrupts = <GIC_SPI 4080 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_ESPI 16 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&dummy_clk_sgasyncd4>, <&dummy_clk_sgasyncd4>, <&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
status = "disabled";
@@ -768,7 +768,7 @@
compatible = "renesas,hscif-r8a78000",
"renesas,rcar-gen5-hscif", "renesas,hscif";
reg = <0 0xc071c000 0 0x60>;
- interrupts = <GIC_SPI 4081 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_ESPI 17 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&dummy_clk_sgasyncd4>, <&dummy_clk_sgasyncd4>, <&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r9a09g057.dtsi b/arch/arm64/boot/dts/renesas/r9a09g057.dtsi
index 80cba9fcfe7b..504c28386622 100644
--- a/arch/arm64/boot/dts/renesas/r9a09g057.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a09g057.dtsi
@@ -581,16 +581,6 @@
status = "disabled";
};
- wdt0: watchdog@11c00400 {
- compatible = "renesas,r9a09g057-wdt";
- reg = <0 0x11c00400 0 0x400>;
- clocks = <&cpg CPG_MOD 0x4b>, <&cpg CPG_MOD 0x4c>;
- clock-names = "pclk", "oscclk";
- resets = <&cpg 0x75>;
- power-domains = <&cpg>;
- status = "disabled";
- };
-
wdt1: watchdog@14400000 {
compatible = "renesas,r9a09g057-wdt";
reg = <0 0x14400000 0 0x400>;
@@ -601,26 +591,6 @@
status = "disabled";
};
- wdt2: watchdog@13000000 {
- compatible = "renesas,r9a09g057-wdt";
- reg = <0 0x13000000 0 0x400>;
- clocks = <&cpg CPG_MOD 0x4f>, <&cpg CPG_MOD 0x50>;
- clock-names = "pclk", "oscclk";
- resets = <&cpg 0x77>;
- power-domains = <&cpg>;
- status = "disabled";
- };
-
- wdt3: watchdog@13000400 {
- compatible = "renesas,r9a09g057-wdt";
- reg = <0 0x13000400 0 0x400>;
- clocks = <&cpg CPG_MOD 0x51>, <&cpg CPG_MOD 0x52>;
- clock-names = "pclk", "oscclk";
- resets = <&cpg 0x78>;
- power-domains = <&cpg>;
- status = "disabled";
- };
-
rtc: rtc@11c00800 {
compatible = "renesas,r9a09g057-rtca3", "renesas,rz-rtca3";
reg = <0 0x11c00800 0 0x400>;
diff --git a/arch/arm64/boot/dts/renesas/r9a09g077.dtsi b/arch/arm64/boot/dts/renesas/r9a09g077.dtsi
index 14d7fb6f8952..9d0b4d8d3d5b 100644
--- a/arch/arm64/boot/dts/renesas/r9a09g077.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a09g077.dtsi
@@ -974,8 +974,8 @@
cpg: clock-controller@80280000 {
compatible = "renesas,r9a09g077-cpg-mssr";
- reg = <0 0x80280000 0 0x1000>,
- <0 0x81280000 0 0x9000>;
+ reg = <0 0x80280000 0 0x10000>,
+ <0 0x81280000 0 0x10000>;
clocks = <&extal_clk>;
clock-names = "extal";
#clock-cells = <2>;
diff --git a/arch/arm64/boot/dts/renesas/r9a09g087.dtsi b/arch/arm64/boot/dts/renesas/r9a09g087.dtsi
index 4a1339561332..d407c48f9966 100644
--- a/arch/arm64/boot/dts/renesas/r9a09g087.dtsi
+++ b/arch/arm64/boot/dts/renesas/r9a09g087.dtsi
@@ -977,8 +977,8 @@
cpg: clock-controller@80280000 {
compatible = "renesas,r9a09g087-cpg-mssr";
- reg = <0 0x80280000 0 0x1000>,
- <0 0x81280000 0 0x9000>;
+ reg = <0 0x80280000 0 0x10000>,
+ <0 0x81280000 0 0x10000>;
clocks = <&extal_clk>;
clock-names = "extal";
#clock-cells = <2>;
diff --git a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
index 982f17aafbc5..b45acfe6288a 100644
--- a/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzg3s-smarc-som.dtsi
@@ -162,7 +162,7 @@
<100000000>;
renesas,settings = [
80 00 11 19 4c 42 dc 2f 06 7d 20 1a 5f 1e f2 27
- 00 40 00 00 00 00 00 00 06 0c 19 02 3f f0 90 86
+ 00 40 00 00 00 00 00 00 06 0c 19 02 3b f0 90 86
a0 80 30 30 9c
];
};
diff --git a/arch/arm64/boot/dts/renesas/rzt2h-n2h-evk-common.dtsi b/arch/arm64/boot/dts/renesas/rzt2h-n2h-evk-common.dtsi
index 510399febf29..f87c2492f414 100644
--- a/arch/arm64/boot/dts/renesas/rzt2h-n2h-evk-common.dtsi
+++ b/arch/arm64/boot/dts/renesas/rzt2h-n2h-evk-common.dtsi
@@ -53,6 +53,7 @@
regulator-max-microvolt = <3300000>;
gpios-states = <0>;
states = <3300000 0>, <1800000 1>;
+ regulator-ramp-delay = <60>;
};
#endif
diff --git a/arch/arm64/boot/dts/renesas/rzv2-evk-cn15-sd.dtso b/arch/arm64/boot/dts/renesas/rzv2-evk-cn15-sd.dtso
index 0af1e0a6c7f4..fc53c1aae3b5 100644
--- a/arch/arm64/boot/dts/renesas/rzv2-evk-cn15-sd.dtso
+++ b/arch/arm64/boot/dts/renesas/rzv2-evk-cn15-sd.dtso
@@ -25,6 +25,7 @@
regulator-max-microvolt = <3300000>;
gpios-states = <0>;
states = <3300000 0>, <1800000 1>;
+ regulator-ramp-delay = <60>;
};
};
diff --git a/arch/arm64/crypto/aes-neonbs-glue.c b/arch/arm64/crypto/aes-neonbs-glue.c
index cb87c8fc66b3..00530b291010 100644
--- a/arch/arm64/crypto/aes-neonbs-glue.c
+++ b/arch/arm64/crypto/aes-neonbs-glue.c
@@ -76,19 +76,24 @@ static int aesbs_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct aesbs_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_aes_ctx rk;
+ struct crypto_aes_ctx *rk;
int err;
- err = aes_expandkey(&rk, in_key, key_len);
+ rk = kmalloc(sizeof(*rk), GFP_KERNEL);
+ if (!rk)
+ return -ENOMEM;
+
+ err = aes_expandkey(rk, in_key, key_len);
if (err)
- return err;
+ goto out;
ctx->rounds = 6 + key_len / 4;
scoped_ksimd()
- aesbs_convert_key(ctx->rk, rk.key_enc, ctx->rounds);
-
- return 0;
+ aesbs_convert_key(ctx->rk, rk->key_enc, ctx->rounds);
+out:
+ kfree_sensitive(rk);
+ return err;
}
static int __ecb_crypt(struct skcipher_request *req,
@@ -133,22 +138,26 @@ static int aesbs_cbc_ctr_setkey(struct crypto_skcipher *tfm, const u8 *in_key,
unsigned int key_len)
{
struct aesbs_cbc_ctr_ctx *ctx = crypto_skcipher_ctx(tfm);
- struct crypto_aes_ctx rk;
+ struct crypto_aes_ctx *rk;
int err;
- err = aes_expandkey(&rk, in_key, key_len);
+ rk = kmalloc(sizeof(*rk), GFP_KERNEL);
+ if (!rk)
+ return -ENOMEM;
+
+ err = aes_expandkey(rk, in_key, key_len);
if (err)
- return err;
+ goto out;
ctx->key.rounds = 6 + key_len / 4;
- memcpy(ctx->enc, rk.key_enc, sizeof(ctx->enc));
+ memcpy(ctx->enc, rk->key_enc, sizeof(ctx->enc));
scoped_ksimd()
- aesbs_convert_key(ctx->key.rk, rk.key_enc, ctx->key.rounds);
- memzero_explicit(&rk, sizeof(rk));
-
- return 0;
+ aesbs_convert_key(ctx->key.rk, rk->key_enc, ctx->key.rounds);
+out:
+ kfree_sensitive(rk);
+ return err;
}
static int cbc_encrypt(struct skcipher_request *req)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 2ca264b3db5f..70cb9cfd760a 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -784,6 +784,9 @@ struct kvm_host_data {
/* Number of debug breakpoints/watchpoints for this CPU (minus 1) */
unsigned int debug_brps;
unsigned int debug_wrps;
+
+ /* Last vgic_irq part of the AP list recorded in an LR */
+ struct vgic_irq *last_lr_irq;
};
struct kvm_host_psci_config {
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index c31f8e17732a..32c2dbcc0c64 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -2345,6 +2345,15 @@ static bool can_trap_icv_dir_el1(const struct arm64_cpu_capabilities *entry,
!is_midr_in_range_list(has_vgic_v3))
return false;
+ /*
+ * pKVM prevents late onlining of CPUs. This means that whatever
+ * state the capability is in after deprivilege cannot be affected
+	 * by a new CPU booting -- this is guaranteed to be a CPU we have
+ * already seen, and the cap is therefore unchanged.
+ */
+ if (system_capabilities_finalized() && is_protected_kvm_enabled())
+ return cpus_have_final_cap(ARM64_HAS_ICH_HCR_EL2_TDIR);
+
if (is_kernel_in_hyp_mode())
res.a1 = read_sysreg_s(SYS_ICH_VTR_EL2);
else
diff --git a/arch/arm64/kvm/at.c b/arch/arm64/kvm/at.c
index 6588ea251ed7..c5c5644b1878 100644
--- a/arch/arm64/kvm/at.c
+++ b/arch/arm64/kvm/at.c
@@ -1504,8 +1504,6 @@ int __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr)
fail = true;
}
- isb();
-
if (!fail)
par = read_sysreg_par();
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index 1c87699fd886..332c453b87cf 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -29,7 +29,7 @@
#include "trace.h"
-const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+const struct kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS()
};
@@ -42,7 +42,7 @@ const struct kvm_stats_header kvm_vm_stats_header = {
sizeof(kvm_vm_stats_desc),
};
-const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
STATS_DESC_COUNTER(VCPU, hvc_exit_stat),
STATS_DESC_COUNTER(VCPU, wfe_exit_stat),
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index 38f66a56a766..d815265bd374 100644
--- a/arch/arm64/kvm/hyp/nvhe/mem_protect.c
+++ b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
@@ -518,7 +518,7 @@ static int host_stage2_adjust_range(u64 addr, struct kvm_mem_range *range)
granule = kvm_granule_size(level);
cur.start = ALIGN_DOWN(addr, granule);
cur.end = cur.start + granule;
- if (!range_included(&cur, range))
+ if (!range_included(&cur, range) && level < KVM_PGTABLE_LAST_LEVEL)
continue;
*range = cur;
return 0;
diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index ec2eee857208..17d64a1e11e5 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1751,6 +1751,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
force_pte = (max_map_size == PAGE_SIZE);
vma_pagesize = min_t(long, vma_pagesize, max_map_size);
+ vma_shift = __ffs(vma_pagesize);
}
/*
@@ -1837,10 +1838,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
if (exec_fault && s2_force_noncacheable)
ret = -ENOEXEC;
- if (ret) {
- kvm_release_page_unused(page);
- return ret;
- }
+ if (ret)
+ goto out_put_page;
/*
* Guest performs atomic/exclusive operations on memory with unsupported
@@ -1850,7 +1849,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
*/
if (esr_fsc_is_excl_atomic_fault(kvm_vcpu_get_esr(vcpu))) {
kvm_inject_dabt_excl_atomic(vcpu, kvm_vcpu_get_hfar(vcpu));
- return 1;
+ ret = 1;
+ goto out_put_page;
}
if (nested)
@@ -1936,6 +1936,10 @@ out_unlock:
mark_page_dirty_in_slot(kvm, memslot, gfn);
return ret != -EAGAIN ? ret : 0;
+
+out_put_page:
+ kvm_release_page_unused(page);
+ return ret;
}
/* Resolve the access fault by making the page young again. */
diff --git a/arch/arm64/kvm/nested.c b/arch/arm64/kvm/nested.c
index 12c9f6e8dfda..2c43097248b2 100644
--- a/arch/arm64/kvm/nested.c
+++ b/arch/arm64/kvm/nested.c
@@ -152,31 +152,31 @@ static int get_ia_size(struct s2_walk_info *wi)
return 64 - wi->t0sz;
}
-static int check_base_s2_limits(struct s2_walk_info *wi,
+static int check_base_s2_limits(struct kvm_vcpu *vcpu, struct s2_walk_info *wi,
int level, int input_size, int stride)
{
- int start_size, ia_size;
+ int start_size, pa_max;
- ia_size = get_ia_size(wi);
+ pa_max = kvm_get_pa_bits(vcpu->kvm);
/* Check translation limits */
switch (BIT(wi->pgshift)) {
case SZ_64K:
- if (level == 0 || (level == 1 && ia_size <= 42))
+ if (level == 0 || (level == 1 && pa_max <= 42))
return -EFAULT;
break;
case SZ_16K:
- if (level == 0 || (level == 1 && ia_size <= 40))
+ if (level == 0 || (level == 1 && pa_max <= 40))
return -EFAULT;
break;
case SZ_4K:
- if (level < 0 || (level == 0 && ia_size <= 42))
+ if (level < 0 || (level == 0 && pa_max <= 42))
return -EFAULT;
break;
}
/* Check input size limits */
- if (input_size > ia_size)
+ if (input_size > pa_max)
return -EFAULT;
/* Check number of entries in starting level table */
@@ -269,16 +269,19 @@ static int walk_nested_s2_pgd(struct kvm_vcpu *vcpu, phys_addr_t ipa,
if (input_size > 48 || input_size < 25)
return -EFAULT;
- ret = check_base_s2_limits(wi, level, input_size, stride);
- if (WARN_ON(ret))
+ ret = check_base_s2_limits(vcpu, wi, level, input_size, stride);
+ if (WARN_ON(ret)) {
+ out->esr = compute_fsc(0, ESR_ELx_FSC_FAULT);
return ret;
+ }
base_lower_bound = 3 + input_size - ((3 - level) * stride +
wi->pgshift);
base_addr = wi->baddr & GENMASK_ULL(47, base_lower_bound);
if (check_output_size(wi, base_addr)) {
- out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ);
+ /* R_BFHQH */
+ out->esr = compute_fsc(0, ESR_ELx_FSC_ADDRSZ);
return 1;
}
@@ -293,8 +296,10 @@ static int walk_nested_s2_pgd(struct kvm_vcpu *vcpu, phys_addr_t ipa,
paddr = base_addr | index;
ret = read_guest_s2_desc(vcpu, paddr, &desc, wi);
- if (ret < 0)
+ if (ret < 0) {
+ out->esr = ESR_ELx_FSC_SEA_TTW(level);
return ret;
+ }
new_desc = desc;
diff --git a/arch/arm64/kvm/vgic/vgic-init.c b/arch/arm64/kvm/vgic/vgic-init.c
index 9b3091ad868c..e9b8b5fc480c 100644
--- a/arch/arm64/kvm/vgic/vgic-init.c
+++ b/arch/arm64/kvm/vgic/vgic-init.c
@@ -143,6 +143,21 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
kvm->arch.vgic.in_kernel = true;
kvm->arch.vgic.vgic_model = type;
kvm->arch.vgic.implementation_rev = KVM_VGIC_IMP_REV_LATEST;
+ kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
+
+ aa64pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
+ pfr1 = kvm_read_vm_id_reg(kvm, SYS_ID_PFR1_EL1) & ~ID_PFR1_EL1_GIC;
+
+ if (type == KVM_DEV_TYPE_ARM_VGIC_V2) {
+ kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
+ } else {
+ INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
+ aa64pfr0 |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
+ pfr1 |= SYS_FIELD_PREP_ENUM(ID_PFR1_EL1, GIC, GICv3);
+ }
+
+ kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, aa64pfr0);
+ kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, pfr1);
kvm_for_each_vcpu(i, vcpu, kvm) {
ret = vgic_allocate_private_irqs_locked(vcpu, type);
@@ -157,25 +172,10 @@ int kvm_vgic_create(struct kvm *kvm, u32 type)
vgic_cpu->private_irqs = NULL;
}
+ kvm->arch.vgic.vgic_model = 0;
goto out_unlock;
}
- kvm->arch.vgic.vgic_dist_base = VGIC_ADDR_UNDEF;
-
- aa64pfr0 = kvm_read_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1) & ~ID_AA64PFR0_EL1_GIC;
- pfr1 = kvm_read_vm_id_reg(kvm, SYS_ID_PFR1_EL1) & ~ID_PFR1_EL1_GIC;
-
- if (type == KVM_DEV_TYPE_ARM_VGIC_V2) {
- kvm->arch.vgic.vgic_cpu_base = VGIC_ADDR_UNDEF;
- } else {
- INIT_LIST_HEAD(&kvm->arch.vgic.rd_regions);
- aa64pfr0 |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
- pfr1 |= SYS_FIELD_PREP_ENUM(ID_PFR1_EL1, GIC, GICv3);
- }
-
- kvm_set_vm_id_reg(kvm, SYS_ID_AA64PFR0_EL1, aa64pfr0);
- kvm_set_vm_id_reg(kvm, SYS_ID_PFR1_EL1, pfr1);
-
if (type == KVM_DEV_TYPE_ARM_VGIC_V3)
kvm->arch.vgic.nassgicap = system_supports_direct_sgis();
diff --git a/arch/arm64/kvm/vgic/vgic-v2.c b/arch/arm64/kvm/vgic/vgic-v2.c
index 585491fbda80..cafa3cb32bda 100644
--- a/arch/arm64/kvm/vgic/vgic-v2.c
+++ b/arch/arm64/kvm/vgic/vgic-v2.c
@@ -115,7 +115,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_v2_cpu_if *cpuif = &vgic_cpu->vgic_v2;
u32 eoicount = FIELD_GET(GICH_HCR_EOICOUNT, cpuif->vgic_hcr);
- struct vgic_irq *irq;
+ struct vgic_irq *irq = *host_data_ptr(last_lr_irq);
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
@@ -123,7 +123,7 @@ void vgic_v2_fold_lr_state(struct kvm_vcpu *vcpu)
vgic_v2_fold_lr(vcpu, cpuif->vgic_lr[lr]);
/* See the GICv3 equivalent for the EOIcount handling rationale */
- list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+ list_for_each_entry_continue(irq, &vgic_cpu->ap_list_head, ap_list) {
u32 lr;
if (!eoicount) {
diff --git a/arch/arm64/kvm/vgic/vgic-v3.c b/arch/arm64/kvm/vgic/vgic-v3.c
index 386ddf69a9c5..6a355eca1934 100644
--- a/arch/arm64/kvm/vgic/vgic-v3.c
+++ b/arch/arm64/kvm/vgic/vgic-v3.c
@@ -148,7 +148,7 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
u32 eoicount = FIELD_GET(ICH_HCR_EL2_EOIcount, cpuif->vgic_hcr);
- struct vgic_irq *irq;
+ struct vgic_irq *irq = *host_data_ptr(last_lr_irq);
DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());
@@ -158,12 +158,12 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
/*
* EOIMode=0: use EOIcount to emulate deactivation. We are
* guaranteed to deactivate in reverse order of the activation, so
- * just pick one active interrupt after the other in the ap_list,
- * and replay the deactivation as if the CPU was doing it. We also
- * rely on priority drop to have taken place, and the list to be
- * sorted by priority.
+ * just pick one active interrupt after the other in the tail part
+ * of the ap_list, past the LRs, and replay the deactivation as if
+ * the CPU was doing it. We also rely on priority drop to have taken
+ * place, and the list to be sorted by priority.
*/
- list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
+ list_for_each_entry_continue(irq, &vgic_cpu->ap_list_head, ap_list) {
u64 lr;
/*
diff --git a/arch/arm64/kvm/vgic/vgic.c b/arch/arm64/kvm/vgic/vgic.c
index 430aa98888fd..e22b79cfff96 100644
--- a/arch/arm64/kvm/vgic/vgic.c
+++ b/arch/arm64/kvm/vgic/vgic.c
@@ -814,6 +814,9 @@ retry:
static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
{
+ if (!*host_data_ptr(last_lr_irq))
+ return;
+
if (kvm_vgic_global_state.type == VGIC_V2)
vgic_v2_fold_lr_state(vcpu);
else
@@ -960,10 +963,13 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
if (irqs_outside_lrs(&als))
vgic_sort_ap_list(vcpu);
+ *host_data_ptr(last_lr_irq) = NULL;
+
list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
scoped_guard(raw_spinlock, &irq->irq_lock) {
if (likely(vgic_target_oracle(irq) == vcpu)) {
vgic_populate_lr(vcpu, irq, count++);
+ *host_data_ptr(last_lr_irq) = irq;
}
}
diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index d211c6572b0a..92068ff38685 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -304,6 +304,9 @@ config AS_HAS_LBT_EXTENSION
config AS_HAS_LVZ_EXTENSION
def_bool $(as-instr,hvcl 0)
+config AS_HAS_SCQ_EXTENSION
+ def_bool $(as-instr,sc.q \$t0$(comma)\$t1$(comma)\$t2)
+
config CC_HAS_ANNOTATE_TABLEJUMP
def_bool $(cc-option,-mannotate-tablejump)
diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h
index 58cabab6d90d..909f9274fe71 100644
--- a/arch/loongarch/include/asm/cmpxchg.h
+++ b/arch/loongarch/include/asm/cmpxchg.h
@@ -238,6 +238,8 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, unsigned int
arch_cmpxchg((ptr), (o), (n)); \
})
+#ifdef CONFIG_AS_HAS_SCQ_EXTENSION
+
union __u128_halves {
u128 full;
struct {
@@ -290,6 +292,9 @@ union __u128_halves {
BUILD_BUG_ON(sizeof(*(ptr)) != 16); \
__arch_cmpxchg128(ptr, o, n, ""); \
})
+
+#endif /* CONFIG_AS_HAS_SCQ_EXTENSION */
+
#else
#include <asm-generic/cmpxchg-local.h>
#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
diff --git a/arch/loongarch/include/asm/uaccess.h b/arch/loongarch/include/asm/uaccess.h
index 4e259d490e45..438269313e78 100644
--- a/arch/loongarch/include/asm/uaccess.h
+++ b/arch/loongarch/include/asm/uaccess.h
@@ -253,8 +253,13 @@ do { \
\
__get_kernel_common(*((type *)(dst)), sizeof(type), \
(__force type *)(src)); \
- if (unlikely(__gu_err)) \
+ if (unlikely(__gu_err)) { \
+ pr_info("%s: memory access failed, ecode 0x%x\n", \
+ __func__, read_csr_excode()); \
+ pr_info("%s: the caller is %pS\n", \
+ __func__, __builtin_return_address(0)); \
goto err_label; \
+ } \
} while (0)
#define __put_kernel_nofault(dst, src, type, err_label) \
@@ -264,8 +269,13 @@ do { \
\
__pu_val = *(__force type *)(src); \
__put_kernel_common(((type *)(dst)), sizeof(type)); \
- if (unlikely(__pu_err)) \
+ if (unlikely(__pu_err)) { \
+ pr_info("%s: memory access failed, ecode 0x%x\n", \
+ __func__, read_csr_excode()); \
+ pr_info("%s: the caller is %pS\n", \
+ __func__, __builtin_return_address(0)); \
goto err_label; \
+ } \
} while (0)
extern unsigned long __copy_user(void *to, const void *from, __kernel_size_t n);
diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
index bf037f0c6b26..1a728082944c 100644
--- a/arch/loongarch/kernel/inst.c
+++ b/arch/loongarch/kernel/inst.c
@@ -246,32 +246,51 @@ static int text_copy_cb(void *data)
if (smp_processor_id() == copy->cpu) {
ret = copy_to_kernel_nofault(copy->dst, copy->src, copy->len);
- if (ret)
+ if (ret) {
pr_err("%s: operation failed\n", __func__);
+ return ret;
+ }
}
flush_icache_range((unsigned long)copy->dst, (unsigned long)copy->dst + copy->len);
- return ret;
+ return 0;
}
int larch_insn_text_copy(void *dst, void *src, size_t len)
{
int ret = 0;
+ int err = 0;
size_t start, end;
struct insn_copy copy = {
.dst = dst,
.src = src,
.len = len,
- .cpu = smp_processor_id(),
+ .cpu = raw_smp_processor_id(),
};
+ /*
+ * Ensure copy.cpu cannot be hot-removed before stop_machine() runs.
+ * If that CPU were removed, nobody would actually update the text.
+ */
+ lockdep_assert_cpus_held();
+
start = round_down((size_t)dst, PAGE_SIZE);
end = round_up((size_t)dst + len, PAGE_SIZE);
- set_memory_rw(start, (end - start) / PAGE_SIZE);
- ret = stop_machine(text_copy_cb, &copy, cpu_online_mask);
- set_memory_rox(start, (end - start) / PAGE_SIZE);
+ err = set_memory_rw(start, (end - start) / PAGE_SIZE);
+ if (err) {
+ pr_info("%s: set_memory_rw() failed\n", __func__);
+ return err;
+ }
+
+ ret = stop_machine_cpuslocked(text_copy_cb, &copy, cpu_online_mask);
+
+ err = set_memory_rox(start, (end - start) / PAGE_SIZE);
+ if (err) {
+ pr_info("%s: set_memory_rox() failed\n", __func__);
+ return err;
+ }
return ret;
}
diff --git a/arch/loongarch/kvm/vcpu.c b/arch/loongarch/kvm/vcpu.c
index 09e137f2f841..8ffd50a470e6 100644
--- a/arch/loongarch/kvm/vcpu.c
+++ b/arch/loongarch/kvm/vcpu.c
@@ -14,7 +14,7 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
STATS_DESC_COUNTER(VCPU, int_exits),
STATS_DESC_COUNTER(VCPU, idle_exits),
diff --git a/arch/loongarch/kvm/vm.c b/arch/loongarch/kvm/vm.c
index 5282158b8122..8cc5ee1c53ef 100644
--- a/arch/loongarch/kvm/vm.c
+++ b/arch/loongarch/kvm/vm.c
@@ -10,7 +10,7 @@
#include <asm/kvm_eiointc.h>
#include <asm/kvm_pch_pic.h>
-const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+const struct kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS(),
STATS_DESC_ICOUNTER(VM, pages),
STATS_DESC_ICOUNTER(VM, hugepages),
@@ -49,8 +49,8 @@ static void kvm_vm_init_features(struct kvm *kvm)
kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PMU);
/* Enable all PV features by default */
- kvm->arch.pv_features = BIT(KVM_FEATURE_IPI);
- kvm->arch.kvm_features = BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI);
+ kvm->arch.pv_features |= BIT(KVM_FEATURE_IPI);
+ kvm->arch.kvm_features |= BIT(KVM_LOONGARCH_VM_FEAT_PV_IPI);
if (kvm_pvtime_supported()) {
kvm->arch.pv_features |= BIT(KVM_FEATURE_PREEMPT);
kvm->arch.pv_features |= BIT(KVM_FEATURE_STEAL_TIME);
diff --git a/arch/loongarch/net/bpf_jit.c b/arch/loongarch/net/bpf_jit.c
index 3bd89f55960d..9cb796e16379 100644
--- a/arch/loongarch/net/bpf_jit.c
+++ b/arch/loongarch/net/bpf_jit.c
@@ -1379,9 +1379,11 @@ void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
int ret;
+ cpus_read_lock();
mutex_lock(&text_mutex);
ret = larch_insn_text_copy(dst, src, len);
mutex_unlock(&text_mutex);
+ cpus_read_unlock();
return ret ? ERR_PTR(-EINVAL) : dst;
}
@@ -1429,10 +1431,12 @@ int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type old_t,
if (ret)
return ret;
+ cpus_read_lock();
mutex_lock(&text_mutex);
if (memcmp(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES))
ret = larch_insn_text_copy(ip, new_insns, LOONGARCH_LONG_JUMP_NBYTES);
mutex_unlock(&text_mutex);
+ cpus_read_unlock();
return ret;
}
@@ -1450,10 +1454,12 @@ int bpf_arch_text_invalidate(void *dst, size_t len)
for (i = 0; i < (len / sizeof(u32)); i++)
inst[i] = INSN_BREAK;
+ cpus_read_lock();
mutex_lock(&text_mutex);
if (larch_insn_text_copy(dst, inst, len))
ret = -EINVAL;
mutex_unlock(&text_mutex);
+ cpus_read_unlock();
kvfree(inst);
@@ -1568,6 +1574,11 @@ void arch_free_bpf_trampoline(void *image, unsigned int size)
bpf_prog_pack_free(image, size);
}
+int arch_protect_bpf_trampoline(void *image, unsigned int size)
+{
+ return 0;
+}
+
/*
* Sign-extend the register if necessary
*/
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 29d9f630edfb..a53abbba43ea 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -38,7 +38,7 @@
#define VECTORSPACING 0x100 /* for EI/VI mode */
#endif
-const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+const struct kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS()
};
@@ -51,7 +51,7 @@ const struct kvm_stats_header kvm_vm_stats_header = {
sizeof(kvm_vm_stats_desc),
};
-const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
STATS_DESC_COUNTER(VCPU, wait_exits),
STATS_DESC_COUNTER(VCPU, cache_exits),
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index 570b3d91e2e4..17e63244e885 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -15,6 +15,9 @@
#define TASK_SIZE_MAX TASK_SIZE_USER64
#endif
+/* Threshold above which VMX copy path is used */
+#define VMX_COPY_THRESHOLD 3328
+
#include <asm-generic/access_ok.h>
/*
@@ -326,40 +329,62 @@ do { \
extern unsigned long __copy_tofrom_user(void __user *to,
const void __user *from, unsigned long size);
-#ifdef __powerpc64__
-static inline unsigned long
-raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
+unsigned long __copy_tofrom_user_base(void __user *to,
+ const void __user *from, unsigned long size);
+
+unsigned long __copy_tofrom_user_power7_vmx(void __user *to,
+ const void __user *from, unsigned long size);
+
+static __always_inline bool will_use_vmx(unsigned long n)
+{
+ return IS_ENABLED(CONFIG_ALTIVEC) && cpu_has_feature(CPU_FTR_VMX_COPY) &&
+ n > VMX_COPY_THRESHOLD;
+}
+
+static __always_inline unsigned long
+raw_copy_tofrom_user(void __user *to, const void __user *from,
+ unsigned long n, unsigned long dir)
{
unsigned long ret;
- barrier_nospec();
- allow_user_access(to, KUAP_READ_WRITE);
+ if (will_use_vmx(n) && enter_vmx_usercopy()) {
+ allow_user_access(to, dir);
+ ret = __copy_tofrom_user_power7_vmx(to, from, n);
+ prevent_user_access(dir);
+ exit_vmx_usercopy();
+
+ if (unlikely(ret)) {
+ allow_user_access(to, dir);
+ ret = __copy_tofrom_user_base(to, from, n);
+ prevent_user_access(dir);
+ }
+ return ret;
+ }
+
+ allow_user_access(to, dir);
ret = __copy_tofrom_user(to, from, n);
- prevent_user_access(KUAP_READ_WRITE);
+ prevent_user_access(dir);
return ret;
}
-#endif /* __powerpc64__ */
-static inline unsigned long raw_copy_from_user(void *to,
- const void __user *from, unsigned long n)
+#ifdef CONFIG_PPC64
+static inline unsigned long
+raw_copy_in_user(void __user *to, const void __user *from, unsigned long n)
{
- unsigned long ret;
+ barrier_nospec();
+ return raw_copy_tofrom_user(to, from, n, KUAP_READ_WRITE);
+}
+#endif /* CONFIG_PPC64 */
- allow_user_access(NULL, KUAP_READ);
- ret = __copy_tofrom_user((__force void __user *)to, from, n);
- prevent_user_access(KUAP_READ);
- return ret;
+static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ return raw_copy_tofrom_user((__force void __user *)to, from, n, KUAP_READ);
}
static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
- unsigned long ret;
-
- allow_user_access(to, KUAP_WRITE);
- ret = __copy_tofrom_user(to, (__force const void __user *)from, n);
- prevent_user_access(KUAP_WRITE);
- return ret;
+ return raw_copy_tofrom_user(to, (__force const void __user *)from, n, KUAP_WRITE);
}
unsigned long __arch_clear_user(void __user *addr, unsigned long size);
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 0ce71310b7d9..d122e8447831 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1159,7 +1159,7 @@ spapr_tce_platform_iommu_attach_dev(struct iommu_domain *platform_domain,
struct device *dev,
struct iommu_domain *old)
{
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
+ struct iommu_domain *domain = iommu_driver_get_domain_for_dev(dev);
struct iommu_table_group *table_group;
struct iommu_group *grp;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index cb5b73adc250..b1761909c23f 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -35,7 +35,6 @@
#include <linux/of_irq.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
-#include <asm/kexec.h>
#include <asm/io.h>
#include <asm/paca.h>
#include <asm/processor.h>
@@ -995,15 +994,6 @@ void __init setup_arch(char **cmdline_p)
initmem_init();
- /*
- * Reserve large chunks of memory for use by CMA for kdump, fadump, KVM and
- * hugetlb. These must be called after initmem_init(), so that
- * pageblock_order is initialised.
- */
- fadump_cma_init();
- kdump_cma_reserve();
- kvm_cma_reserve();
-
early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
if (ppc_md.setup_arch)
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index d79c5d1098c0..2efbe05caed7 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -38,7 +38,7 @@
/* #define EXIT_DEBUG */
-const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+const struct kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS(),
STATS_DESC_ICOUNTER(VM, num_2M_pages),
STATS_DESC_ICOUNTER(VM, num_1G_pages)
@@ -53,7 +53,7 @@ const struct kvm_stats_header kvm_vm_stats_header = {
sizeof(kvm_vm_stats_desc),
};
-const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
STATS_DESC_COUNTER(VCPU, sum_exits),
STATS_DESC_COUNTER(VCPU, mmio_exits),
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 3401b96be475..f3ddb24ece74 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -36,7 +36,7 @@
unsigned long kvmppc_booke_handlers;
-const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+const struct kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS(),
STATS_DESC_ICOUNTER(VM, num_2M_pages),
STATS_DESC_ICOUNTER(VM, num_1G_pages)
@@ -51,7 +51,7 @@ const struct kvm_stats_header kvm_vm_stats_header = {
sizeof(kvm_vm_stats_desc),
};
-const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
STATS_DESC_COUNTER(VCPU, sum_exits),
STATS_DESC_COUNTER(VCPU, mmio_exits),
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
index f9acf866c709..e4469ad73a2e 100644
--- a/arch/powerpc/kvm/e500.h
+++ b/arch/powerpc/kvm/e500.h
@@ -39,15 +39,11 @@ enum vcpu_ftr {
/* bits [6-5] MAS2_X1 and MAS2_X0 and [4-0] bits for WIMGE */
#define E500_TLB_MAS2_ATTR (0x7f)
-struct tlbe_ref {
+struct tlbe_priv {
kvm_pfn_t pfn; /* valid only for TLB0, except briefly */
unsigned int flags; /* E500_TLB_* */
};
-struct tlbe_priv {
- struct tlbe_ref ref;
-};
-
#ifdef CONFIG_KVM_E500V2
struct vcpu_id_table;
#endif
diff --git a/arch/powerpc/kvm/e500_mmu.c b/arch/powerpc/kvm/e500_mmu.c
index 48580c85f23b..75ed1496ead5 100644
--- a/arch/powerpc/kvm/e500_mmu.c
+++ b/arch/powerpc/kvm/e500_mmu.c
@@ -920,12 +920,12 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
vcpu_e500->gtlb_offset[0] = 0;
vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;
- vcpu_e500->gtlb_priv[0] = kzalloc_objs(struct tlbe_ref,
+ vcpu_e500->gtlb_priv[0] = kzalloc_objs(struct tlbe_priv,
vcpu_e500->gtlb_params[0].entries);
if (!vcpu_e500->gtlb_priv[0])
goto free_vcpu;
- vcpu_e500->gtlb_priv[1] = kzalloc_objs(struct tlbe_ref,
+ vcpu_e500->gtlb_priv[1] = kzalloc_objs(struct tlbe_priv,
vcpu_e500->gtlb_params[1].entries);
if (!vcpu_e500->gtlb_priv[1])
goto free_vcpu;
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 06caf8bbbe2b..37e0d3d9e244 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -189,16 +189,16 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
{
struct kvm_book3e_206_tlb_entry *gtlbe =
get_entry(vcpu_e500, tlbsel, esel);
- struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;
+ struct tlbe_priv *tlbe = &vcpu_e500->gtlb_priv[tlbsel][esel];
/* Don't bother with unmapped entries */
- if (!(ref->flags & E500_TLB_VALID)) {
- WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
- "%s: flags %x\n", __func__, ref->flags);
+ if (!(tlbe->flags & E500_TLB_VALID)) {
+ WARN(tlbe->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
+ "%s: flags %x\n", __func__, tlbe->flags);
WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
}
- if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
+ if (tlbsel == 1 && tlbe->flags & E500_TLB_BITMAP) {
u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
int hw_tlb_indx;
unsigned long flags;
@@ -216,28 +216,28 @@ void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
}
mb();
vcpu_e500->g2h_tlb1_map[esel] = 0;
- ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
+ tlbe->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
local_irq_restore(flags);
}
- if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
+ if (tlbsel == 1 && tlbe->flags & E500_TLB_TLB0) {
/*
* TLB1 entry is backed by 4k pages. This should happen
* rarely and is not worth optimizing. Invalidate everything.
*/
kvmppc_e500_tlbil_all(vcpu_e500);
- ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
+ tlbe->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
}
/*
* If TLB entry is still valid then it's a TLB0 entry, and thus
* backed by at most one host tlbe per shadow pid
*/
- if (ref->flags & E500_TLB_VALID)
+ if (tlbe->flags & E500_TLB_VALID)
kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
/* Mark the TLB as not backed by the host anymore */
- ref->flags = 0;
+ tlbe->flags = 0;
}
static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
@@ -245,26 +245,26 @@ static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}
-static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
- struct kvm_book3e_206_tlb_entry *gtlbe,
- kvm_pfn_t pfn, unsigned int wimg,
- bool writable)
+static inline void kvmppc_e500_tlbe_setup(struct tlbe_priv *tlbe,
+ struct kvm_book3e_206_tlb_entry *gtlbe,
+ kvm_pfn_t pfn, unsigned int wimg,
+ bool writable)
{
- ref->pfn = pfn;
- ref->flags = E500_TLB_VALID;
+ tlbe->pfn = pfn;
+ tlbe->flags = E500_TLB_VALID;
if (writable)
- ref->flags |= E500_TLB_WRITABLE;
+ tlbe->flags |= E500_TLB_WRITABLE;
/* Use guest supplied MAS2_G and MAS2_E */
- ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
+ tlbe->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;
}
-static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
+static inline void kvmppc_e500_tlbe_release(struct tlbe_priv *tlbe)
{
- if (ref->flags & E500_TLB_VALID) {
+ if (tlbe->flags & E500_TLB_VALID) {
/* FIXME: don't log bogus pfn for TLB1 */
- trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
- ref->flags = 0;
+ trace_kvm_booke206_ref_release(tlbe->pfn, tlbe->flags);
+ tlbe->flags = 0;
}
}
@@ -284,11 +284,8 @@ static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
int i;
for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
- for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
- struct tlbe_ref *ref =
- &vcpu_e500->gtlb_priv[tlbsel][i].ref;
- kvmppc_e500_ref_release(ref);
- }
+ for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++)
+ kvmppc_e500_tlbe_release(&vcpu_e500->gtlb_priv[tlbsel][i]);
}
}
@@ -304,18 +301,18 @@ void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
static void kvmppc_e500_setup_stlbe(
struct kvm_vcpu *vcpu,
struct kvm_book3e_206_tlb_entry *gtlbe,
- int tsize, struct tlbe_ref *ref, u64 gvaddr,
+ int tsize, struct tlbe_priv *tlbe, u64 gvaddr,
struct kvm_book3e_206_tlb_entry *stlbe)
{
- kvm_pfn_t pfn = ref->pfn;
+ kvm_pfn_t pfn = tlbe->pfn;
u32 pr = vcpu->arch.shared->msr & MSR_PR;
- bool writable = !!(ref->flags & E500_TLB_WRITABLE);
+ bool writable = !!(tlbe->flags & E500_TLB_WRITABLE);
- BUG_ON(!(ref->flags & E500_TLB_VALID));
+ BUG_ON(!(tlbe->flags & E500_TLB_VALID));
/* Force IPROT=0 for all guest mappings. */
stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
- stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
+ stlbe->mas2 = (gvaddr & MAS2_EPN) | (tlbe->flags & E500_TLB_MAS2_ATTR);
stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
e500_shadow_mas3_attrib(gtlbe->mas7_3, writable, pr);
}
@@ -323,7 +320,7 @@ static void kvmppc_e500_setup_stlbe(
static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
- struct tlbe_ref *ref)
+ struct tlbe_priv *tlbe)
{
struct kvm_memory_slot *slot;
unsigned int psize;
@@ -455,9 +452,9 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
}
}
- kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg, writable);
+ kvmppc_e500_tlbe_setup(tlbe, gtlbe, pfn, wimg, writable);
kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
- ref, gvaddr, stlbe);
+ tlbe, gvaddr, stlbe);
writable = tlbe_is_writable(stlbe);
/* Clear i-cache for new pages */
@@ -474,17 +471,17 @@ static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
struct kvm_book3e_206_tlb_entry *stlbe)
{
struct kvm_book3e_206_tlb_entry *gtlbe;
- struct tlbe_ref *ref;
+ struct tlbe_priv *tlbe;
int stlbsel = 0;
int sesel = 0;
int r;
gtlbe = get_entry(vcpu_e500, 0, esel);
- ref = &vcpu_e500->gtlb_priv[0][esel].ref;
+ tlbe = &vcpu_e500->gtlb_priv[0][esel];
r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
- gtlbe, 0, stlbe, ref);
+ gtlbe, 0, stlbe, tlbe);
if (r)
return r;
@@ -494,7 +491,7 @@ static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
}
static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
- struct tlbe_ref *ref,
+ struct tlbe_priv *tlbe,
int esel)
{
unsigned int sesel = vcpu_e500->host_tlb1_nv++;
@@ -507,10 +504,10 @@ static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
}
- vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
+ vcpu_e500->gtlb_priv[1][esel].flags |= E500_TLB_BITMAP;
vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
- WARN_ON(!(ref->flags & E500_TLB_VALID));
+ WARN_ON(!(tlbe->flags & E500_TLB_VALID));
return sesel;
}
@@ -522,24 +519,24 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
- struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
+ struct tlbe_priv *tlbe = &vcpu_e500->gtlb_priv[1][esel];
int sesel;
int r;
r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
- ref);
+ tlbe);
if (r)
return r;
/* Use TLB0 when we can only map a page with 4k */
if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
- vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
+ vcpu_e500->gtlb_priv[1][esel].flags |= E500_TLB_TLB0;
write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
return 0;
}
/* Otherwise map into TLB1 */
- sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
+ sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, tlbe, esel);
write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);
return 0;
@@ -561,11 +558,11 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
/* Triggers after clear_tlb_privs or on initial mapping */
- if (!(priv->ref.flags & E500_TLB_VALID)) {
+ if (!(priv->flags & E500_TLB_VALID)) {
kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
} else {
kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
- &priv->ref, eaddr, &stlbe);
+ priv, eaddr, &stlbe);
write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
}
break;
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 9af969d2cc0c..25a99108caff 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -562,3 +562,4 @@ exc; std r10,32(3)
li r5,4096
b .Ldst_aligned
EXPORT_SYMBOL(__copy_tofrom_user)
+EXPORT_SYMBOL(__copy_tofrom_user_base)
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 8474c682a178..17dbcfbae25f 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -5,13 +5,9 @@
*
* Author: Anton Blanchard <anton@au.ibm.com>
*/
+#include <linux/export.h>
#include <asm/ppc_asm.h>
-#ifndef SELFTEST_CASE
-/* 0 == don't use VMX, 1 == use VMX */
-#define SELFTEST_CASE 0
-#endif
-
#ifdef __BIG_ENDIAN__
#define LVS(VRT,RA,RB) lvsl VRT,RA,RB
#define VPERM(VRT,VRA,VRB,VRC) vperm VRT,VRA,VRB,VRC
@@ -47,10 +43,14 @@
ld r15,STK_REG(R15)(r1)
ld r14,STK_REG(R14)(r1)
.Ldo_err3:
- bl CFUNC(exit_vmx_usercopy)
+ ld r6,STK_REG(R31)(r1) /* original destination pointer */
+ ld r5,STK_REG(R29)(r1) /* original number of bytes */
+ subf r7,r6,r3 /* #bytes copied */
+ subf r3,r7,r5 /* #bytes not copied in r3 */
ld r0,STACKFRAMESIZE+16(r1)
mtlr r0
- b .Lexit
+ addi r1,r1,STACKFRAMESIZE
+ blr
#endif /* CONFIG_ALTIVEC */
.Ldo_err2:
@@ -74,7 +74,6 @@
_GLOBAL(__copy_tofrom_user_power7)
cmpldi r5,16
- cmpldi cr1,r5,3328
std r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
std r4,-STACKFRAMESIZE+STK_REG(R30)(r1)
@@ -82,12 +81,6 @@ _GLOBAL(__copy_tofrom_user_power7)
blt .Lshort_copy
-#ifdef CONFIG_ALTIVEC
-test_feature = SELFTEST_CASE
-BEGIN_FTR_SECTION
- bgt cr1,.Lvmx_copy
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif
.Lnonvmx_copy:
/* Get the source 8B aligned */
@@ -263,23 +256,14 @@ err1; stb r0,0(r3)
15: li r3,0
blr
-.Lunwind_stack_nonvmx_copy:
- addi r1,r1,STACKFRAMESIZE
- b .Lnonvmx_copy
-
-.Lvmx_copy:
#ifdef CONFIG_ALTIVEC
+_GLOBAL(__copy_tofrom_user_power7_vmx)
mflr r0
std r0,16(r1)
stdu r1,-STACKFRAMESIZE(r1)
- bl CFUNC(enter_vmx_usercopy)
- cmpwi cr1,r3,0
- ld r0,STACKFRAMESIZE+16(r1)
- ld r3,STK_REG(R31)(r1)
- ld r4,STK_REG(R30)(r1)
- ld r5,STK_REG(R29)(r1)
- mtlr r0
+ std r3,STK_REG(R31)(r1)
+ std r5,STK_REG(R29)(r1)
/*
* We prefetch both the source and destination using enhanced touch
* instructions. We use a stream ID of 0 for the load side and
@@ -300,8 +284,6 @@ err1; stb r0,0(r3)
DCBT_SETUP_STREAMS(r6, r7, r9, r10, r8)
- beq cr1,.Lunwind_stack_nonvmx_copy
-
/*
* If source and destination are not relatively aligned we use a
* slower permute loop.
@@ -478,7 +460,8 @@ err3; lbz r0,0(r4)
err3; stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
- b CFUNC(exit_vmx_usercopy) /* tail call optimise */
+ li r3,0
+ blr
.Lvmx_unaligned_copy:
/* Get the destination 16B aligned */
@@ -681,5 +664,7 @@ err3; lbz r0,0(r4)
err3; stb r0,0(r3)
15: addi r1,r1,STACKFRAMESIZE
- b CFUNC(exit_vmx_usercopy) /* tail call optimise */
+ li r3,0
+ blr
+EXPORT_SYMBOL(__copy_tofrom_user_power7_vmx)
#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/vmx-helper.c b/arch/powerpc/lib/vmx-helper.c
index 54340912398f..554b248002b4 100644
--- a/arch/powerpc/lib/vmx-helper.c
+++ b/arch/powerpc/lib/vmx-helper.c
@@ -27,6 +27,7 @@ int enter_vmx_usercopy(void)
return 1;
}
+EXPORT_SYMBOL(enter_vmx_usercopy);
/*
* This function must return 0 because we tail call optimise when calling
@@ -49,6 +50,7 @@ int exit_vmx_usercopy(void)
set_dec(1);
return 0;
}
+EXPORT_SYMBOL(exit_vmx_usercopy);
int enter_vmx_ops(void)
{
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index a985fc96b953..b7982d0243d4 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -30,6 +30,10 @@
#include <asm/setup.h>
#include <asm/fixmap.h>
+#include <asm/fadump.h>
+#include <asm/kexec.h>
+#include <asm/kvm_ppc.h>
+
#include <mm/mmu_decl.h>
unsigned long long memory_limit __initdata;
@@ -268,6 +272,16 @@ void __init paging_init(void)
void __init arch_mm_preinit(void)
{
+
+ /*
+ * Reserve large chunks of memory for use by CMA for kdump, fadump, KVM
+ * and hugetlb. These must be called after pageblock_order is
+ * initialised.
+ */
+ fadump_cma_init();
+ kdump_cma_reserve();
+ kvm_cma_reserve();
+
/*
* book3s is limited to 16 page sizes due to encoding this in
* a 4-bit field for slices.
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 26aa26482c9a..992cc5c98214 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -103,6 +103,11 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
void
perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
{
+ perf_callchain_store(entry, perf_arch_instruction_pointer(regs));
+
+ if (!current->mm)
+ return;
+
if (!is_32bit_task())
perf_callchain_user_64(entry, regs);
else
diff --git a/arch/powerpc/perf/callchain_32.c b/arch/powerpc/perf/callchain_32.c
index ddcc2d8aa64a..0de21c5d272c 100644
--- a/arch/powerpc/perf/callchain_32.c
+++ b/arch/powerpc/perf/callchain_32.c
@@ -142,7 +142,6 @@ void perf_callchain_user_32(struct perf_callchain_entry_ctx *entry,
next_ip = perf_arch_instruction_pointer(regs);
lr = regs->link;
sp = regs->gpr[1];
- perf_callchain_store(entry, next_ip);
while (entry->nr < entry->max_stack) {
fp = (unsigned int __user *) (unsigned long) sp;
diff --git a/arch/powerpc/perf/callchain_64.c b/arch/powerpc/perf/callchain_64.c
index 115d1c105e8a..30fb61c5f0cb 100644
--- a/arch/powerpc/perf/callchain_64.c
+++ b/arch/powerpc/perf/callchain_64.c
@@ -77,7 +77,6 @@ void perf_callchain_user_64(struct perf_callchain_entry_ctx *entry,
next_ip = perf_arch_instruction_pointer(regs);
lr = regs->link;
sp = regs->gpr[1];
- perf_callchain_store(entry, next_ip);
while (entry->nr < entry->max_stack) {
fp = (unsigned long __user *) sp;
diff --git a/arch/riscv/boot/dts/microchip/mpfs.dtsi b/arch/riscv/boot/dts/microchip/mpfs.dtsi
index 5c2963e269b8..a0ffedc2d344 100644
--- a/arch/riscv/boot/dts/microchip/mpfs.dtsi
+++ b/arch/riscv/boot/dts/microchip/mpfs.dtsi
@@ -428,6 +428,7 @@
clocks = <&clkcfg CLK_CAN0>, <&clkcfg CLK_MSSPLL3>;
interrupt-parent = <&plic>;
interrupts = <56>;
+ resets = <&mss_top_sysreg CLK_CAN0>;
status = "disabled";
};
@@ -437,6 +438,7 @@
clocks = <&clkcfg CLK_CAN1>, <&clkcfg CLK_MSSPLL3>;
interrupt-parent = <&plic>;
interrupts = <57>;
+ resets = <&mss_top_sysreg CLK_CAN1>;
status = "disabled";
};
diff --git a/arch/riscv/kvm/aia.c b/arch/riscv/kvm/aia.c
index cac3c2b51d72..5ec503288555 100644
--- a/arch/riscv/kvm/aia.c
+++ b/arch/riscv/kvm/aia.c
@@ -13,6 +13,7 @@
#include <linux/irqchip/riscv-imsic.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
+#include <linux/nospec.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <asm/cpufeature.h>
@@ -182,9 +183,14 @@ int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
unsigned long *out_val)
{
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+ unsigned long regs_max = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
- if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
+ if (!riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
return -ENOENT;
+ if (reg_num >= regs_max)
+ return -ENOENT;
+
+ reg_num = array_index_nospec(reg_num, regs_max);
*out_val = 0;
if (kvm_riscv_aia_available())
@@ -198,9 +204,14 @@ int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
unsigned long val)
{
struct kvm_vcpu_aia_csr *csr = &vcpu->arch.aia_context.guest_csr;
+ unsigned long regs_max = sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long);
- if (reg_num >= sizeof(struct kvm_riscv_aia_csr) / sizeof(unsigned long))
+ if (!riscv_isa_extension_available(vcpu->arch.isa, SSAIA))
return -ENOENT;
+ if (reg_num >= regs_max)
+ return -ENOENT;
+
+ reg_num = array_index_nospec(reg_num, regs_max);
if (kvm_riscv_aia_available()) {
((unsigned long *)csr)[reg_num] = val;
diff --git a/arch/riscv/kvm/aia_aplic.c b/arch/riscv/kvm/aia_aplic.c
index d1e50bf5c351..3464f3351df7 100644
--- a/arch/riscv/kvm/aia_aplic.c
+++ b/arch/riscv/kvm/aia_aplic.c
@@ -10,6 +10,7 @@
#include <linux/irqchip/riscv-aplic.h>
#include <linux/kvm_host.h>
#include <linux/math.h>
+#include <linux/nospec.h>
#include <linux/spinlock.h>
#include <linux/swab.h>
#include <kvm/iodev.h>
@@ -45,7 +46,7 @@ static u32 aplic_read_sourcecfg(struct aplic *aplic, u32 irq)
if (!irq || aplic->nr_irqs <= irq)
return 0;
- irqd = &aplic->irqs[irq];
+ irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
raw_spin_lock_irqsave(&irqd->lock, flags);
ret = irqd->sourcecfg;
@@ -61,7 +62,7 @@ static void aplic_write_sourcecfg(struct aplic *aplic, u32 irq, u32 val)
if (!irq || aplic->nr_irqs <= irq)
return;
- irqd = &aplic->irqs[irq];
+ irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
if (val & APLIC_SOURCECFG_D)
val = 0;
@@ -81,7 +82,7 @@ static u32 aplic_read_target(struct aplic *aplic, u32 irq)
if (!irq || aplic->nr_irqs <= irq)
return 0;
- irqd = &aplic->irqs[irq];
+ irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
raw_spin_lock_irqsave(&irqd->lock, flags);
ret = irqd->target;
@@ -97,7 +98,7 @@ static void aplic_write_target(struct aplic *aplic, u32 irq, u32 val)
if (!irq || aplic->nr_irqs <= irq)
return;
- irqd = &aplic->irqs[irq];
+ irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
val &= APLIC_TARGET_EIID_MASK |
(APLIC_TARGET_HART_IDX_MASK << APLIC_TARGET_HART_IDX_SHIFT) |
@@ -116,7 +117,7 @@ static bool aplic_read_pending(struct aplic *aplic, u32 irq)
if (!irq || aplic->nr_irqs <= irq)
return false;
- irqd = &aplic->irqs[irq];
+ irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
raw_spin_lock_irqsave(&irqd->lock, flags);
ret = (irqd->state & APLIC_IRQ_STATE_PENDING) ? true : false;
@@ -132,7 +133,7 @@ static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending)
if (!irq || aplic->nr_irqs <= irq)
return;
- irqd = &aplic->irqs[irq];
+ irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
raw_spin_lock_irqsave(&irqd->lock, flags);
@@ -170,7 +171,7 @@ static bool aplic_read_enabled(struct aplic *aplic, u32 irq)
if (!irq || aplic->nr_irqs <= irq)
return false;
- irqd = &aplic->irqs[irq];
+ irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
raw_spin_lock_irqsave(&irqd->lock, flags);
ret = (irqd->state & APLIC_IRQ_STATE_ENABLED) ? true : false;
@@ -186,7 +187,7 @@ static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled)
if (!irq || aplic->nr_irqs <= irq)
return;
- irqd = &aplic->irqs[irq];
+ irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
raw_spin_lock_irqsave(&irqd->lock, flags);
if (enabled)
@@ -205,7 +206,7 @@ static bool aplic_read_input(struct aplic *aplic, u32 irq)
if (!irq || aplic->nr_irqs <= irq)
return false;
- irqd = &aplic->irqs[irq];
+ irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
raw_spin_lock_irqsave(&irqd->lock, flags);
@@ -254,7 +255,7 @@ static void aplic_update_irq_range(struct kvm *kvm, u32 first, u32 last)
for (irq = first; irq <= last; irq++) {
if (!irq || aplic->nr_irqs <= irq)
continue;
- irqd = &aplic->irqs[irq];
+ irqd = &aplic->irqs[array_index_nospec(irq, aplic->nr_irqs)];
raw_spin_lock_irqsave(&irqd->lock, flags);
@@ -283,7 +284,7 @@ int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level)
if (!aplic || !source || (aplic->nr_irqs <= source))
return -ENODEV;
- irqd = &aplic->irqs[source];
+ irqd = &aplic->irqs[array_index_nospec(source, aplic->nr_irqs)];
ie = (aplic->domaincfg & APLIC_DOMAINCFG_IE) ? true : false;
raw_spin_lock_irqsave(&irqd->lock, flags);
diff --git a/arch/riscv/kvm/aia_device.c b/arch/riscv/kvm/aia_device.c
index b195a93add1c..49c71d3cdb00 100644
--- a/arch/riscv/kvm/aia_device.c
+++ b/arch/riscv/kvm/aia_device.c
@@ -11,6 +11,7 @@
#include <linux/irqchip/riscv-imsic.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
+#include <linux/cpufeature.h>
static int aia_create(struct kvm_device *dev, u32 type)
{
@@ -22,6 +23,9 @@ static int aia_create(struct kvm_device *dev, u32 type)
if (irqchip_in_kernel(kvm))
return -EEXIST;
+ if (!riscv_isa_extension_available(NULL, SSAIA))
+ return -ENODEV;
+
ret = -EBUSY;
if (kvm_trylock_all_vcpus(kvm))
return ret;
@@ -437,7 +441,7 @@ static int aia_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
- int nr_vcpus;
+ int nr_vcpus, r = -ENXIO;
switch (attr->group) {
case KVM_DEV_RISCV_AIA_GRP_CONFIG:
@@ -466,12 +470,18 @@ static int aia_has_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
}
break;
case KVM_DEV_RISCV_AIA_GRP_APLIC:
- return kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
+ mutex_lock(&dev->kvm->lock);
+ r = kvm_riscv_aia_aplic_has_attr(dev->kvm, attr->attr);
+ mutex_unlock(&dev->kvm->lock);
+ break;
case KVM_DEV_RISCV_AIA_GRP_IMSIC:
- return kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
+ mutex_lock(&dev->kvm->lock);
+ r = kvm_riscv_aia_imsic_has_attr(dev->kvm, attr->attr);
+ mutex_unlock(&dev->kvm->lock);
+ break;
}
- return -ENXIO;
+ return r;
}
struct kvm_device_ops kvm_riscv_aia_device_ops = {
diff --git a/arch/riscv/kvm/aia_imsic.c b/arch/riscv/kvm/aia_imsic.c
index 06752fa24798..8786f52cf65a 100644
--- a/arch/riscv/kvm/aia_imsic.c
+++ b/arch/riscv/kvm/aia_imsic.c
@@ -908,6 +908,10 @@ int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
int r, rc = KVM_INSN_CONTINUE_NEXT_SEPC;
struct imsic *imsic = vcpu->arch.aia_context.imsic_state;
+ /* If IMSIC vCPU state not initialized then forward to user space */
+ if (!imsic)
+ return KVM_INSN_EXIT_TO_USER_SPACE;
+
if (isel == KVM_RISCV_AIA_IMSIC_TOPEI) {
/* Read pending and enabled interrupt with highest priority */
topei = imsic_mrif_topei(imsic->swfile, imsic->nr_eix,
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 0b75eb2a1820..088d33ba90ed 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -245,6 +245,7 @@ out:
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
struct kvm_gstage gstage;
+ bool mmu_locked;
if (!kvm->arch.pgd)
return false;
@@ -253,9 +254,12 @@ bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
gstage.flags = 0;
gstage.vmid = READ_ONCE(kvm->arch.vmid.vmid);
gstage.pgd = kvm->arch.pgd;
+ mmu_locked = spin_trylock(&kvm->mmu_lock);
kvm_riscv_gstage_unmap_range(&gstage, range->start << PAGE_SHIFT,
(range->end - range->start) << PAGE_SHIFT,
range->may_block);
+ if (mmu_locked)
+ spin_unlock(&kvm->mmu_lock);
return false;
}
@@ -535,7 +539,7 @@ int kvm_riscv_mmu_map(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
goto out_unlock;
/* Check if we are backed by a THP and thus use block mapping if possible */
- if (vma_pagesize == PAGE_SIZE)
+ if (!logging && (vma_pagesize == PAGE_SIZE))
vma_pagesize = transparent_hugepage_adjust(kvm, memslot, hva, &hfn, &gpa);
if (writable) {
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index a55a95da54d0..fdd99ac1e714 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -24,7 +24,7 @@
#define CREATE_TRACE_POINTS
#include "trace.h"
-const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
STATS_DESC_COUNTER(VCPU, ecall_exit_stat),
STATS_DESC_COUNTER(VCPU, wfi_exit_stat),
diff --git a/arch/riscv/kvm/vcpu_fp.c b/arch/riscv/kvm/vcpu_fp.c
index 030904d82b58..bd5a9e7e7165 100644
--- a/arch/riscv/kvm/vcpu_fp.c
+++ b/arch/riscv/kvm/vcpu_fp.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
+#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <asm/cpufeature.h>
@@ -93,9 +94,11 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
reg_val = &cntx->fp.f.fcsr;
else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
- reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
+ reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) {
+ reg_num = array_index_nospec(reg_num,
+ ARRAY_SIZE(cntx->fp.f.f));
reg_val = &cntx->fp.f.f[reg_num];
- else
+ } else
return -ENOENT;
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
riscv_isa_extension_available(vcpu->arch.isa, d)) {
@@ -107,6 +110,8 @@ int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
return -EINVAL;
+ reg_num = array_index_nospec(reg_num,
+ ARRAY_SIZE(cntx->fp.d.f));
reg_val = &cntx->fp.d.f[reg_num];
} else
return -ENOENT;
@@ -138,9 +143,11 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
reg_val = &cntx->fp.f.fcsr;
else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
- reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
+ reg_num <= KVM_REG_RISCV_FP_F_REG(f[31])) {
+ reg_num = array_index_nospec(reg_num,
+ ARRAY_SIZE(cntx->fp.f.f));
reg_val = &cntx->fp.f.f[reg_num];
- else
+ } else
return -ENOENT;
} else if ((rtype == KVM_REG_RISCV_FP_D) &&
riscv_isa_extension_available(vcpu->arch.isa, d)) {
@@ -152,6 +159,8 @@ int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
if (KVM_REG_SIZE(reg->id) != sizeof(u64))
return -EINVAL;
+ reg_num = array_index_nospec(reg_num,
+ ARRAY_SIZE(cntx->fp.d.f));
reg_val = &cntx->fp.d.f[reg_num];
} else
return -ENOENT;
diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c
index e7ab6cb00646..45ecc0082e90 100644
--- a/arch/riscv/kvm/vcpu_onereg.c
+++ b/arch/riscv/kvm/vcpu_onereg.c
@@ -10,6 +10,7 @@
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/nospec.h>
#include <linux/uaccess.h>
#include <linux/kvm_host.h>
#include <asm/cacheflush.h>
@@ -127,6 +128,7 @@ static int kvm_riscv_vcpu_isa_check_host(unsigned long kvm_ext, unsigned long *g
kvm_ext >= ARRAY_SIZE(kvm_isa_ext_arr))
return -ENOENT;
+ kvm_ext = array_index_nospec(kvm_ext, ARRAY_SIZE(kvm_isa_ext_arr));
*guest_ext = kvm_isa_ext_arr[kvm_ext];
switch (*guest_ext) {
case RISCV_ISA_EXT_SMNPM:
@@ -443,13 +445,16 @@ static int kvm_riscv_vcpu_get_reg_core(struct kvm_vcpu *vcpu,
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CORE);
+ unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
unsigned long reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
- if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
+ if (reg_num >= regs_max)
return -ENOENT;
+ reg_num = array_index_nospec(reg_num, regs_max);
+
if (reg_num == KVM_REG_RISCV_CORE_REG(regs.pc))
reg_val = cntx->sepc;
else if (KVM_REG_RISCV_CORE_REG(regs.pc) < reg_num &&
@@ -476,13 +481,16 @@ static int kvm_riscv_vcpu_set_reg_core(struct kvm_vcpu *vcpu,
unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
KVM_REG_SIZE_MASK |
KVM_REG_RISCV_CORE);
+ unsigned long regs_max = sizeof(struct kvm_riscv_core) / sizeof(unsigned long);
unsigned long reg_val;
if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long))
return -EINVAL;
- if (reg_num >= sizeof(struct kvm_riscv_core) / sizeof(unsigned long))
+ if (reg_num >= regs_max)
return -ENOENT;
+ reg_num = array_index_nospec(reg_num, regs_max);
+
if (copy_from_user(&reg_val, uaddr, KVM_REG_SIZE(reg->id)))
return -EFAULT;
@@ -507,10 +515,13 @@ static int kvm_riscv_vcpu_general_get_csr(struct kvm_vcpu *vcpu,
unsigned long *out_val)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+ unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
- if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
+ if (reg_num >= regs_max)
return -ENOENT;
+ reg_num = array_index_nospec(reg_num, regs_max);
+
if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
kvm_riscv_vcpu_flush_interrupts(vcpu);
*out_val = (csr->hvip >> VSIP_TO_HVIP_SHIFT) & VSIP_VALID_MASK;
@@ -526,10 +537,13 @@ static int kvm_riscv_vcpu_general_set_csr(struct kvm_vcpu *vcpu,
unsigned long reg_val)
{
struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr;
+ unsigned long regs_max = sizeof(struct kvm_riscv_csr) / sizeof(unsigned long);
- if (reg_num >= sizeof(struct kvm_riscv_csr) / sizeof(unsigned long))
+ if (reg_num >= regs_max)
return -ENOENT;
+ reg_num = array_index_nospec(reg_num, regs_max);
+
if (reg_num == KVM_REG_RISCV_CSR_REG(sip)) {
reg_val &= VSIP_VALID_MASK;
reg_val <<= VSIP_TO_HVIP_SHIFT;
@@ -548,10 +562,15 @@ static inline int kvm_riscv_vcpu_smstateen_set_csr(struct kvm_vcpu *vcpu,
unsigned long reg_val)
{
struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
+ unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) /
+ sizeof(unsigned long);
- if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
- sizeof(unsigned long))
- return -EINVAL;
+ if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
+ return -ENOENT;
+ if (reg_num >= regs_max)
+ return -ENOENT;
+
+ reg_num = array_index_nospec(reg_num, regs_max);
((unsigned long *)csr)[reg_num] = reg_val;
return 0;
@@ -562,10 +581,15 @@ static int kvm_riscv_vcpu_smstateen_get_csr(struct kvm_vcpu *vcpu,
unsigned long *out_val)
{
struct kvm_vcpu_smstateen_csr *csr = &vcpu->arch.smstateen_csr;
+ unsigned long regs_max = sizeof(struct kvm_riscv_smstateen_csr) /
+ sizeof(unsigned long);
- if (reg_num >= sizeof(struct kvm_riscv_smstateen_csr) /
- sizeof(unsigned long))
- return -EINVAL;
+ if (!riscv_isa_extension_available(vcpu->arch.isa, SMSTATEEN))
+ return -ENOENT;
+ if (reg_num >= regs_max)
+ return -ENOENT;
+
+ reg_num = array_index_nospec(reg_num, regs_max);
*out_val = ((unsigned long *)csr)[reg_num];
return 0;
@@ -595,10 +619,7 @@ static int kvm_riscv_vcpu_get_reg_csr(struct kvm_vcpu *vcpu,
rc = kvm_riscv_vcpu_aia_get_csr(vcpu, reg_num, &reg_val);
break;
case KVM_REG_RISCV_CSR_SMSTATEEN:
- rc = -EINVAL;
- if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
- rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num,
- &reg_val);
+ rc = kvm_riscv_vcpu_smstateen_get_csr(vcpu, reg_num, &reg_val);
break;
default:
rc = -ENOENT;
@@ -640,10 +661,7 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
rc = kvm_riscv_vcpu_aia_set_csr(vcpu, reg_num, reg_val);
break;
case KVM_REG_RISCV_CSR_SMSTATEEN:
- rc = -EINVAL;
- if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN))
- rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num,
- reg_val);
+ rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num, reg_val);
break;
default:
rc = -ENOENT;
diff --git a/arch/riscv/kvm/vcpu_pmu.c b/arch/riscv/kvm/vcpu_pmu.c
index 4d8d5e9aa53d..e873430e596b 100644
--- a/arch/riscv/kvm/vcpu_pmu.c
+++ b/arch/riscv/kvm/vcpu_pmu.c
@@ -10,6 +10,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
+#include <linux/nospec.h>
#include <linux/perf/riscv_pmu.h>
#include <asm/csr.h>
#include <asm/kvm_vcpu_sbi.h>
@@ -87,7 +88,8 @@ static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
static u64 kvm_pmu_get_perf_event_hw_config(u32 sbi_event_code)
{
- return hw_event_perf_map[sbi_event_code];
+ return hw_event_perf_map[array_index_nospec(sbi_event_code,
+ SBI_PMU_HW_GENERAL_MAX)];
}
static u64 kvm_pmu_get_perf_event_cache_config(u32 sbi_event_code)
@@ -218,6 +220,7 @@ static int pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx,
return -EINVAL;
}
+ cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS);
pmc = &kvpmu->pmc[cidx];
if (pmc->cinfo.type != SBI_PMU_CTR_TYPE_FW)
@@ -244,6 +247,7 @@ static int pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
return -EINVAL;
}
+ cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS);
pmc = &kvpmu->pmc[cidx];
if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
@@ -520,11 +524,12 @@ int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
{
struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
- if (cidx > RISCV_KVM_MAX_COUNTERS || cidx == 1) {
+ if (cidx >= RISCV_KVM_MAX_COUNTERS || cidx == 1) {
retdata->err_val = SBI_ERR_INVALID_PARAM;
return 0;
}
+ cidx = array_index_nospec(cidx, RISCV_KVM_MAX_COUNTERS);
retdata->out_val = kvpmu->pmc[cidx].cinfo.value;
return 0;
@@ -559,7 +564,8 @@ int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
}
/* Start the counters that have been configured and requested by the guest */
for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
- pmc_index = i + ctr_base;
+ pmc_index = array_index_nospec(i + ctr_base,
+ RISCV_KVM_MAX_COUNTERS);
if (!test_bit(pmc_index, kvpmu->pmc_in_use))
continue;
/* The guest started the counter again. Reset the overflow status */
@@ -630,7 +636,8 @@ int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
/* Stop the counters that have been configured and requested by the guest */
for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
- pmc_index = i + ctr_base;
+ pmc_index = array_index_nospec(i + ctr_base,
+ RISCV_KVM_MAX_COUNTERS);
if (!test_bit(pmc_index, kvpmu->pmc_in_use))
continue;
pmc = &kvpmu->pmc[pmc_index];
@@ -761,6 +768,7 @@ int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_ba
}
}
+ ctr_idx = array_index_nospec(ctr_idx, RISCV_KVM_MAX_COUNTERS);
pmc = &kvpmu->pmc[ctr_idx];
pmc->idx = ctr_idx;
diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c
index 58bce57dc55b..13c63ae1a78b 100644
--- a/arch/riscv/kvm/vm.c
+++ b/arch/riscv/kvm/vm.c
@@ -13,7 +13,7 @@
#include <linux/kvm_host.h>
#include <asm/kvm_mmu.h>
-const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+const struct kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS()
};
static_assert(ARRAY_SIZE(kvm_vm_stats_desc) ==
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 7fdf960191d3..d10a17e6531d 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -147,10 +147,8 @@ void noinstr do_io_irq(struct pt_regs *regs)
bool from_idle;
from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
- if (from_idle) {
+ if (from_idle)
update_timer_idle();
- regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
- }
irq_enter_rcu();
@@ -176,6 +174,9 @@ void noinstr do_io_irq(struct pt_regs *regs)
set_irq_regs(old_regs);
irqentry_exit(regs, state);
+
+ if (from_idle)
+ regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
}
void noinstr do_ext_irq(struct pt_regs *regs)
@@ -185,10 +186,8 @@ void noinstr do_ext_irq(struct pt_regs *regs)
bool from_idle;
from_idle = test_and_clear_cpu_flag(CIF_ENABLED_WAIT);
- if (from_idle) {
+ if (from_idle)
update_timer_idle();
- regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
- }
irq_enter_rcu();
@@ -210,6 +209,9 @@ void noinstr do_ext_irq(struct pt_regs *regs)
irq_exit_rcu();
set_irq_regs(old_regs);
irqentry_exit(regs, state);
+
+ if (from_idle)
+ regs->psw.mask &= ~(PSW_MASK_EXT | PSW_MASK_IO | PSW_MASK_WAIT);
}
static void show_msi_interrupt(struct seq_file *p, int irq)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index bc7d6fa66eaf..3eb60aa932ec 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -65,7 +65,7 @@
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
(KVM_MAX_VCPUS + LOCAL_IRQS))
-const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+const struct kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS(),
STATS_DESC_COUNTER(VM, inject_io),
STATS_DESC_COUNTER(VM, inject_float_mchk),
@@ -91,7 +91,7 @@ const struct kvm_stats_header kvm_vm_stats_header = {
sizeof(kvm_vm_stats_desc),
};
-const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
STATS_DESC_COUNTER(VCPU, exit_userspace),
STATS_DESC_COUNTER(VCPU, exit_null),
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ff07c45e3c73..6e4e3ef9b8c7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2485,7 +2485,8 @@ int memslot_rmap_alloc(struct kvm_memory_slot *slot, unsigned long npages);
KVM_X86_QUIRK_MWAIT_NEVER_UD_FAULTS | \
KVM_X86_QUIRK_SLOT_ZAP_ALL | \
KVM_X86_QUIRK_STUFF_FEATURE_MSRS | \
- KVM_X86_QUIRK_IGNORE_GUEST_PAT)
+ KVM_X86_QUIRK_IGNORE_GUEST_PAT | \
+ KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM)
#define KVM_X86_CONDITIONAL_QUIRKS \
(KVM_X86_QUIRK_CD_NW_CLEARED | \
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 846a63215ce1..0d4538fa6c31 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -476,6 +476,7 @@ struct kvm_sync_regs {
#define KVM_X86_QUIRK_SLOT_ZAP_ALL (1 << 7)
#define KVM_X86_QUIRK_STUFF_FEATURE_MSRS (1 << 8)
#define KVM_X86_QUIRK_IGNORE_GUEST_PAT (1 << 9)
+#define KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM (1 << 10)
#define KVM_STATE_NESTED_FORMAT_VMX 0
#define KVM_STATE_NESTED_FORMAT_SVM 1
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index d93f87f29d03..961714e6adae 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1894,6 +1894,7 @@ void __init check_x2apic(void)
static inline void try_to_enable_x2apic(int remap_mode) { }
static inline void __x2apic_enable(void) { }
+static inline void __x2apic_disable(void) { }
#endif /* !CONFIG_X86_X2APIC */
void __init enable_IR_x2apic(void)
@@ -2456,6 +2457,11 @@ static void lapic_resume(void *data)
if (x2apic_mode) {
__x2apic_enable();
} else {
+ if (x2apic_enabled()) {
+ pr_warn_once("x2apic: re-enabled by firmware during resume. Disabling\n");
+ __x2apic_disable();
+ }
+
/*
* Make sure the APICBASE points to the right address
*
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index d2486506a808..8137927e7387 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -776,7 +776,10 @@ do { \
#define SYNTHESIZED_F(name) \
({ \
kvm_cpu_cap_synthesized |= feature_bit(name); \
- F(name); \
+ \
+ BUILD_BUG_ON(X86_FEATURE_##name >= MAX_CPU_FEATURES); \
+ if (boot_cpu_has(X86_FEATURE_##name)) \
+ F(name); \
})
/*
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index 30202942289a..9b140bbdc1d8 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1981,16 +1981,17 @@ int kvm_hv_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
if (entries[i] == KVM_HV_TLB_FLUSHALL_ENTRY)
goto out_flush_all;
- if (is_noncanonical_invlpg_address(entries[i], vcpu))
- continue;
-
/*
* Lower 12 bits of 'address' encode the number of additional
* pages to flush.
*/
gva = entries[i] & PAGE_MASK;
- for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++)
+ for (j = 0; j < (entries[i] & ~PAGE_MASK) + 1; j++) {
+ if (is_noncanonical_invlpg_address(gva + j * PAGE_SIZE, vcpu))
+ continue;
+
kvm_x86_call(flush_tlb_gva)(vcpu, gva + j * PAGE_SIZE);
+ }
++vcpu->stat.tlb_flush;
}
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index bb257793b6cb..eed96ff6e722 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -321,7 +321,8 @@ void kvm_fire_mask_notifiers(struct kvm *kvm, unsigned irqchip, unsigned pin,
idx = srcu_read_lock(&kvm->irq_srcu);
gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
if (gsi != -1)
- hlist_for_each_entry_rcu(kimn, &ioapic->mask_notifier_list, link)
+ hlist_for_each_entry_srcu(kimn, &ioapic->mask_notifier_list, link,
+ srcu_read_lock_held(&kvm->irq_srcu))
if (kimn->irq == gsi)
kimn->func(kimn, mask);
srcu_read_unlock(&kvm->irq_srcu, idx);
diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c
index f92214b1a938..f7ec7914e3c4 100644
--- a/arch/x86/kvm/svm/avic.c
+++ b/arch/x86/kvm/svm/avic.c
@@ -189,12 +189,12 @@ static void avic_activate_vmcb(struct vcpu_svm *svm)
struct kvm_vcpu *vcpu = &svm->vcpu;
vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
-
vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
vmcb->control.avic_physical_id |= avic_get_max_physical_id(vcpu);
-
vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
+ svm_clr_intercept(svm, INTERCEPT_CR8_WRITE);
+
/*
* Note: KVM supports hybrid-AVIC mode, where KVM emulates x2APIC MSR
* accesses, while interrupt injection to a running vCPU can be
@@ -226,6 +226,9 @@ static void avic_deactivate_vmcb(struct vcpu_svm *svm)
vmcb->control.int_ctl &= ~(AVIC_ENABLE_MASK | X2APIC_MODE_MASK);
vmcb->control.avic_physical_id &= ~AVIC_PHYSICAL_MAX_INDEX_MASK;
+ if (!sev_es_guest(svm->vcpu.kvm))
+ svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
+
/*
* If running nested and the guest uses its own MSR bitmap, there
* is no need to update L0's msr bitmap
@@ -368,7 +371,7 @@ void avic_init_vmcb(struct vcpu_svm *svm, struct vmcb *vmcb)
vmcb->control.avic_physical_id = __sme_set(__pa(kvm_svm->avic_physical_id_table));
vmcb->control.avic_vapic_bar = APIC_DEFAULT_PHYS_BASE;
- if (kvm_apicv_activated(svm->vcpu.kvm))
+ if (kvm_vcpu_apicv_active(&svm->vcpu))
avic_activate_vmcb(svm);
else
avic_deactivate_vmcb(svm);
diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index 53ab6ce3cc26..b36c33255bed 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -418,6 +418,15 @@ static bool nested_vmcb_check_controls(struct kvm_vcpu *vcpu)
return __nested_vmcb_check_controls(vcpu, ctl);
}
+int nested_svm_check_cached_vmcb12(struct kvm_vcpu *vcpu)
+{
+ if (!nested_vmcb_check_save(vcpu) ||
+ !nested_vmcb_check_controls(vcpu))
+ return -EINVAL;
+
+ return 0;
+}
+
/*
* If a feature is not advertised to L1, clear the corresponding vmcb12
* intercept.
@@ -1028,8 +1037,7 @@ int nested_svm_vmrun(struct kvm_vcpu *vcpu)
nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
- if (!nested_vmcb_check_save(vcpu) ||
- !nested_vmcb_check_controls(vcpu)) {
+ if (nested_svm_check_cached_vmcb12(vcpu) < 0) {
vmcb12->control.exit_code = SVM_EXIT_ERR;
vmcb12->control.exit_info_1 = 0;
vmcb12->control.exit_info_2 = 0;
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 8f8bc863e214..e6477affac9a 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1077,8 +1077,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu, bool init_event)
svm_set_intercept(svm, INTERCEPT_CR0_WRITE);
svm_set_intercept(svm, INTERCEPT_CR3_WRITE);
svm_set_intercept(svm, INTERCEPT_CR4_WRITE);
- if (!kvm_vcpu_apicv_active(vcpu))
- svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
+ svm_set_intercept(svm, INTERCEPT_CR8_WRITE);
set_dr_intercepts(svm);
@@ -1189,7 +1188,7 @@ static void init_vmcb(struct kvm_vcpu *vcpu, bool init_event)
if (guest_cpu_cap_has(vcpu, X86_FEATURE_ERAPS))
svm->vmcb->control.erap_ctl |= ERAP_CONTROL_ALLOW_LARGER_RAP;
- if (kvm_vcpu_apicv_active(vcpu))
+ if (enable_apicv && irqchip_in_kernel(vcpu->kvm))
avic_init_vmcb(svm, vmcb);
if (vnmi)
@@ -2674,9 +2673,11 @@ static int dr_interception(struct kvm_vcpu *vcpu)
static int cr8_write_interception(struct kvm_vcpu *vcpu)
{
+ u8 cr8_prev = kvm_get_cr8(vcpu);
int r;
- u8 cr8_prev = kvm_get_cr8(vcpu);
+ WARN_ON_ONCE(kvm_vcpu_apicv_active(vcpu));
+
/* instruction emulation calls kvm_set_cr8() */
r = cr_interception(vcpu);
if (lapic_in_kernel(vcpu))
@@ -4879,11 +4880,15 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
vmcb12 = map.hva;
nested_copy_vmcb_control_to_cache(svm, &vmcb12->control);
nested_copy_vmcb_save_to_cache(svm, &vmcb12->save);
- ret = enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa, vmcb12, false);
- if (ret)
+ if (nested_svm_check_cached_vmcb12(vcpu) < 0)
+ goto unmap_save;
+
+ if (enter_svm_guest_mode(vcpu, smram64->svm_guest_vmcb_gpa,
+ vmcb12, false) != 0)
goto unmap_save;
+ ret = 0;
svm->nested.nested_run_pending = 1;
unmap_save:
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index ebd7b36b1ceb..6942e6b0eda6 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -797,6 +797,7 @@ static inline int nested_svm_simple_vmexit(struct vcpu_svm *svm, u32 exit_code)
int nested_svm_exit_handled(struct vcpu_svm *svm);
int nested_svm_check_permissions(struct kvm_vcpu *vcpu);
+int nested_svm_check_cached_vmcb12(struct kvm_vcpu *vcpu);
int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
bool has_error_code, u32 error_code);
int nested_svm_exit_special(struct vcpu_svm *svm);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 248635da6766..937aeb474af7 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -3300,10 +3300,24 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu,
if (CC(vmcs12->guest_cr4 & X86_CR4_CET && !(vmcs12->guest_cr0 & X86_CR0_WP)))
return -EINVAL;
- if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) &&
- (CC(!kvm_dr7_valid(vmcs12->guest_dr7)) ||
- CC(!vmx_is_valid_debugctl(vcpu, vmcs12->guest_ia32_debugctl, false))))
- return -EINVAL;
+ if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_DEBUG_CONTROLS) {
+ u64 debugctl = vmcs12->guest_ia32_debugctl;
+
+ /*
+ * FREEZE_IN_SMM is not virtualized, but allow L1 to set it in
+ * vmcs12's DEBUGCTL under a quirk for backwards compatibility.
+ * Note that the quirk only relaxes the consistency check. The
+ * vmcc02 bit is still under the control of the host. In
+ * particular, if a host administrator decides to clear the bit,
+ * then L1 has no say in the matter.
+ */
+ if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_VMCS12_ALLOW_FREEZE_IN_SMM))
+ debugctl &= ~DEBUGCTLMSR_FREEZE_IN_SMM;
+
+ if (CC(!kvm_dr7_valid(vmcs12->guest_dr7)) ||
+ CC(!vmx_is_valid_debugctl(vcpu, debugctl, false)))
+ return -EINVAL;
+ }
if ((vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_PAT) &&
CC(!kvm_pat_valid(vmcs12->guest_ia32_pat)))
@@ -6842,13 +6856,34 @@ void vmx_leave_nested(struct kvm_vcpu *vcpu)
free_nested(vcpu);
}
+int nested_vmx_check_restored_vmcs12(struct kvm_vcpu *vcpu)
+{
+ enum vm_entry_failure_code ignored;
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+ if (nested_cpu_has_shadow_vmcs(vmcs12) &&
+ vmcs12->vmcs_link_pointer != INVALID_GPA) {
+ struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
+
+ if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
+ !shadow_vmcs12->hdr.shadow_vmcs)
+ return -EINVAL;
+ }
+
+ if (nested_vmx_check_controls(vcpu, vmcs12) ||
+ nested_vmx_check_host_state(vcpu, vmcs12) ||
+ nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
+ return -EINVAL;
+
+ return 0;
+}
+
static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
struct kvm_nested_state __user *user_kvm_nested_state,
struct kvm_nested_state *kvm_state)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12;
- enum vm_entry_failure_code ignored;
struct kvm_vmx_nested_state_data __user *user_vmx_nested_state =
&user_kvm_nested_state->data.vmx[0];
int ret;
@@ -6979,25 +7014,20 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
vmx->nested.mtf_pending =
!!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);
- ret = -EINVAL;
if (nested_cpu_has_shadow_vmcs(vmcs12) &&
vmcs12->vmcs_link_pointer != INVALID_GPA) {
struct vmcs12 *shadow_vmcs12 = get_shadow_vmcs12(vcpu);
+ ret = -EINVAL;
if (kvm_state->size <
sizeof(*kvm_state) +
sizeof(user_vmx_nested_state->vmcs12) + sizeof(*shadow_vmcs12))
goto error_guest_mode;
+ ret = -EFAULT;
if (copy_from_user(shadow_vmcs12,
user_vmx_nested_state->shadow_vmcs12,
- sizeof(*shadow_vmcs12))) {
- ret = -EFAULT;
- goto error_guest_mode;
- }
-
- if (shadow_vmcs12->hdr.revision_id != VMCS12_REVISION ||
- !shadow_vmcs12->hdr.shadow_vmcs)
+ sizeof(*shadow_vmcs12)))
goto error_guest_mode;
}
@@ -7008,9 +7038,8 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
kvm_state->hdr.vmx.preemption_timer_deadline;
}
- if (nested_vmx_check_controls(vcpu, vmcs12) ||
- nested_vmx_check_host_state(vcpu, vmcs12) ||
- nested_vmx_check_guest_state(vcpu, vmcs12, &ignored))
+ ret = nested_vmx_check_restored_vmcs12(vcpu);
+ if (ret < 0)
goto error_guest_mode;
vmx->nested.dirty_vmcs12 = true;
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index b844c5d59025..213a448104af 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -22,6 +22,7 @@ void nested_vmx_setup_ctls_msrs(struct vmcs_config *vmcs_conf, u32 ept_caps);
void nested_vmx_hardware_unsetup(void);
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_set_vmcs_shadowing_bitmap(void);
+int nested_vmx_check_restored_vmcs12(struct kvm_vcpu *vcpu);
void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu);
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
bool from_vmentry);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 967b58a8ab9d..8b24e682535b 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1149,7 +1149,7 @@ static void add_atomic_switch_msr(struct vcpu_vmx *vmx, unsigned msr,
}
vmx_add_auto_msr(&m->guest, msr, guest_val, VM_ENTRY_MSR_LOAD_COUNT, kvm);
- vmx_add_auto_msr(&m->guest, msr, host_val, VM_EXIT_MSR_LOAD_COUNT, kvm);
+ vmx_add_auto_msr(&m->host, msr, host_val, VM_EXIT_MSR_LOAD_COUNT, kvm);
}
static bool update_transition_efer(struct vcpu_vmx *vmx)
@@ -8528,9 +8528,13 @@ int vmx_leave_smm(struct kvm_vcpu *vcpu, const union kvm_smram *smram)
}
if (vmx->nested.smm.guest_mode) {
+ /* Triple fault if the state is invalid. */
+ if (nested_vmx_check_restored_vmcs12(vcpu) < 0)
+ return 1;
+
ret = nested_vmx_enter_non_root_mode(vcpu, false);
- if (ret)
- return ret;
+ if (ret != NVMX_VMENTRY_SUCCESS)
+ return 1;
vmx->nested.nested_run_pending = 1;
vmx->nested.smm.guest_mode = false;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a03530795707..fd1c4a36b593 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -243,7 +243,7 @@ EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_ipiv);
bool __read_mostly enable_device_posted_irqs = true;
EXPORT_SYMBOL_FOR_KVM_INTERNAL(enable_device_posted_irqs);
-const struct _kvm_stats_desc kvm_vm_stats_desc[] = {
+const struct kvm_stats_desc kvm_vm_stats_desc[] = {
KVM_GENERIC_VM_STATS(),
STATS_DESC_COUNTER(VM, mmu_shadow_zapped),
STATS_DESC_COUNTER(VM, mmu_pte_write),
@@ -269,7 +269,7 @@ const struct kvm_stats_header kvm_vm_stats_header = {
sizeof(kvm_vm_stats_desc),
};
-const struct _kvm_stats_desc kvm_vcpu_stats_desc[] = {
+const struct kvm_stats_desc kvm_vcpu_stats_desc[] = {
KVM_GENERIC_VCPU_STATS(),
STATS_DESC_COUNTER(VCPU, pf_taken),
STATS_DESC_COUNTER(VCPU, pf_fixed),
diff --git a/drivers/accel/amdxdna/aie2_ctx.c b/drivers/accel/amdxdna/aie2_ctx.c
index afee5e667f77..c0d348884f74 100644
--- a/drivers/accel/amdxdna/aie2_ctx.c
+++ b/drivers/accel/amdxdna/aie2_ctx.c
@@ -165,7 +165,6 @@ aie2_sched_notify(struct amdxdna_sched_job *job)
trace_xdna_job(&job->base, job->hwctx->name, "signaled fence", job->seq);
- amdxdna_pm_suspend_put(job->hwctx->client->xdna);
job->hwctx->priv->completed++;
dma_fence_signal(fence);
@@ -290,19 +289,11 @@ aie2_sched_job_run(struct drm_sched_job *sched_job)
struct dma_fence *fence;
int ret;
- ret = amdxdna_pm_resume_get(hwctx->client->xdna);
- if (ret)
+ if (!hwctx->priv->mbox_chann)
return NULL;
- if (!hwctx->priv->mbox_chann) {
- amdxdna_pm_suspend_put(hwctx->client->xdna);
- return NULL;
- }
-
- if (!mmget_not_zero(job->mm)) {
- amdxdna_pm_suspend_put(hwctx->client->xdna);
+ if (!mmget_not_zero(job->mm))
return ERR_PTR(-ESRCH);
- }
kref_get(&job->refcnt);
fence = dma_fence_get(job->fence);
@@ -333,7 +324,6 @@ aie2_sched_job_run(struct drm_sched_job *sched_job)
out:
if (ret) {
- amdxdna_pm_suspend_put(hwctx->client->xdna);
dma_fence_put(job->fence);
aie2_job_put(job);
mmput(job->mm);
diff --git a/drivers/accel/amdxdna/amdxdna_ctx.c b/drivers/accel/amdxdna/amdxdna_ctx.c
index 666dfd7b2a80..838430903a3e 100644
--- a/drivers/accel/amdxdna/amdxdna_ctx.c
+++ b/drivers/accel/amdxdna/amdxdna_ctx.c
@@ -17,6 +17,7 @@
#include "amdxdna_ctx.h"
#include "amdxdna_gem.h"
#include "amdxdna_pci_drv.h"
+#include "amdxdna_pm.h"
#define MAX_HWCTX_ID 255
#define MAX_ARG_COUNT 4095
@@ -445,6 +446,7 @@ put_shmem_bo:
void amdxdna_sched_job_cleanup(struct amdxdna_sched_job *job)
{
trace_amdxdna_debug_point(job->hwctx->name, job->seq, "job release");
+ amdxdna_pm_suspend_put(job->hwctx->client->xdna);
amdxdna_arg_bos_put(job);
amdxdna_gem_put_obj(job->cmd_bo);
dma_fence_put(job->fence);
@@ -482,6 +484,12 @@ int amdxdna_cmd_submit(struct amdxdna_client *client,
goto cmd_put;
}
+ ret = amdxdna_pm_resume_get(xdna);
+ if (ret) {
+ XDNA_ERR(xdna, "Resume failed, ret %d", ret);
+ goto put_bos;
+ }
+
idx = srcu_read_lock(&client->hwctx_srcu);
hwctx = xa_load(&client->hwctx_xa, hwctx_hdl);
if (!hwctx) {
@@ -522,6 +530,8 @@ put_fence:
dma_fence_put(job->fence);
unlock_srcu:
srcu_read_unlock(&client->hwctx_srcu, idx);
+ amdxdna_pm_suspend_put(xdna);
+put_bos:
amdxdna_arg_bos_put(job);
cmd_put:
amdxdna_gem_put_obj(job->cmd_bo);
diff --git a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
index 421242acb184..fc0ee8d637f9 100644
--- a/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
+++ b/drivers/accel/ivpu/ivpu_hw_40xx_reg.h
@@ -121,12 +121,6 @@
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY 0x0003006cu
#define VPU_50XX_HOST_SS_AON_PWR_ISLAND_STATUS_DLY_STATUS_DLY_MASK GENMASK(7, 0)
-#define VPU_40XX_HOST_SS_AON_RETENTION0 0x0003000cu
-#define VPU_40XX_HOST_SS_AON_RETENTION1 0x00030010u
-#define VPU_40XX_HOST_SS_AON_RETENTION2 0x00030014u
-#define VPU_40XX_HOST_SS_AON_RETENTION3 0x00030018u
-#define VPU_40XX_HOST_SS_AON_RETENTION4 0x0003001cu
-
#define VPU_40XX_HOST_SS_AON_IDLE_GEN 0x00030200u
#define VPU_40XX_HOST_SS_AON_IDLE_GEN_EN_MASK BIT_MASK(0)
#define VPU_40XX_HOST_SS_AON_IDLE_GEN_HW_PG_EN_MASK BIT_MASK(1)
diff --git a/drivers/accel/ivpu/ivpu_hw_ip.c b/drivers/accel/ivpu/ivpu_hw_ip.c
index 959984c54341..37f95a0551ed 100644
--- a/drivers/accel/ivpu/ivpu_hw_ip.c
+++ b/drivers/accel/ivpu/ivpu_hw_ip.c
@@ -931,7 +931,6 @@ static int soc_cpu_boot_40xx(struct ivpu_device *vdev)
static int soc_cpu_boot_60xx(struct ivpu_device *vdev)
{
- REGV_WR64(VPU_40XX_HOST_SS_AON_RETENTION1, vdev->fw->mem_bp->vpu_addr);
soc_cpu_set_entry_point_40xx(vdev, vdev->fw->cold_boot_entry_point);
return 0;
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index df0ff0764d0d..6f4b545f7377 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -9,6 +9,7 @@ config ARCH_SUPPORTS_ACPI
menuconfig ACPI
bool "ACPI (Advanced Configuration and Power Interface) Support"
depends on ARCH_SUPPORTS_ACPI
+ select AUXILIARY_BUS
select PNP
select NLS
select CRC32
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 64199b19ceff..a09636a4168e 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -135,7 +135,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev,
}
}
- if (adev->device_type == ACPI_BUS_TYPE_DEVICE && !adev->pnp.type.backlight) {
+ if (adev->device_type == ACPI_BUS_TYPE_DEVICE) {
LIST_HEAD(resource_list);
count = acpi_dev_get_resources(adev, &resource_list, NULL, NULL);
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index b34a48068a8d..b1652cab631a 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -113,6 +113,10 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
PCI_ANY_ID, PCI_ANY_ID, NULL);
if (ide_dev) {
errata.piix4.bmisx = pci_resource_start(ide_dev, 4);
+ if (errata.piix4.bmisx)
+ dev_dbg(&ide_dev->dev,
+ "Bus master activity detection (BM-IDE) erratum enabled\n");
+
pci_dev_put(ide_dev);
}
@@ -131,20 +135,17 @@ static int acpi_processor_errata_piix4(struct pci_dev *dev)
if (isa_dev) {
pci_read_config_byte(isa_dev, 0x76, &value1);
pci_read_config_byte(isa_dev, 0x77, &value2);
- if ((value1 & 0x80) || (value2 & 0x80))
+ if ((value1 & 0x80) || (value2 & 0x80)) {
errata.piix4.fdma = 1;
+ dev_dbg(&isa_dev->dev,
+ "Type-F DMA livelock erratum (C3 disabled)\n");
+ }
pci_dev_put(isa_dev);
}
break;
}
- if (ide_dev)
- dev_dbg(&ide_dev->dev, "Bus master activity detection (BM-IDE) erratum enabled\n");
-
- if (isa_dev)
- dev_dbg(&isa_dev->dev, "Type-F DMA livelock erratum (C3 disabled)\n");
-
return 0;
}
diff --git a/drivers/acpi/acpi_video.c b/drivers/acpi/acpi_video.c
index 3fa28f1abca3..adbaf0226c90 100644
--- a/drivers/acpi/acpi_video.c
+++ b/drivers/acpi/acpi_video.c
@@ -9,6 +9,7 @@
#define pr_fmt(fmt) "ACPI: video: " fmt
+#include <linux/auxiliary_bus.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
@@ -21,7 +22,6 @@
#include <linux/sort.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
-#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/dmi.h>
#include <linux/suspend.h>
@@ -77,8 +77,9 @@ static int register_count;
static DEFINE_MUTEX(register_count_mutex);
static DEFINE_MUTEX(video_list_lock);
static LIST_HEAD(video_bus_head);
-static int acpi_video_bus_probe(struct platform_device *pdev);
-static void acpi_video_bus_remove(struct platform_device *pdev);
+static int acpi_video_bus_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id);
+static void acpi_video_bus_remove(struct auxiliary_device *aux);
static void acpi_video_bus_notify(acpi_handle handle, u32 event, void *data);
/*
@@ -93,19 +94,16 @@ enum acpi_video_level_idx {
ACPI_VIDEO_FIRST_LEVEL, /* actual supported levels begin here */
};
-static const struct acpi_device_id video_device_ids[] = {
- {ACPI_VIDEO_HID, 0},
- {"", 0},
+static const struct auxiliary_device_id video_bus_auxiliary_id_table[] = {
+ { .name = "acpi.video_bus" },
+ {},
};
-MODULE_DEVICE_TABLE(acpi, video_device_ids);
+MODULE_DEVICE_TABLE(auxiliary, video_bus_auxiliary_id_table);
-static struct platform_driver acpi_video_bus = {
+static struct auxiliary_driver acpi_video_bus = {
.probe = acpi_video_bus_probe,
.remove = acpi_video_bus_remove,
- .driver = {
- .name = "acpi-video",
- .acpi_match_table = video_device_ids,
- },
+ .id_table = video_bus_auxiliary_id_table,
};
struct acpi_video_bus_flags {
@@ -1885,7 +1883,7 @@ static void acpi_video_dev_add_notify_handler(struct acpi_video_device *device)
}
static int acpi_video_bus_add_notify_handler(struct acpi_video_bus *video,
- struct platform_device *pdev)
+ struct device *parent)
{
struct input_dev *input;
struct acpi_video_device *dev;
@@ -1908,7 +1906,7 @@ static int acpi_video_bus_add_notify_handler(struct acpi_video_bus *video,
input->phys = video->phys;
input->id.bustype = BUS_HOST;
input->id.product = 0x06;
- input->dev.parent = &pdev->dev;
+ input->dev.parent = parent;
input->evbit[0] = BIT(EV_KEY);
set_bit(KEY_SWITCHVIDEOMODE, input->keybit);
set_bit(KEY_VIDEO_NEXT, input->keybit);
@@ -1980,9 +1978,10 @@ static int acpi_video_bus_put_devices(struct acpi_video_bus *video)
static int instance;
-static int acpi_video_bus_probe(struct platform_device *pdev)
+static int acpi_video_bus_probe(struct auxiliary_device *aux_dev,
+ const struct auxiliary_device_id *id_unused)
{
- struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
+ struct acpi_device *device = ACPI_COMPANION(&aux_dev->dev);
struct acpi_video_bus *video;
bool auto_detect;
int error;
@@ -2019,7 +2018,7 @@ static int acpi_video_bus_probe(struct platform_device *pdev)
instance++;
}
- platform_set_drvdata(pdev, video);
+ auxiliary_set_drvdata(aux_dev, video);
video->device = device;
strscpy(acpi_device_name(device), ACPI_VIDEO_BUS_NAME);
@@ -2068,7 +2067,7 @@ static int acpi_video_bus_probe(struct platform_device *pdev)
!auto_detect)
acpi_video_bus_register_backlight(video);
- error = acpi_video_bus_add_notify_handler(video, pdev);
+ error = acpi_video_bus_add_notify_handler(video, &aux_dev->dev);
if (error)
goto err_del;
@@ -2096,10 +2095,10 @@ err_free_video:
return error;
}
-static void acpi_video_bus_remove(struct platform_device *pdev)
+static void acpi_video_bus_remove(struct auxiliary_device *aux_dev)
{
- struct acpi_video_bus *video = platform_get_drvdata(pdev);
- struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
+ struct acpi_video_bus *video = auxiliary_get_drvdata(aux_dev);
+ struct acpi_device *device = ACPI_COMPANION(&aux_dev->dev);
acpi_dev_remove_notify_handler(device, ACPI_DEVICE_NOTIFY,
acpi_video_bus_notify);
@@ -2163,7 +2162,7 @@ int acpi_video_register(void)
dmi_check_system(video_dmi_table);
- ret = platform_driver_register(&acpi_video_bus);
+ ret = auxiliary_driver_register(&acpi_video_bus);
if (ret)
goto leave;
@@ -2183,7 +2182,7 @@ void acpi_video_unregister(void)
{
mutex_lock(&register_count_mutex);
if (register_count) {
- platform_driver_unregister(&acpi_video_bus);
+ auxiliary_driver_unregister(&acpi_video_bus);
register_count = 0;
may_report_brightness_keys = false;
}
diff --git a/drivers/acpi/acpica/acpredef.h b/drivers/acpi/acpica/acpredef.h
index 6c9b5bf7d392..07d5790d09f8 100644
--- a/drivers/acpi/acpica/acpredef.h
+++ b/drivers/acpi/acpica/acpredef.h
@@ -451,7 +451,7 @@ const union acpi_predefined_info acpi_gbl_predefined_methods[] = {
{{"_DSM",
METHOD_4ARGS(ACPI_TYPE_BUFFER, ACPI_TYPE_INTEGER, ACPI_TYPE_INTEGER,
- ACPI_TYPE_ANY | ACPI_TYPE_PACKAGE) |
+ ACPI_TYPE_PACKAGE | ACPI_TYPE_ANY) |
ARG_COUNT_IS_MINIMUM,
METHOD_RETURNS(ACPI_RTYPE_ALL)}}, /* Must return a value, but it can be of any type */
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index f6707325f582..2ec095e2009e 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -818,9 +818,6 @@ const struct acpi_device *acpi_companion_match(const struct device *dev)
if (list_empty(&adev->pnp.ids))
return NULL;
- if (adev->pnp.type.backlight)
- return adev;
-
return acpi_primary_dev_companion(adev, dev);
}
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 5b777316b9ac..62b9c83d4f20 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1681,7 +1681,7 @@ acpi_status __init acpi_os_initialize(void)
* Use acpi_os_map_generic_address to pre-map the reset
* register if it's in system memory.
*/
- void *rv;
+ void __iomem *rv;
rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
pr_debug("%s: Reset register mapping %s\n", __func__,
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index dfdd004fb1a9..e8cdbdb46fdb 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -6,6 +6,7 @@
#define pr_fmt(fmt) "ACPI: " fmt
#include <linux/async.h>
+#include <linux/auxiliary_bus.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
@@ -2192,6 +2193,44 @@ static acpi_status acpi_bus_check_add_2(acpi_handle handle, u32 lvl_not_used,
return acpi_bus_check_add(handle, false, (struct acpi_device **)ret_p);
}
+static void acpi_video_bus_device_release(struct device *dev)
+{
+ struct auxiliary_device *aux_dev = to_auxiliary_dev(dev);
+
+ kfree(aux_dev);
+}
+
+static void acpi_create_video_bus_device(struct acpi_device *adev,
+ struct acpi_device *parent)
+{
+ struct auxiliary_device *aux_dev;
+ static unsigned int aux_dev_id;
+
+ aux_dev = kzalloc_obj(*aux_dev);
+ if (!aux_dev)
+ return;
+
+ aux_dev->id = aux_dev_id++;
+ aux_dev->name = "video_bus";
+ aux_dev->dev.parent = acpi_get_first_physical_node(parent);
+ if (!aux_dev->dev.parent)
+ goto err;
+
+ aux_dev->dev.release = acpi_video_bus_device_release;
+
+ if (auxiliary_device_init(aux_dev))
+ goto err;
+
+ ACPI_COMPANION_SET(&aux_dev->dev, adev);
+ if (__auxiliary_device_add(aux_dev, "acpi"))
+ auxiliary_device_uninit(aux_dev);
+
+ return;
+
+err:
+ kfree(aux_dev);
+}
+
struct acpi_scan_system_dev {
struct list_head node;
struct acpi_device *adev;
@@ -2229,6 +2268,12 @@ static void acpi_default_enumeration(struct acpi_device *device)
sd->adev = device;
list_add_tail(&sd->node, &acpi_scan_system_dev_list);
}
+ } else if (device->pnp.type.backlight) {
+ struct acpi_device *parent;
+
+ parent = acpi_dev_parent(device);
+ if (parent)
+ acpi_create_video_bus_device(device, parent);
} else {
/* For a regular device object, create a platform device. */
acpi_create_platform_device(device, NULL);
diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
index fdd97112ef5c..9dfc154e5dd4 100644
--- a/drivers/android/binder/page_range.rs
+++ b/drivers/android/binder/page_range.rs
@@ -142,6 +142,30 @@ pub(crate) struct ShrinkablePageRange {
_pin: PhantomPinned,
}
+// We do not define any ops. For now, used only to check identity of vmas.
+static BINDER_VM_OPS: bindings::vm_operations_struct = pin_init::zeroed();
+
+// To ensure that we do not accidentally install pages into or zap pages from the wrong vma, we
+// check its vm_ops and private data before using it.
+fn check_vma(vma: &virt::VmaRef, owner: *const ShrinkablePageRange) -> Option<&virt::VmaMixedMap> {
+ // SAFETY: Just reading the vm_ops pointer of any active vma is safe.
+ let vm_ops = unsafe { (*vma.as_ptr()).vm_ops };
+ if !ptr::eq(vm_ops, &BINDER_VM_OPS) {
+ return None;
+ }
+
+ // SAFETY: Reading the vm_private_data pointer of a binder-owned vma is safe.
+ let vm_private_data = unsafe { (*vma.as_ptr()).vm_private_data };
+ // The ShrinkablePageRange is only dropped when the Process is dropped, which only happens once
+ // the file's ->release handler is invoked, which means the ShrinkablePageRange outlives any
+ // VMA associated with it, so there can't be any false positives due to pointer reuse here.
+ if !ptr::eq(vm_private_data, owner.cast()) {
+ return None;
+ }
+
+ vma.as_mixedmap_vma()
+}
+
struct Inner {
/// Array of pages.
///
@@ -308,6 +332,18 @@ impl ShrinkablePageRange {
inner.size = num_pages;
inner.vma_addr = vma.start();
+ // This pointer is only used for comparison - it's not dereferenced.
+ //
+ // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on
+ // `vm_private_data`.
+ unsafe {
+ (*vma.as_ptr()).vm_private_data = ptr::from_ref(self).cast_mut().cast::<c_void>()
+ };
+
+ // SAFETY: We own the vma, and we don't use any methods on VmaNew that rely on
+ // `vm_ops`.
+ unsafe { (*vma.as_ptr()).vm_ops = &BINDER_VM_OPS };
+
Ok(num_pages)
}
@@ -399,22 +435,25 @@ impl ShrinkablePageRange {
//
// Using `mmput_async` avoids this, because then the `mm` cleanup is instead queued to a
// workqueue.
- MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?)
- .mmap_read_lock()
- .vma_lookup(vma_addr)
- .ok_or(ESRCH)?
- .as_mixedmap_vma()
- .ok_or(ESRCH)?
- .vm_insert_page(user_page_addr, &new_page)
- .inspect_err(|err| {
- pr_warn!(
- "Failed to vm_insert_page({}): vma_addr:{} i:{} err:{:?}",
- user_page_addr,
- vma_addr,
- i,
- err
- )
- })?;
+ let mm = MmWithUser::into_mmput_async(self.mm.mmget_not_zero().ok_or(ESRCH)?);
+ {
+ let vma_read;
+ let mmap_read;
+ let vma = if let Some(ret) = mm.lock_vma_under_rcu(vma_addr) {
+ vma_read = ret;
+ check_vma(&vma_read, self)
+ } else {
+ mmap_read = mm.mmap_read_lock();
+ mmap_read
+ .vma_lookup(vma_addr)
+ .and_then(|vma| check_vma(vma, self))
+ };
+
+ match vma {
+ Some(vma) => vma.vm_insert_page(user_page_addr, &new_page)?,
+ None => return Err(ESRCH),
+ }
+ }
let inner = self.lock.lock();
@@ -667,12 +706,15 @@ unsafe extern "C" fn rust_shrink_free_page(
let mmap_read;
let mm_mutex;
let vma_addr;
+ let range_ptr;
{
// CAST: The `list_head` field is first in `PageInfo`.
let info = item as *mut PageInfo;
// SAFETY: The `range` field of `PageInfo` is immutable.
- let range = unsafe { &*((*info).range) };
+ range_ptr = unsafe { (*info).range };
+ // SAFETY: The `range` outlives its `PageInfo` values.
+ let range = unsafe { &*range_ptr };
mm = match range.mm.mmget_not_zero() {
Some(mm) => MmWithUser::into_mmput_async(mm),
@@ -717,9 +759,11 @@ unsafe extern "C" fn rust_shrink_free_page(
// SAFETY: The lru lock is locked when this method is called.
unsafe { bindings::spin_unlock(&raw mut (*lru).lock) };
- if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
- let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
- vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+ if let Some(unchecked_vma) = mmap_read.vma_lookup(vma_addr) {
+ if let Some(vma) = check_vma(unchecked_vma, range_ptr) {
+ let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
+ vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+ }
}
drop(mmap_read);
diff --git a/drivers/android/binder/process.rs b/drivers/android/binder/process.rs
index 41de5593197c..f06498129aa9 100644
--- a/drivers/android/binder/process.rs
+++ b/drivers/android/binder/process.rs
@@ -1295,7 +1295,8 @@ impl Process {
}
pub(crate) fn dead_binder_done(&self, cookie: u64, thread: &Thread) {
- if let Some(death) = self.inner.lock().pull_delivered_death(cookie) {
+ let death = self.inner.lock().pull_delivered_death(cookie);
+ if let Some(death) = death {
death.set_notification_done(thread);
}
}
diff --git a/drivers/android/binder/range_alloc/array.rs b/drivers/android/binder/range_alloc/array.rs
index 07e1dec2ce63..ada1d1b4302e 100644
--- a/drivers/android/binder/range_alloc/array.rs
+++ b/drivers/android/binder/range_alloc/array.rs
@@ -118,7 +118,7 @@ impl<T> ArrayRangeAllocator<T> {
size: usize,
is_oneway: bool,
pid: Pid,
- ) -> Result<usize> {
+ ) -> Result<(usize, bool)> {
// Compute new value of free_oneway_space, which is set only on success.
let new_oneway_space = if is_oneway {
match self.free_oneway_space.checked_sub(size) {
@@ -146,7 +146,38 @@ impl<T> ArrayRangeAllocator<T> {
.ok()
.unwrap();
- Ok(insert_at_offset)
+ // Start detecting spammers once we have less than 20%
+ // of async space left (which is less than 10% of total
+ // buffer size).
+ //
+ // (This will short-circuit, so `low_oneway_space` is
+ // only called when necessary.)
+ let oneway_spam_detected =
+ is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
+
+ Ok((insert_at_offset, oneway_spam_detected))
+ }
+
+ /// Find the amount and size of buffers allocated by the current caller.
+ ///
+ /// The idea is that once we cross the threshold, whoever is responsible
+ /// for the low async space is likely to try to send another async transaction,
+ /// and at some point we'll catch them in the act. This is more efficient
+ /// than keeping a map per pid.
+ fn low_oneway_space(&self, calling_pid: Pid) -> bool {
+ let mut total_alloc_size = 0;
+ let mut num_buffers = 0;
+
+ // Warn if this pid has more than 50 transactions, or more than 50% of
+ // async space (which is 25% of total buffer size). Oneway spam is only
+ // detected when the threshold is exceeded.
+ for range in &self.ranges {
+ if range.state.is_oneway() && range.state.pid() == calling_pid {
+ total_alloc_size += range.size;
+ num_buffers += 1;
+ }
+ }
+ num_buffers > 50 || total_alloc_size > self.size / 4
}
pub(crate) fn reservation_abort(&mut self, offset: usize) -> Result<FreedRange> {
diff --git a/drivers/android/binder/range_alloc/mod.rs b/drivers/android/binder/range_alloc/mod.rs
index 2301e2bc1a1f..1f4734468ff1 100644
--- a/drivers/android/binder/range_alloc/mod.rs
+++ b/drivers/android/binder/range_alloc/mod.rs
@@ -188,11 +188,11 @@ impl<T> RangeAllocator<T> {
self.reserve_new(args)
}
Impl::Array(array) => {
- let offset =
+ let (offset, oneway_spam_detected) =
array.reserve_new(args.debug_id, args.size, args.is_oneway, args.pid)?;
Ok(ReserveNew::Success(ReserveNewSuccess {
offset,
- oneway_spam_detected: false,
+ oneway_spam_detected,
_empty_array_alloc: args.empty_array_alloc,
_new_tree_alloc: args.new_tree_alloc,
_tree_alloc: args.tree_alloc,
diff --git a/drivers/android/binder/range_alloc/tree.rs b/drivers/android/binder/range_alloc/tree.rs
index 838fdd2b47ea..48796fcdb362 100644
--- a/drivers/android/binder/range_alloc/tree.rs
+++ b/drivers/android/binder/range_alloc/tree.rs
@@ -164,15 +164,6 @@ impl<T> TreeRangeAllocator<T> {
self.free_oneway_space
};
- // Start detecting spammers once we have less than 20%
- // of async space left (which is less than 10% of total
- // buffer size).
- //
- // (This will short-circut, so `low_oneway_space` is
- // only called when necessary.)
- let oneway_spam_detected =
- is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
-
let (found_size, found_off, tree_node, free_tree_node) = match self.find_best_match(size) {
None => {
pr_warn!("ENOSPC from range_alloc.reserve_new - size: {}", size);
@@ -203,6 +194,15 @@ impl<T> TreeRangeAllocator<T> {
self.free_tree.insert(free_tree_node);
}
+ // Start detecting spammers once we have less than 20%
+ // of async space left (which is less than 10% of total
+ // buffer size).
+ //
+ // (This will short-circuit, so `low_oneway_space` is
+ // only called when necessary.)
+ let oneway_spam_detected =
+ is_oneway && new_oneway_space < self.size / 10 && self.low_oneway_space(pid);
+
Ok((found_off, oneway_spam_detected))
}
diff --git a/drivers/android/binder/thread.rs b/drivers/android/binder/thread.rs
index 0b62d24b2118..c004214b1662 100644
--- a/drivers/android/binder/thread.rs
+++ b/drivers/android/binder/thread.rs
@@ -1015,12 +1015,9 @@ impl Thread {
// Copy offsets if there are any.
if offsets_size > 0 {
- {
- let mut reader =
- UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
- .reader();
- alloc.copy_into(&mut reader, aligned_data_size, offsets_size)?;
- }
+ let mut offsets_reader =
+ UserSlice::new(UserPtr::from_addr(trd_data_ptr.offsets as _), offsets_size)
+ .reader();
let offsets_start = aligned_data_size;
let offsets_end = aligned_data_size + offsets_size;
@@ -1041,11 +1038,9 @@ impl Thread {
.step_by(size_of::<u64>())
.enumerate()
{
- let offset: usize = view
- .alloc
- .read::<u64>(index_offset)?
- .try_into()
- .map_err(|_| EINVAL)?;
+ let offset = offsets_reader.read::<u64>()?;
+ view.alloc.write(index_offset, &offset)?;
+ let offset: usize = offset.try_into().map_err(|_| EINVAL)?;
if offset < end_of_previous_object || !is_aligned(offset, size_of::<u32>()) {
pr_warn!("Got transaction with invalid offset.");
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 0ee8ea971aa4..335288e8b5b3 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -1895,6 +1895,7 @@ void pm_runtime_reinit(struct device *dev)
void pm_runtime_remove(struct device *dev)
{
__pm_runtime_disable(dev, false);
+ flush_work(&dev->power.work);
pm_runtime_reinit(dev);
}
diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
index 004f367243b6..63aeb7a76a8c 100644
--- a/drivers/block/ublk_drv.c
+++ b/drivers/block/ublk_drv.c
@@ -4443,7 +4443,9 @@ static int ublk_ctrl_start_dev(struct ublk_device *ub,
/* Skip partition scan if disabled by user */
if (ub->dev_info.flags & UBLK_F_NO_AUTO_PART_SCAN) {
- clear_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
+ /* Not clear for unprivileged daemons, see comment above */
+ if (!ub->unprivileged_daemons)
+ clear_bit(GD_SUPPRESS_PART_SCAN, &disk->state);
} else {
/* Schedule async partition scan for trusted daemons */
if (!ub->unprivileged_daemons)
@@ -5006,15 +5008,22 @@ static int ublk_ctrl_get_features(const struct ublksrv_ctrl_cmd *header)
return 0;
}
-static void ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header)
+static int ublk_ctrl_set_size(struct ublk_device *ub, const struct ublksrv_ctrl_cmd *header)
{
struct ublk_param_basic *p = &ub->params.basic;
u64 new_size = header->data[0];
+ int ret = 0;
mutex_lock(&ub->mutex);
+ if (!ub->ub_disk) {
+ ret = -ENODEV;
+ goto out;
+ }
p->dev_sectors = new_size;
set_capacity_and_notify(ub->ub_disk, p->dev_sectors);
+out:
mutex_unlock(&ub->mutex);
+ return ret;
}
struct count_busy {
@@ -5335,8 +5344,7 @@ static int ublk_ctrl_uring_cmd(struct io_uring_cmd *cmd,
ret = ublk_ctrl_end_recovery(ub, &header);
break;
case UBLK_CMD_UPDATE_SIZE:
- ublk_ctrl_set_size(ub, &header);
- ret = 0;
+ ret = ublk_ctrl_set_size(ub, &header);
break;
case UBLK_CMD_QUIESCE_DEV:
ret = ublk_ctrl_quiesce_dev(ub, &header);
diff --git a/drivers/cache/ax45mp_cache.c b/drivers/cache/ax45mp_cache.c
index 1d7dd3d2c101..934c5087ec2b 100644
--- a/drivers/cache/ax45mp_cache.c
+++ b/drivers/cache/ax45mp_cache.c
@@ -178,11 +178,11 @@ static const struct of_device_id ax45mp_cache_ids[] = {
static int __init ax45mp_cache_init(void)
{
- struct device_node *np;
struct resource res;
int ret;
- np = of_find_matching_node(NULL, ax45mp_cache_ids);
+ struct device_node *np __free(device_node) =
+ of_find_matching_node(NULL, ax45mp_cache_ids);
if (!of_device_is_available(np))
return -ENODEV;
diff --git a/drivers/cache/starfive_starlink_cache.c b/drivers/cache/starfive_starlink_cache.c
index 24c7d078ca22..3a25d2d7c70c 100644
--- a/drivers/cache/starfive_starlink_cache.c
+++ b/drivers/cache/starfive_starlink_cache.c
@@ -102,11 +102,11 @@ static const struct of_device_id starlink_cache_ids[] = {
static int __init starlink_cache_init(void)
{
- struct device_node *np;
u32 block_size;
int ret;
- np = of_find_matching_node(NULL, starlink_cache_ids);
+ struct device_node *np __free(device_node) =
+ of_find_matching_node(NULL, starlink_cache_ids);
if (!of_device_is_available(np))
return -ENODEV;
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 65fbb8e807b9..c7876e9e024f 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -359,16 +359,6 @@ noinstr int cpuidle_enter_state(struct cpuidle_device *dev,
int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
bool *stop_tick)
{
- /*
- * If there is only a single idle state (or none), there is nothing
- * meaningful for the governor to choose. Skip the governor and
- * always use state 0 with the tick running.
- */
- if (drv->state_count <= 1) {
- *stop_tick = false;
- return 0;
- }
-
return cpuidle_curr_governor->select(drv, dev, stop_tick);
}
diff --git a/drivers/crypto/ccp/sev-dev.c b/drivers/crypto/ccp/sev-dev.c
index 8b2dfc11289b..aebf4dad545e 100644
--- a/drivers/crypto/ccp/sev-dev.c
+++ b/drivers/crypto/ccp/sev-dev.c
@@ -2408,10 +2408,8 @@ static int sev_ioctl_do_snp_platform_status(struct sev_issue_cmd *argp)
* in Firmware state on failure. Use snp_reclaim_pages() to
* transition either case back to Hypervisor-owned state.
*/
- if (snp_reclaim_pages(__pa(data), 1, true)) {
- snp_leak_pages(__page_to_pfn(status_page), 1);
+ if (snp_reclaim_pages(__pa(data), 1, true))
return -EFAULT;
- }
}
if (ret)
diff --git a/drivers/crypto/padlock-sha.c b/drivers/crypto/padlock-sha.c
index 329f60ad422e..9214bbfc868f 100644
--- a/drivers/crypto/padlock-sha.c
+++ b/drivers/crypto/padlock-sha.c
@@ -332,6 +332,13 @@ static int __init padlock_init(void)
if (!x86_match_cpu(padlock_sha_ids) || !boot_cpu_has(X86_FEATURE_PHE_EN))
return -ENODEV;
+ /*
+ * Skip family 0x07 and newer used by Zhaoxin processors,
+ * as the driver's self-tests fail on these CPUs.
+ */
+ if (c->x86 >= 0x07)
+ return -ENODEV;
+
/* Register the newly added algorithm module if on *
* VIA Nano processor, or else just do as before */
if (c->x86_model < 0x0f) {
diff --git a/drivers/firmware/arm_ffa/driver.c b/drivers/firmware/arm_ffa/driver.c
index 12a625387d6e..f2f94d4d533e 100644
--- a/drivers/firmware/arm_ffa/driver.c
+++ b/drivers/firmware/arm_ffa/driver.c
@@ -205,12 +205,12 @@ static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
return 0;
}
-static int ffa_rxtx_unmap(u16 vm_id)
+static int ffa_rxtx_unmap(void)
{
ffa_value_t ret;
invoke_ffa_fn((ffa_value_t){
- .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
+ .a0 = FFA_RXTX_UNMAP,
}, &ret);
if (ret.a0 == FFA_ERROR)
@@ -2097,7 +2097,7 @@ static int __init ffa_init(void)
pr_err("failed to setup partitions\n");
ffa_notifications_cleanup();
- ffa_rxtx_unmap(drv_info->vm_id);
+ ffa_rxtx_unmap();
free_pages:
if (drv_info->tx_buffer)
free_pages_exact(drv_info->tx_buffer, rxtx_bufsz);
@@ -2112,7 +2112,7 @@ static void __exit ffa_exit(void)
{
ffa_notifications_cleanup();
ffa_partitions_cleanup();
- ffa_rxtx_unmap(drv_info->vm_id);
+ ffa_rxtx_unmap();
free_pages_exact(drv_info->tx_buffer, drv_info->rxtx_bufsz);
free_pages_exact(drv_info->rx_buffer, drv_info->rxtx_bufsz);
kfree(drv_info);
diff --git a/drivers/firmware/arm_scmi/notify.c b/drivers/firmware/arm_scmi/notify.c
index 9168794adae4..40ec184eedae 100644
--- a/drivers/firmware/arm_scmi/notify.c
+++ b/drivers/firmware/arm_scmi/notify.c
@@ -1066,7 +1066,7 @@ static int scmi_register_event_handler(struct scmi_notify_instance *ni,
* since at creation time we usually want to have all setup and ready before
* events really start flowing.
*
- * Return: A properly refcounted handler on Success, NULL on Failure
+ * Return: A properly refcounted handler on Success, ERR_PTR on Failure
*/
static inline struct scmi_event_handler *
__scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
@@ -1113,7 +1113,7 @@ __scmi_event_handler_get_ops(struct scmi_notify_instance *ni,
}
mutex_unlock(&ni->pending_mtx);
- return hndl;
+ return hndl ?: ERR_PTR(-ENODEV);
}
static struct scmi_event_handler *
diff --git a/drivers/firmware/arm_scmi/protocols.h b/drivers/firmware/arm_scmi/protocols.h
index 4c75970326e6..f51245aca259 100644
--- a/drivers/firmware/arm_scmi/protocols.h
+++ b/drivers/firmware/arm_scmi/protocols.h
@@ -189,13 +189,13 @@ struct scmi_protocol_handle {
/**
* struct scmi_iterator_state - Iterator current state descriptor
- * @desc_index: Starting index for the current mulit-part request.
+ * @desc_index: Starting index for the current multi-part request.
* @num_returned: Number of returned items in the last multi-part reply.
* @num_remaining: Number of remaining items in the multi-part message.
* @max_resources: Maximum acceptable number of items, configured by the caller
* depending on the underlying resources that it is querying.
* @loop_idx: The iterator loop index in the current multi-part reply.
- * @rx_len: Size in bytes of the currenly processed message; it can be used by
+ * @rx_len: Size in bytes of the currently processed message; it can be used by
* the user of the iterator to verify a reply size.
* @priv: Optional pointer to some additional state-related private data setup
* by the caller during the iterations.
diff --git a/drivers/firmware/arm_scpi.c b/drivers/firmware/arm_scpi.c
index 00e74449ce09..2acad5fa5a28 100644
--- a/drivers/firmware/arm_scpi.c
+++ b/drivers/firmware/arm_scpi.c
@@ -18,6 +18,7 @@
#include <linux/bitmap.h>
#include <linux/bitfield.h>
+#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/export.h>
@@ -940,13 +941,13 @@ static int scpi_probe(struct platform_device *pdev)
int idx = scpi_drvinfo->num_chans;
struct scpi_chan *pchan = scpi_drvinfo->channels + idx;
struct mbox_client *cl = &pchan->cl;
- struct device_node *shmem = of_parse_phandle(np, "shmem", idx);
+ struct device_node *shmem __free(device_node) =
+ of_parse_phandle(np, "shmem", idx);
if (!of_match_node(shmem_of_match, shmem))
return -ENXIO;
ret = of_address_to_resource(shmem, 0, &res);
- of_node_put(shmem);
if (ret) {
dev_err(dev, "failed to get SCPI payload mem resource\n");
return ret;
diff --git a/drivers/firmware/cirrus/cs_dsp.c b/drivers/firmware/cirrus/cs_dsp.c
index b4f1c01e3b5b..5d8be0ac7c5e 100644
--- a/drivers/firmware/cirrus/cs_dsp.c
+++ b/drivers/firmware/cirrus/cs_dsp.c
@@ -1610,11 +1610,17 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
region_name);
if (reg) {
+ /*
+ * Although we expect the underlying bus does not require
+ * physically-contiguous buffers, we pessimistically use
+ * a temporary buffer instead of trusting that the
+ * alignment of region->data is ok.
+ */
region_len = le32_to_cpu(region->len);
if (region_len > buf_len) {
buf_len = round_up(region_len, PAGE_SIZE);
- kfree(buf);
- buf = kmalloc(buf_len, GFP_KERNEL | GFP_DMA);
+ vfree(buf);
+ buf = vmalloc(buf_len);
if (!buf) {
ret = -ENOMEM;
goto out_fw;
@@ -1643,7 +1649,7 @@ static int cs_dsp_load(struct cs_dsp *dsp, const struct firmware *firmware,
ret = 0;
out_fw:
- kfree(buf);
+ vfree(buf);
if (ret == -EOVERFLOW)
cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
@@ -2331,11 +2337,17 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
}
if (reg) {
+ /*
+ * Although we expect the underlying bus does not require
+ * physically-contiguous buffers, we pessimistically use
+ * a temporary buffer instead of trusting that the
+ * alignment of blk->data is ok.
+ */
region_len = le32_to_cpu(blk->len);
if (region_len > buf_len) {
buf_len = round_up(region_len, PAGE_SIZE);
- kfree(buf);
- buf = kmalloc(buf_len, GFP_KERNEL | GFP_DMA);
+ vfree(buf);
+ buf = vmalloc(buf_len);
if (!buf) {
ret = -ENOMEM;
goto out_fw;
@@ -2366,7 +2378,7 @@ static int cs_dsp_load_coeff(struct cs_dsp *dsp, const struct firmware *firmware
ret = 0;
out_fw:
- kfree(buf);
+ vfree(buf);
if (ret == -EOVERFLOW)
cs_dsp_err(dsp, "%s: file content overflows file data\n", file);
diff --git a/drivers/firmware/stratix10-rsu.c b/drivers/firmware/stratix10-rsu.c
index 41da07c445a6..e1912108a0fe 100644
--- a/drivers/firmware/stratix10-rsu.c
+++ b/drivers/firmware/stratix10-rsu.c
@@ -768,7 +768,9 @@ static int stratix10_rsu_probe(struct platform_device *pdev)
rsu_async_status_callback);
if (ret) {
dev_err(dev, "Error, getting RSU status %i\n", ret);
+ stratix10_svc_remove_async_client(priv->chan);
stratix10_svc_free_channel(priv->chan);
+ return ret;
}
/* get DCMF version from firmware */
diff --git a/drivers/firmware/stratix10-svc.c b/drivers/firmware/stratix10-svc.c
index 6f5c298582ab..e9e35d67ef96 100644
--- a/drivers/firmware/stratix10-svc.c
+++ b/drivers/firmware/stratix10-svc.c
@@ -37,15 +37,14 @@
* service layer will return error to FPGA manager when timeout occurs,
* timeout is set to 30 seconds (30 * 1000) at Intel Stratix10 SoC.
*/
-#define SVC_NUM_DATA_IN_FIFO 32
+#define SVC_NUM_DATA_IN_FIFO 8
#define SVC_NUM_CHANNEL 4
-#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 200
+#define FPGA_CONFIG_DATA_CLAIM_TIMEOUT_MS 2000
#define FPGA_CONFIG_STATUS_TIMEOUT_SEC 30
#define BYTE_TO_WORD_SIZE 4
/* stratix10 service layer clients */
#define STRATIX10_RSU "stratix10-rsu"
-#define INTEL_FCS "intel-fcs"
/* Maximum number of SDM client IDs. */
#define MAX_SDM_CLIENT_IDS 16
@@ -105,11 +104,9 @@ struct stratix10_svc_chan;
/**
* struct stratix10_svc - svc private data
* @stratix10_svc_rsu: pointer to stratix10 RSU device
- * @intel_svc_fcs: pointer to the FCS device
*/
struct stratix10_svc {
struct platform_device *stratix10_svc_rsu;
- struct platform_device *intel_svc_fcs;
};
/**
@@ -251,12 +248,10 @@ struct stratix10_async_ctrl {
* @num_active_client: number of active service client
* @node: list management
* @genpool: memory pool pointing to the memory region
- * @task: pointer to the thread task which handles SMC or HVC call
- * @svc_fifo: a queue for storing service message data
* @complete_status: state for completion
- * @svc_fifo_lock: protect access to service message data queue
* @invoke_fn: function to issue secure monitor call or hypervisor call
* @svc: manages the list of client svc drivers
+ * @sdm_lock: serializes command/response transactions so only one is issued to the SDM at a time
* @actrl: async control structure
*
* This struct is used to create communication channels for service clients, to
@@ -269,12 +264,10 @@ struct stratix10_svc_controller {
int num_active_client;
struct list_head node;
struct gen_pool *genpool;
- struct task_struct *task;
- struct kfifo svc_fifo;
struct completion complete_status;
- spinlock_t svc_fifo_lock;
svc_invoke_fn *invoke_fn;
struct stratix10_svc *svc;
+ struct mutex sdm_lock;
struct stratix10_async_ctrl actrl;
};
@@ -283,6 +276,9 @@ struct stratix10_svc_controller {
* @ctrl: pointer to service controller which is the provider of this channel
* @scl: pointer to service client which owns the channel
* @name: service client name associated with the channel
+ * @task: pointer to the thread task which handles SMC or HVC call
+ * @svc_fifo: per-channel queue for storing service message data
+ * @svc_fifo_lock: protects access to this channel's service message data queue
* @lock: protect access to the channel
* @async_chan: reference to asynchronous channel object for this channel
*
@@ -293,6 +289,9 @@ struct stratix10_svc_chan {
struct stratix10_svc_controller *ctrl;
struct stratix10_svc_client *scl;
char *name;
+ struct task_struct *task;
+ struct kfifo svc_fifo;
+ spinlock_t svc_fifo_lock;
spinlock_t lock;
struct stratix10_async_chan *async_chan;
};
@@ -527,10 +526,10 @@ static void svc_thread_recv_status_ok(struct stratix10_svc_data *p_data,
*/
static int svc_normal_to_secure_thread(void *data)
{
- struct stratix10_svc_controller
- *ctrl = (struct stratix10_svc_controller *)data;
- struct stratix10_svc_data *pdata;
- struct stratix10_svc_cb_data *cbdata;
+ struct stratix10_svc_chan *chan = (struct stratix10_svc_chan *)data;
+ struct stratix10_svc_controller *ctrl = chan->ctrl;
+ struct stratix10_svc_data *pdata = NULL;
+ struct stratix10_svc_cb_data *cbdata = NULL;
struct arm_smccc_res res;
unsigned long a0, a1, a2, a3, a4, a5, a6, a7;
int ret_fifo = 0;
@@ -555,12 +554,12 @@ static int svc_normal_to_secure_thread(void *data)
a6 = 0;
a7 = 0;
- pr_debug("smc_hvc_shm_thread is running\n");
+ pr_debug("%s: %s: Thread is running!\n", __func__, chan->name);
while (!kthread_should_stop()) {
- ret_fifo = kfifo_out_spinlocked(&ctrl->svc_fifo,
+ ret_fifo = kfifo_out_spinlocked(&chan->svc_fifo,
pdata, sizeof(*pdata),
- &ctrl->svc_fifo_lock);
+ &chan->svc_fifo_lock);
if (!ret_fifo)
continue;
@@ -569,9 +568,25 @@ static int svc_normal_to_secure_thread(void *data)
(unsigned int)pdata->paddr, pdata->command,
(unsigned int)pdata->size);
+ /* SDM can only process one command at a time */
+ pr_debug("%s: %s: Thread is waiting for mutex!\n",
+ __func__, chan->name);
+ if (mutex_lock_interruptible(&ctrl->sdm_lock)) {
+ /* item already dequeued; notify client to unblock it */
+ cbdata->status = BIT(SVC_STATUS_ERROR);
+ cbdata->kaddr1 = NULL;
+ cbdata->kaddr2 = NULL;
+ cbdata->kaddr3 = NULL;
+ if (pdata->chan->scl)
+ pdata->chan->scl->receive_cb(pdata->chan->scl,
+ cbdata);
+ break;
+ }
+
switch (pdata->command) {
case COMMAND_RECONFIG_DATA_CLAIM:
svc_thread_cmd_data_claim(ctrl, pdata, cbdata);
+ mutex_unlock(&ctrl->sdm_lock);
continue;
case COMMAND_RECONFIG:
a0 = INTEL_SIP_SMC_FPGA_CONFIG_START;
@@ -700,10 +715,11 @@ static int svc_normal_to_secure_thread(void *data)
break;
default:
pr_warn("it shouldn't happen\n");
- break;
+ mutex_unlock(&ctrl->sdm_lock);
+ continue;
}
- pr_debug("%s: before SMC call -- a0=0x%016x a1=0x%016x",
- __func__,
+ pr_debug("%s: %s: before SMC call -- a0=0x%016x a1=0x%016x",
+ __func__, chan->name,
(unsigned int)a0,
(unsigned int)a1);
pr_debug(" a2=0x%016x\n", (unsigned int)a2);
@@ -712,8 +728,8 @@ static int svc_normal_to_secure_thread(void *data)
pr_debug(" a5=0x%016x\n", (unsigned int)a5);
ctrl->invoke_fn(a0, a1, a2, a3, a4, a5, a6, a7, &res);
- pr_debug("%s: after SMC call -- res.a0=0x%016x",
- __func__, (unsigned int)res.a0);
+ pr_debug("%s: %s: after SMC call -- res.a0=0x%016x",
+ __func__, chan->name, (unsigned int)res.a0);
pr_debug(" res.a1=0x%016x, res.a2=0x%016x",
(unsigned int)res.a1, (unsigned int)res.a2);
pr_debug(" res.a3=0x%016x\n", (unsigned int)res.a3);
@@ -728,6 +744,7 @@ static int svc_normal_to_secure_thread(void *data)
cbdata->kaddr2 = NULL;
cbdata->kaddr3 = NULL;
pdata->chan->scl->receive_cb(pdata->chan->scl, cbdata);
+ mutex_unlock(&ctrl->sdm_lock);
continue;
}
@@ -801,6 +818,8 @@ static int svc_normal_to_secure_thread(void *data)
break;
}
+
+ mutex_unlock(&ctrl->sdm_lock);
}
kfree(cbdata);
@@ -1696,22 +1715,33 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
if (!p_data)
return -ENOMEM;
- /* first client will create kernel thread */
- if (!chan->ctrl->task) {
- chan->ctrl->task =
- kthread_run_on_cpu(svc_normal_to_secure_thread,
- (void *)chan->ctrl,
- cpu, "svc_smc_hvc_thread");
- if (IS_ERR(chan->ctrl->task)) {
+ /* first caller creates the per-channel kthread */
+ if (!chan->task) {
+ struct task_struct *task;
+
+ task = kthread_run_on_cpu(svc_normal_to_secure_thread,
+ (void *)chan,
+ cpu, "svc_smc_hvc_thread");
+ if (IS_ERR(task)) {
dev_err(chan->ctrl->dev,
"failed to create svc_smc_hvc_thread\n");
kfree(p_data);
return -EINVAL;
}
+
+ spin_lock(&chan->lock);
+ if (chan->task) {
+ /* another caller won the race; discard our thread */
+ spin_unlock(&chan->lock);
+ kthread_stop(task);
+ } else {
+ chan->task = task;
+ spin_unlock(&chan->lock);
+ }
}
- pr_debug("%s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__,
- p_msg->payload, p_msg->command,
+ pr_debug("%s: %s: sent P-va=%p, P-com=%x, P-size=%u\n", __func__,
+ chan->name, p_msg->payload, p_msg->command,
(unsigned int)p_msg->payload_length);
if (list_empty(&svc_data_mem)) {
@@ -1747,12 +1777,16 @@ int stratix10_svc_send(struct stratix10_svc_chan *chan, void *msg)
p_data->arg[2] = p_msg->arg[2];
p_data->size = p_msg->payload_length;
p_data->chan = chan;
- pr_debug("%s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n", __func__,
- (unsigned int)p_data->paddr, p_data->command,
- (unsigned int)p_data->size);
- ret = kfifo_in_spinlocked(&chan->ctrl->svc_fifo, p_data,
+ pr_debug("%s: %s: put to FIFO pa=0x%016x, cmd=%x, size=%u\n",
+ __func__,
+ chan->name,
+ (unsigned int)p_data->paddr,
+ p_data->command,
+ (unsigned int)p_data->size);
+
+ ret = kfifo_in_spinlocked(&chan->svc_fifo, p_data,
sizeof(*p_data),
- &chan->ctrl->svc_fifo_lock);
+ &chan->svc_fifo_lock);
kfree(p_data);
@@ -1773,11 +1807,12 @@ EXPORT_SYMBOL_GPL(stratix10_svc_send);
*/
void stratix10_svc_done(struct stratix10_svc_chan *chan)
{
- /* stop thread when thread is running AND only one active client */
- if (chan->ctrl->task && chan->ctrl->num_active_client <= 1) {
- pr_debug("svc_smc_hvc_shm_thread is stopped\n");
- kthread_stop(chan->ctrl->task);
- chan->ctrl->task = NULL;
+ /* stop thread when thread is running */
+ if (chan->task) {
+ pr_debug("%s: %s: svc_smc_hvc_shm_thread is stopping\n",
+ __func__, chan->name);
+ kthread_stop(chan->task);
+ chan->task = NULL;
}
}
EXPORT_SYMBOL_GPL(stratix10_svc_done);
@@ -1817,8 +1852,8 @@ void *stratix10_svc_allocate_memory(struct stratix10_svc_chan *chan,
pmem->paddr = pa;
pmem->size = s;
list_add_tail(&pmem->node, &svc_data_mem);
- pr_debug("%s: va=%p, pa=0x%016x\n", __func__,
- pmem->vaddr, (unsigned int)pmem->paddr);
+ pr_debug("%s: %s: va=%p, pa=0x%016x\n", __func__,
+ chan->name, pmem->vaddr, (unsigned int)pmem->paddr);
return (void *)va;
}
@@ -1855,6 +1890,13 @@ static const struct of_device_id stratix10_svc_drv_match[] = {
{},
};
+static const char * const chan_names[SVC_NUM_CHANNEL] = {
+ SVC_CLIENT_FPGA,
+ SVC_CLIENT_RSU,
+ SVC_CLIENT_FCS,
+ SVC_CLIENT_HWMON
+};
+
static int stratix10_svc_drv_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -1862,11 +1904,11 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
struct stratix10_svc_chan *chans;
struct gen_pool *genpool;
struct stratix10_svc_sh_memory *sh_memory;
- struct stratix10_svc *svc;
+ struct stratix10_svc *svc = NULL;
svc_invoke_fn *invoke_fn;
size_t fifo_size;
- int ret;
+ int ret, i = 0;
/* get SMC or HVC function */
invoke_fn = get_invoke_func(dev);
@@ -1905,8 +1947,8 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
controller->num_active_client = 0;
controller->chans = chans;
controller->genpool = genpool;
- controller->task = NULL;
controller->invoke_fn = invoke_fn;
+ INIT_LIST_HEAD(&controller->node);
init_completion(&controller->complete_status);
ret = stratix10_svc_async_init(controller);
@@ -1917,32 +1959,20 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
}
fifo_size = sizeof(struct stratix10_svc_data) * SVC_NUM_DATA_IN_FIFO;
- ret = kfifo_alloc(&controller->svc_fifo, fifo_size, GFP_KERNEL);
- if (ret) {
- dev_err(dev, "failed to allocate FIFO\n");
- goto err_async_exit;
- }
- spin_lock_init(&controller->svc_fifo_lock);
-
- chans[0].scl = NULL;
- chans[0].ctrl = controller;
- chans[0].name = SVC_CLIENT_FPGA;
- spin_lock_init(&chans[0].lock);
+ mutex_init(&controller->sdm_lock);
- chans[1].scl = NULL;
- chans[1].ctrl = controller;
- chans[1].name = SVC_CLIENT_RSU;
- spin_lock_init(&chans[1].lock);
-
- chans[2].scl = NULL;
- chans[2].ctrl = controller;
- chans[2].name = SVC_CLIENT_FCS;
- spin_lock_init(&chans[2].lock);
-
- chans[3].scl = NULL;
- chans[3].ctrl = controller;
- chans[3].name = SVC_CLIENT_HWMON;
- spin_lock_init(&chans[3].lock);
+ for (i = 0; i < SVC_NUM_CHANNEL; i++) {
+ chans[i].scl = NULL;
+ chans[i].ctrl = controller;
+ chans[i].name = (char *)chan_names[i];
+ spin_lock_init(&chans[i].lock);
+ ret = kfifo_alloc(&chans[i].svc_fifo, fifo_size, GFP_KERNEL);
+ if (ret) {
+ dev_err(dev, "failed to allocate FIFO %d\n", i);
+ goto err_free_fifos;
+ }
+ spin_lock_init(&chans[i].svc_fifo_lock);
+ }
list_add_tail(&controller->node, &svc_ctrl);
platform_set_drvdata(pdev, controller);
@@ -1951,7 +1981,7 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
svc = devm_kzalloc(dev, sizeof(*svc), GFP_KERNEL);
if (!svc) {
ret = -ENOMEM;
- goto err_free_kfifo;
+ goto err_free_fifos;
}
controller->svc = svc;
@@ -1959,51 +1989,43 @@ static int stratix10_svc_drv_probe(struct platform_device *pdev)
if (!svc->stratix10_svc_rsu) {
dev_err(dev, "failed to allocate %s device\n", STRATIX10_RSU);
ret = -ENOMEM;
- goto err_free_kfifo;
+ goto err_free_fifos;
}
ret = platform_device_add(svc->stratix10_svc_rsu);
- if (ret) {
- platform_device_put(svc->stratix10_svc_rsu);
- goto err_free_kfifo;
- }
-
- svc->intel_svc_fcs = platform_device_alloc(INTEL_FCS, 1);
- if (!svc->intel_svc_fcs) {
- dev_err(dev, "failed to allocate %s device\n", INTEL_FCS);
- ret = -ENOMEM;
- goto err_unregister_rsu_dev;
- }
-
- ret = platform_device_add(svc->intel_svc_fcs);
- if (ret) {
- platform_device_put(svc->intel_svc_fcs);
- goto err_unregister_rsu_dev;
- }
+ if (ret)
+ goto err_put_device;
ret = of_platform_default_populate(dev_of_node(dev), NULL, dev);
if (ret)
- goto err_unregister_fcs_dev;
+ goto err_unregister_rsu_dev;
pr_info("Intel Service Layer Driver Initialized\n");
return 0;
-err_unregister_fcs_dev:
- platform_device_unregister(svc->intel_svc_fcs);
err_unregister_rsu_dev:
platform_device_unregister(svc->stratix10_svc_rsu);
-err_free_kfifo:
- kfifo_free(&controller->svc_fifo);
-err_async_exit:
+ goto err_free_fifos;
+err_put_device:
+ platform_device_put(svc->stratix10_svc_rsu);
+err_free_fifos:
+ /* only remove from list if list_add_tail() was reached */
+ if (!list_empty(&controller->node))
+ list_del(&controller->node);
+ /* free only the FIFOs that were successfully allocated */
+ while (i--)
+ kfifo_free(&chans[i].svc_fifo);
stratix10_svc_async_exit(controller);
err_destroy_pool:
gen_pool_destroy(genpool);
+
return ret;
}
static void stratix10_svc_drv_remove(struct platform_device *pdev)
{
+ int i;
struct stratix10_svc_controller *ctrl = platform_get_drvdata(pdev);
struct stratix10_svc *svc = ctrl->svc;
@@ -2011,14 +2033,16 @@ static void stratix10_svc_drv_remove(struct platform_device *pdev)
of_platform_depopulate(ctrl->dev);
- platform_device_unregister(svc->intel_svc_fcs);
platform_device_unregister(svc->stratix10_svc_rsu);
- kfifo_free(&ctrl->svc_fifo);
- if (ctrl->task) {
- kthread_stop(ctrl->task);
- ctrl->task = NULL;
+ for (i = 0; i < SVC_NUM_CHANNEL; i++) {
+ if (ctrl->chans[i].task) {
+ kthread_stop(ctrl->chans[i].task);
+ ctrl->chans[i].task = NULL;
+ }
+ kfifo_free(&ctrl->chans[i].svc_fifo);
}
+
if (ctrl->genpool)
gen_pool_destroy(ctrl->genpool);
list_del(&ctrl->node);
diff --git a/drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c b/drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
index 6fc4e3452b88..ee781d2f0b8e 100644
--- a/drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
+++ b/drivers/gpib/lpvo_usb_gpib/lpvo_usb_gpib.c
@@ -38,8 +38,10 @@ MODULE_DESCRIPTION("GPIB driver for LPVO usb devices");
/*
* Table of devices that work with this driver.
*
- * Currently, only one device is known to be used in the
- * lpvo_usb_gpib adapter (FTDI 0403:6001).
+ * Currently, only one device is known to be used in the lpvo_usb_gpib
+ * adapter (FTDI 0403:6001) but as this device id is already handled by the
+ * ftdi_sio USB serial driver the LPVO driver must not bind to it by default.
+ *
* If your adapter uses a different chip, insert a line
* in the following table with proper <Vendor-id>, <Product-id>.
*
@@ -50,7 +52,6 @@ MODULE_DESCRIPTION("GPIB driver for LPVO usb devices");
*/
static const struct usb_device_id skel_table[] = {
- { USB_DEVICE(0x0403, 0x6001) },
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, skel_table);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3e19b51a2763..d8296dfc5e8a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2690,8 +2690,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
break;
default:
r = amdgpu_discovery_set_ip_blocks(adev);
- if (r)
+ if (r) {
+ adev->num_ip_blocks = 0;
return r;
+ }
break;
}
@@ -3247,6 +3249,8 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev,
i = state == AMD_CG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
+ if (!adev->ip_blocks[i].version)
+ continue;
/* skip CG for GFX, SDMA on S0ix */
if (adev->in_s0ix &&
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
@@ -3286,6 +3290,8 @@ int amdgpu_device_set_pg_state(struct amdgpu_device *adev,
i = state == AMD_PG_STATE_GATE ? j : adev->num_ip_blocks - j - 1;
if (!adev->ip_blocks[i].status.late_initialized)
continue;
+ if (!adev->ip_blocks[i].version)
+ continue;
/* skip PG for GFX, SDMA on S0ix */
if (adev->in_s0ix &&
(adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GFX ||
@@ -3493,6 +3499,8 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev)
int i, r;
for (i = 0; i < adev->num_ip_blocks; i++) {
+ if (!adev->ip_blocks[i].version)
+ continue;
if (!adev->ip_blocks[i].version->funcs->early_fini)
continue;
@@ -3570,6 +3578,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
if (!adev->ip_blocks[i].status.sw)
continue;
+ if (!adev->ip_blocks[i].version)
+ continue;
if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_GMC) {
amdgpu_ucode_free_bo(adev);
amdgpu_free_static_csa(&adev->virt.csa_obj);
@@ -3596,6 +3606,8 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.late_initialized)
continue;
+ if (!adev->ip_blocks[i].version)
+ continue;
if (adev->ip_blocks[i].version->funcs->late_fini)
adev->ip_blocks[i].version->funcs->late_fini(&adev->ip_blocks[i]);
adev->ip_blocks[i].status.late_initialized = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 77e2133de5cf..7f19554b9ad1 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -83,7 +83,7 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
{
struct amdgpu_device *adev = drm_to_adev(dev);
- if (adev == NULL)
+ if (adev == NULL || !adev->num_ip_blocks)
return;
amdgpu_unregister_gpu_instance(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index dc8d2f52c7d6..e244c12ceb23 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -368,15 +368,15 @@ struct amdgpu_mode_info {
struct drm_property *plane_ctm_property;
/**
- * @shaper_lut_property: Plane property to set pre-blending shaper LUT
- * that converts color content before 3D LUT. If
- * plane_shaper_tf_property != Identity TF, AMD color module will
+ * @plane_shaper_lut_property: Plane property to set pre-blending
+ * shaper LUT that converts color content before 3D LUT.
+ * If plane_shaper_tf_property != Identity TF, AMD color module will
* combine the user LUT values with pre-defined TF into the LUT
* parameters to be programmed.
*/
struct drm_property *plane_shaper_lut_property;
/**
- * @shaper_lut_size_property: Plane property for the size of
+ * @plane_shaper_lut_size_property: Plane property for the size of
* pre-blending shaper LUT as supported by the driver (read-only).
*/
struct drm_property *plane_shaper_lut_size_property;
@@ -400,10 +400,10 @@ struct amdgpu_mode_info {
*/
struct drm_property *plane_lut3d_property;
/**
- * @plane_degamma_lut_size_property: Plane property to define the max
- * size of 3D LUT as supported by the driver (read-only). The max size
- * is the max size of one dimension and, therefore, the max number of
- * entries for 3D LUT array is the 3D LUT size cubed;
+ * @plane_lut3d_size_property: Plane property to define the max size
+ * of 3D LUT as supported by the driver (read-only). The max size is
+ * the max size of one dimension and, therefore, the max number of
+ * entries for 3D LUT array is the 3D LUT size cubed.
*/
struct drm_property *plane_lut3d_size_property;
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
index 5bfa5d1d0b36..023c7345ea54 100644
--- a/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -731,6 +731,9 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
int i;
struct amdgpu_device *adev = mes->adev;
union MESAPI_SET_HW_RESOURCES mes_set_hw_res_pkt;
+ uint32_t mes_rev = (pipe == AMDGPU_MES_SCHED_PIPE) ?
+ (mes->sched_version & AMDGPU_MES_VERSION_MASK) :
+ (mes->kiq_version & AMDGPU_MES_VERSION_MASK);
memset(&mes_set_hw_res_pkt, 0, sizeof(mes_set_hw_res_pkt));
@@ -785,7 +788,7 @@ static int mes_v12_0_set_hw_resources(struct amdgpu_mes *mes, int pipe)
* handling support, other queue will not use the oversubscribe timer.
* handling mode - 0: disabled; 1: basic version; 2: basic+ version
*/
- mes_set_hw_res_pkt.oversubscription_timer = 50;
+ mes_set_hw_res_pkt.oversubscription_timer = mes_rev < 0x8b ? 0 : 50;
mes_set_hw_res_pkt.unmapped_doorbell_handling = 1;
if (amdgpu_mes_log_enable) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 8ea31699d38b..f5d2847e1cbb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -593,6 +593,7 @@ int pqm_update_queue_properties(struct process_queue_manager *pqm,
p->queue_size)) {
pr_debug("ring buf 0x%llx size 0x%llx not mapped on GPU\n",
p->queue_address, p->queue_size);
+ amdgpu_bo_unreserve(vm->root.bo);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
index 3711d400773a..4c4e61bc91b5 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn20/dcn20_dccg.h
@@ -38,7 +38,11 @@
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 0),\
DCCG_SRII(PIXEL_RATE_CNTL, OTG, 1),\
SR(DISPCLK_FREQ_CHANGE_CNTL),\
- SR(DC_MEM_GLOBAL_PWR_REQ_CNTL)
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\
+ SR(MICROSECOND_TIME_BASE_DIV),\
+ SR(MILLISECOND_TIME_BASE_DIV),\
+ SR(DCCG_GATE_DISABLE_CNTL),\
+ SR(DCCG_GATE_DISABLE_CNTL2)
#define DCCG_REG_LIST_DCN2() \
DCCG_COMMON_REG_LIST_DCN_BASE(),\
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c b/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c
index 75c69348027e..c4d4eea140f3 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn21/dcn21_dccg.c
@@ -96,6 +96,25 @@ static void dccg21_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppcl
dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
+/*
+ * On DCN21 S0i3 resume, BIOS programs MICROSECOND_TIME_BASE_DIV to
+ * 0x00120464 as a marker that golden init has already been done.
+ * dcn21_s0i3_golden_init_wa() reads this marker later in bios_golden_init()
+ * to decide whether to skip golden init.
+ *
+ * dccg2_init() unconditionally overwrites MICROSECOND_TIME_BASE_DIV to
+ * 0x00120264, destroying the marker before it can be read.
+ *
+ * Guard the call: if the S0i3 marker is present, skip dccg2_init() so the
+ * WA can function correctly. bios_golden_init() will handle init in that case.
+ */
+static void dccg21_init(struct dccg *dccg)
+{
+ if (dccg2_is_s0i3_golden_init_wa_done(dccg))
+ return;
+
+ dccg2_init(dccg);
+}
static const struct dccg_funcs dccg21_funcs = {
.update_dpp_dto = dccg21_update_dpp_dto,
@@ -103,7 +122,7 @@ static const struct dccg_funcs dccg21_funcs = {
.set_fifo_errdet_ovr_en = dccg2_set_fifo_errdet_ovr_en,
.otg_add_pixel = dccg2_otg_add_pixel,
.otg_drop_pixel = dccg2_otg_drop_pixel,
- .dccg_init = dccg2_init,
+ .dccg_init = dccg21_init,
.refclk_setup = dccg2_refclk_setup, /* Deprecated - for backward compatibility only */
.allow_clock_gating = dccg2_allow_clock_gating,
.enable_memory_low_power = dccg2_enable_memory_low_power,
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h
index 067e49cb238e..e2381ca0be0b 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn301/dcn301_dccg.h
@@ -34,7 +34,13 @@
DCCG_SRII(DTO_PARAM, DPPCLK, 1),\
DCCG_SRII(DTO_PARAM, DPPCLK, 2),\
DCCG_SRII(DTO_PARAM, DPPCLK, 3),\
- SR(REFCLK_CNTL)
+ SR(REFCLK_CNTL),\
+ SR(DISPCLK_FREQ_CHANGE_CNTL),\
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\
+ SR(MICROSECOND_TIME_BASE_DIV),\
+ SR(MILLISECOND_TIME_BASE_DIV),\
+ SR(DCCG_GATE_DISABLE_CNTL),\
+ SR(DCCG_GATE_DISABLE_CNTL2)
#define DCCG_MASK_SH_LIST_DCN301(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_ENABLE, DPPCLK, 0, mask_sh),\
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
index bf659920d4cc..b5e3849ef12a 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn31/dcn31_dccg.h
@@ -64,9 +64,12 @@
SR(DSCCLK1_DTO_PARAM),\
SR(DSCCLK2_DTO_PARAM),\
SR(DSCCLK_DTO_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL),\
SR(DCCG_GATE_DISABLE_CNTL2),\
SR(DCCG_GATE_DISABLE_CNTL3),\
- SR(HDMISTREAMCLK0_DTO_PARAM)
+ SR(HDMISTREAMCLK0_DTO_PARAM),\
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\
+ SR(MICROSECOND_TIME_BASE_DIV)
#define DCCG_MASK_SH_LIST_DCN31(mask_sh) \
diff --git a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
index a609635f35db..ecbdc05f7c45 100644
--- a/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/dccg/dcn314/dcn314_dccg.h
@@ -70,11 +70,14 @@
SR(DSCCLK2_DTO_PARAM),\
SR(DSCCLK3_DTO_PARAM),\
SR(DSCCLK_DTO_CTRL),\
+ SR(DCCG_GATE_DISABLE_CNTL),\
SR(DCCG_GATE_DISABLE_CNTL2),\
SR(DCCG_GATE_DISABLE_CNTL3),\
SR(HDMISTREAMCLK0_DTO_PARAM),\
SR(OTG_PIXEL_RATE_DIV),\
- SR(DTBCLK_P_CNTL)
+ SR(DTBCLK_P_CNTL),\
+ SR(DC_MEM_GLOBAL_PWR_REQ_CNTL),\
+ SR(MICROSECOND_TIME_BASE_DIV)
#define DCCG_MASK_SH_LIST_DCN314_COMMON(mask_sh) \
DCCG_SFI(DPPCLK_DTO_CTRL, DTO_DB_EN, DPPCLK, 0, mask_sh),\
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index b32c053950c9..a8d63d4d1f6e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -2222,7 +2222,8 @@ static int smu_v13_0_0_restore_user_od_settings(struct smu_context *smu)
user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
BIT(PP_OD_FEATURE_UCLK_BIT) |
BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
- BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
res = smu_v13_0_0_upload_overdrive_table(smu, user_od_table);
user_od_table->OverDriveTable.FeatureCtrlMask = 0;
if (res == 0)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index f08cfa510a8a..5500a0f12f0e 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -2224,7 +2224,8 @@ static int smu_v13_0_7_restore_user_od_settings(struct smu_context *smu)
user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
BIT(PP_OD_FEATURE_UCLK_BIT) |
BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
- BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
res = smu_v13_0_7_upload_overdrive_table(smu, user_od_table);
user_od_table->OverDriveTable.FeatureCtrlMask = 0;
if (res == 0)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
index 9994d4369da8..73762d9b5969 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu14/smu_v14_0_2_ppt.c
@@ -2311,7 +2311,8 @@ static int smu_v14_0_2_restore_user_od_settings(struct smu_context *smu)
user_od_table->OverDriveTable.FeatureCtrlMask = BIT(PP_OD_FEATURE_GFXCLK_BIT) |
BIT(PP_OD_FEATURE_UCLK_BIT) |
BIT(PP_OD_FEATURE_GFX_VF_CURVE_BIT) |
- BIT(PP_OD_FEATURE_FAN_CURVE_BIT);
+ BIT(PP_OD_FEATURE_FAN_CURVE_BIT) |
+ BIT(PP_OD_FEATURE_ZERO_FAN_BIT);
res = smu_v14_0_2_upload_overdrive_table(smu, user_od_table);
user_od_table->OverDriveTable.FeatureCtrlMask = 0;
if (res == 0)
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi83.c b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
index f6736b4457bb..17a885244e1e 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi83.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi83.c
@@ -351,9 +351,9 @@ static u8 sn65dsi83_get_dsi_range(struct sn65dsi83 *ctx,
* DSI_CLK = mode clock * bpp / dsi_data_lanes / 2
* the 2 is there because the bus is DDR.
*/
- return DIV_ROUND_UP(clamp((unsigned int)mode->clock *
- mipi_dsi_pixel_format_to_bpp(ctx->dsi->format) /
- ctx->dsi->lanes / 2, 40000U, 500000U), 5000U);
+ return clamp((unsigned int)mode->clock *
+ mipi_dsi_pixel_format_to_bpp(ctx->dsi->format) /
+ ctx->dsi->lanes / 2, 40000U, 500000U) / 5000U;
}
static u8 sn65dsi83_get_dsi_div(struct sn65dsi83 *ctx)
@@ -517,6 +517,7 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state)
{
struct sn65dsi83 *ctx = bridge_to_sn65dsi83(bridge);
+ const unsigned int dual_factor = ctx->lvds_dual_link ? 2 : 1;
const struct drm_bridge_state *bridge_state;
const struct drm_crtc_state *crtc_state;
const struct drm_display_mode *mode;
@@ -653,18 +654,18 @@ static void sn65dsi83_atomic_pre_enable(struct drm_bridge *bridge,
/* 32 + 1 pixel clock to ensure proper operation */
le16val = cpu_to_le16(32 + 1);
regmap_bulk_write(ctx->regmap, REG_VID_CHA_SYNC_DELAY_LOW, &le16val, 2);
- le16val = cpu_to_le16(mode->hsync_end - mode->hsync_start);
+ le16val = cpu_to_le16((mode->hsync_end - mode->hsync_start) / dual_factor);
regmap_bulk_write(ctx->regmap, REG_VID_CHA_HSYNC_PULSE_WIDTH_LOW,
&le16val, 2);
le16val = cpu_to_le16(mode->vsync_end - mode->vsync_start);
regmap_bulk_write(ctx->regmap, REG_VID_CHA_VSYNC_PULSE_WIDTH_LOW,
&le16val, 2);
regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_BACK_PORCH,
- mode->htotal - mode->hsync_end);
+ (mode->htotal - mode->hsync_end) / dual_factor);
regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_BACK_PORCH,
mode->vtotal - mode->vsync_end);
regmap_write(ctx->regmap, REG_VID_CHA_HORIZONTAL_FRONT_PORCH,
- mode->hsync_start - mode->hdisplay);
+ (mode->hsync_start - mode->hdisplay) / dual_factor);
regmap_write(ctx->regmap, REG_VID_CHA_VERTICAL_FRONT_PORCH,
mode->vsync_start - mode->vdisplay);
regmap_write(ctx->regmap, REG_VID_CHA_TEST_PATTERN, 0x00);
diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c
index d0122d477610..17c2dead2c13 100644
--- a/drivers/gpu/drm/gud/gud_drv.c
+++ b/drivers/gpu/drm/gud/gud_drv.c
@@ -339,7 +339,9 @@ static int gud_stats_debugfs(struct seq_file *m, void *data)
}
static const struct drm_crtc_helper_funcs gud_crtc_helper_funcs = {
- .atomic_check = drm_crtc_helper_atomic_check
+ .atomic_check = drm_crtc_helper_atomic_check,
+ .atomic_enable = gud_crtc_atomic_enable,
+ .atomic_disable = gud_crtc_atomic_disable,
};
static const struct drm_crtc_funcs gud_crtc_funcs = {
@@ -364,6 +366,10 @@ static const struct drm_plane_funcs gud_plane_funcs = {
DRM_GEM_SHADOW_PLANE_FUNCS,
};
+static const struct drm_mode_config_helper_funcs gud_mode_config_helpers = {
+ .atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
static const struct drm_mode_config_funcs gud_mode_config_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
.atomic_check = drm_atomic_helper_check,
@@ -499,6 +505,7 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id)
drm->mode_config.min_height = le32_to_cpu(desc.min_height);
drm->mode_config.max_height = le32_to_cpu(desc.max_height);
drm->mode_config.funcs = &gud_mode_config_funcs;
+ drm->mode_config.helper_private = &gud_mode_config_helpers;
/* Format init */
formats_dev = devm_kmalloc(dev, GUD_FORMATS_MAX_NUM, GFP_KERNEL);
diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h
index d27c31648341..8eec8335f5f9 100644
--- a/drivers/gpu/drm/gud/gud_internal.h
+++ b/drivers/gpu/drm/gud/gud_internal.h
@@ -62,6 +62,10 @@ int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val);
void gud_clear_damage(struct gud_device *gdrm);
void gud_flush_work(struct work_struct *work);
+void gud_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
+void gud_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state);
int gud_plane_atomic_check(struct drm_plane *plane,
struct drm_atomic_state *state);
void gud_plane_atomic_update(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c
index 4b77be94348d..b355bf4d3389 100644
--- a/drivers/gpu/drm/gud/gud_pipe.c
+++ b/drivers/gpu/drm/gud/gud_pipe.c
@@ -580,6 +580,39 @@ out:
return ret;
}
+void gud_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = crtc->dev;
+ struct gud_device *gdrm = to_gud_device(drm);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1);
+ gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0);
+ gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, 1);
+
+ drm_dev_exit(idx);
+}
+
+void gud_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_atomic_state *state)
+{
+ struct drm_device *drm = crtc->dev;
+ struct gud_device *gdrm = to_gud_device(drm);
+ int idx;
+
+ if (!drm_dev_enter(drm, &idx))
+ return;
+
+ gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, 0);
+ gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0);
+
+ drm_dev_exit(idx);
+}
+
void gud_plane_atomic_update(struct drm_plane *plane,
struct drm_atomic_state *atomic_state)
{
@@ -607,24 +640,12 @@ void gud_plane_atomic_update(struct drm_plane *plane,
mutex_unlock(&gdrm->damage_lock);
}
- if (!drm_dev_enter(drm, &idx))
+ if (!crtc || !drm_dev_enter(drm, &idx))
return;
- if (!old_state->fb)
- gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 1);
-
- if (fb && (crtc->state->mode_changed || crtc->state->connectors_changed))
- gud_usb_set(gdrm, GUD_REQ_SET_STATE_COMMIT, 0, NULL, 0);
-
- if (crtc->state->active_changed)
- gud_usb_set_u8(gdrm, GUD_REQ_SET_DISPLAY_ENABLE, crtc->state->active);
-
- if (!fb)
- goto ctrl_disable;
-
ret = drm_gem_fb_begin_cpu_access(fb, DMA_FROM_DEVICE);
if (ret)
- goto ctrl_disable;
+ goto out;
drm_atomic_helper_damage_iter_init(&iter, old_state, new_state);
drm_atomic_for_each_plane_damage(&iter, &damage)
@@ -632,9 +653,6 @@ void gud_plane_atomic_update(struct drm_plane *plane,
drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE);
-ctrl_disable:
- if (!crtc->state->enable)
- gud_usb_set_u8(gdrm, GUD_REQ_SET_CONTROLLER_ENABLE, 0);
-
+out:
drm_dev_exit(idx);
}
diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c
index 07ffee38974b..f4f1b68f7543 100644
--- a/drivers/gpu/drm/i915/display/intel_alpm.c
+++ b/drivers/gpu/drm/i915/display/intel_alpm.c
@@ -43,12 +43,6 @@ bool intel_alpm_is_alpm_aux_less(struct intel_dp *intel_dp,
void intel_alpm_init(struct intel_dp *intel_dp)
{
- u8 dpcd;
-
- if (drm_dp_dpcd_readb(&intel_dp->aux, DP_RECEIVER_ALPM_CAP, &dpcd) < 0)
- return;
-
- intel_dp->alpm_dpcd = dpcd;
mutex_init(&intel_dp->alpm.lock);
}
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 3b8ba8ab76a1..c4246481fc2f 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -1614,7 +1614,6 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta
}
intel_set_transcoder_timings(crtc_state);
- intel_vrr_set_transcoder_timings(crtc_state);
if (cpu_transcoder != TRANSCODER_EDP)
intel_de_write(display, TRANS_MULT(display, cpu_transcoder),
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index 559cf3bb23fd..696edf40b243 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -4577,6 +4577,7 @@ static bool
intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector)
{
struct intel_display *display = to_intel_display(intel_dp);
+ int ret;
/* this function is meant to be called only once */
drm_WARN_ON(display->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
@@ -4616,6 +4617,12 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp, struct intel_connector *connector
*/
intel_dp_init_source_oui(intel_dp);
+ /* Read the ALPM DPCD caps */
+ ret = drm_dp_dpcd_read_byte(&intel_dp->aux, DP_RECEIVER_ALPM_CAP,
+ &intel_dp->alpm_dpcd);
+ if (ret < 0)
+ return false;
+
/*
* This has to be called after intel_dp->edp_dpcd is filled, PSR checks
* for SET_POWER_CAPABLE bit in intel_dp->edp_dpcd[1]
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 4ce1173a2e91..b7302a32ded4 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -2619,6 +2619,12 @@ void intel_psr2_program_trans_man_trk_ctl(struct intel_dsb *dsb,
intel_de_write_dsb(display, dsb, PIPE_SRCSZ_ERLY_TPT(crtc->pipe),
crtc_state->pipe_srcsz_early_tpt);
+
+ if (!crtc_state->dsc.compression_enable)
+ return;
+
+ intel_dsc_su_et_parameters_configure(dsb, encoder, crtc_state,
+ drm_rect_height(&crtc_state->psr2_su_area));
}
static void psr2_man_trk_ctl_calc(struct intel_crtc_state *crtc_state,
@@ -2689,11 +2695,12 @@ static void clip_area_update(struct drm_rect *overlap_damage_area,
overlap_damage_area->y2 = damage_area->y2;
}
-static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
+static bool intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
u16 y_alignment;
+ bool su_area_changed = false;
/* ADLP aligns the SU region to vdsc slice height in case dsc is enabled */
if (crtc_state->dsc.compression_enable &&
@@ -2702,10 +2709,18 @@ static void intel_psr2_sel_fetch_pipe_alignment(struct intel_crtc_state *crtc_st
else
y_alignment = crtc_state->su_y_granularity;
- crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
- if (crtc_state->psr2_su_area.y2 % y_alignment)
+ if (crtc_state->psr2_su_area.y1 % y_alignment) {
+ crtc_state->psr2_su_area.y1 -= crtc_state->psr2_su_area.y1 % y_alignment;
+ su_area_changed = true;
+ }
+
+ if (crtc_state->psr2_su_area.y2 % y_alignment) {
crtc_state->psr2_su_area.y2 = ((crtc_state->psr2_su_area.y2 /
y_alignment) + 1) * y_alignment;
+ su_area_changed = true;
+ }
+
+ return su_area_changed;
}
/*
@@ -2839,7 +2854,7 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
struct intel_crtc_state *crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
struct intel_plane_state *new_plane_state, *old_plane_state;
struct intel_plane *plane;
- bool full_update = false, cursor_in_su_area = false;
+ bool full_update = false, su_area_changed;
int i, ret;
if (!crtc_state->enable_psr2_sel_fetch)
@@ -2946,15 +2961,32 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
if (ret)
return ret;
- /*
- * Adjust su area to cover cursor fully as necessary (early
- * transport). This needs to be done after
- * drm_atomic_add_affected_planes to ensure visible cursor is added into
- * affected planes even when cursor is not updated by itself.
- */
- intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
+ do {
+ bool cursor_in_su_area;
- intel_psr2_sel_fetch_pipe_alignment(crtc_state);
+ /*
+ * Adjust su area to cover cursor fully as necessary
+ * (early transport). This needs to be done after
+ * drm_atomic_add_affected_planes to ensure visible
+ * cursor is added into affected planes even when
+ * cursor is not updated by itself.
+ */
+ intel_psr2_sel_fetch_et_alignment(state, crtc, &cursor_in_su_area);
+
+ su_area_changed = intel_psr2_sel_fetch_pipe_alignment(crtc_state);
+
+ /*
+ * If the cursor was outside the SU area before
+ * alignment, the alignment step (which only expands
+ * SU) may pull the cursor partially inside, so we
+ * must run ET alignment again to fully cover it. But
+ * if the cursor was already fully inside before
+ * alignment, expanding the SU area won't change that,
+ * so no further work is needed.
+ */
+ if (cursor_in_su_area)
+ break;
+ } while (su_area_changed);
/*
* Now that we have the pipe damaged area check if it intersect with
@@ -3014,6 +3046,10 @@ int intel_psr2_sel_fetch_update(struct intel_atomic_state *state,
}
skip_sel_fetch_set_loop:
+ if (full_update)
+ clip_area_update(&crtc_state->psr2_su_area, &crtc_state->pipe_src,
+ &crtc_state->pipe_src);
+
psr2_man_trk_ctl_calc(crtc_state, full_update);
crtc_state->pipe_srcsz_early_tpt =
psr2_pipe_srcsz_early_tpt_calc(crtc_state, full_update);
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 5493082f30a7..2065dac1e3fd 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -767,6 +767,29 @@ void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
sizeof(dp_dsc_pps_sdp));
}
+void intel_dsc_su_et_parameters_configure(struct intel_dsb *dsb, struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state, int su_lines)
+{
+ struct intel_display *display = to_intel_display(crtc_state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ const struct drm_dsc_config *vdsc_cfg = &crtc_state->dsc.config;
+ enum pipe pipe = crtc->pipe;
+ int vdsc_instances_per_pipe = intel_dsc_get_vdsc_per_pipe(crtc_state);
+ int slice_row_per_frame = su_lines / vdsc_cfg->slice_height;
+ u32 val;
+
+ drm_WARN_ON_ONCE(display->drm, su_lines % vdsc_cfg->slice_height);
+ drm_WARN_ON_ONCE(display->drm, vdsc_instances_per_pipe > 2);
+
+ val = DSC_SUPS0_SU_SLICE_ROW_PER_FRAME(slice_row_per_frame);
+ val |= DSC_SUPS0_SU_PIC_HEIGHT(su_lines);
+
+ intel_de_write_dsb(display, dsb, LNL_DSC0_SU_PARAMETER_SET_0(pipe), val);
+
+ if (vdsc_instances_per_pipe == 2)
+ intel_de_write_dsb(display, dsb, LNL_DSC1_SU_PARAMETER_SET_0(pipe), val);
+}
+
static i915_reg_t dss_ctl1_reg(struct intel_crtc *crtc, enum transcoder cpu_transcoder)
{
return is_pipe_dsc(crtc, cpu_transcoder) ?
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.h b/drivers/gpu/drm/i915/display/intel_vdsc.h
index 99f64ac54b27..99bb9042592a 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.h
@@ -13,6 +13,7 @@ struct drm_printer;
enum transcoder;
struct intel_crtc;
struct intel_crtc_state;
+struct intel_dsb;
struct intel_encoder;
bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state);
@@ -31,6 +32,8 @@ void intel_dsc_dsi_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
void intel_dsc_dp_pps_write(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state);
+void intel_dsc_su_et_parameters_configure(struct intel_dsb *dsb, struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state, int su_lines);
void intel_vdsc_state_dump(struct drm_printer *p, int indent,
const struct intel_crtc_state *crtc_state);
int intel_vdsc_min_cdclk(const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
index 2d478a84b07c..2b2e3c1b8138 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
+++ b/drivers/gpu/drm/i915/display/intel_vdsc_regs.h
@@ -196,6 +196,18 @@
#define DSC_PPS18_NSL_BPG_OFFSET(offset) REG_FIELD_PREP(DSC_PPS18_NSL_BPG_OFFSET_MASK, offset)
#define DSC_PPS18_SL_OFFSET_ADJ(offset) REG_FIELD_PREP(DSC_PPS18_SL_OFFSET_ADJ_MASK, offset)
+#define _LNL_DSC0_SU_PARAMETER_SET_0_PA 0x78064
+#define _LNL_DSC1_SU_PARAMETER_SET_0_PA 0x78164
+#define _LNL_DSC0_SU_PARAMETER_SET_0_PB 0x78264
+#define _LNL_DSC1_SU_PARAMETER_SET_0_PB 0x78364
+#define LNL_DSC0_SU_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe), _LNL_DSC0_SU_PARAMETER_SET_0_PA, _LNL_DSC0_SU_PARAMETER_SET_0_PB)
+#define LNL_DSC1_SU_PARAMETER_SET_0(pipe) _MMIO_PIPE((pipe), _LNL_DSC1_SU_PARAMETER_SET_0_PA, _LNL_DSC1_SU_PARAMETER_SET_0_PB)
+
+#define DSC_SUPS0_SU_SLICE_ROW_PER_FRAME_MASK REG_GENMASK(31, 20)
+#define DSC_SUPS0_SU_SLICE_ROW_PER_FRAME(rows) REG_FIELD_PREP(DSC_SUPS0_SU_SLICE_ROW_PER_FRAME_MASK, (rows))
+#define DSC_SUPS0_SU_PIC_HEIGHT_MASK REG_GENMASK(15, 0)
+#define DSC_SUPS0_SU_PIC_HEIGHT(h) REG_FIELD_PREP(DSC_SUPS0_SU_PIC_HEIGHT_MASK, (h))
+
/* Icelake Rate Control Buffer Threshold Registers */
#define DSCA_RC_BUF_THRESH_0 _MMIO(0x6B230)
#define DSCA_RC_BUF_THRESH_0_UDW _MMIO(0x6B230 + 4)
diff --git a/drivers/gpu/drm/i915/display/intel_vrr.c b/drivers/gpu/drm/i915/display/intel_vrr.c
index db74744ddb31..bea005752327 100644
--- a/drivers/gpu/drm/i915/display/intel_vrr.c
+++ b/drivers/gpu/drm/i915/display/intel_vrr.c
@@ -598,6 +598,18 @@ void intel_vrr_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
return;
/*
+ * Bspec says:
+ * "(note: VRR needs to be programmed after
+ * TRANS_DDI_FUNC_CTL and before TRANS_CONF)."
+ *
+ * In practice it turns out that ICL can hang if
+ * TRANS_VRR_VMAX/FLIPLINE are written before
+ * enabling TRANS_DDI_FUNC_CTL.
+ */
+ drm_WARN_ON(display->drm,
+ !(intel_de_read(display, TRANS_DDI_FUNC_CTL(display, cpu_transcoder)) & TRANS_DDI_FUNC_ENABLE));
+
+ /*
* This bit seems to have two meanings depending on the platform:
* TGL: generate VRR "safe window" for DSB vblank waits
* ADL/DG2: make TRANS_SET_CONTEXT_LATENCY effective with VRR
@@ -939,6 +951,8 @@ void intel_vrr_transcoder_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_display *display = to_intel_display(crtc_state);
+ intel_vrr_set_transcoder_timings(crtc_state);
+
if (!intel_vrr_possible(crtc_state))
return;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index c6c64ba29bc4..720a9ad39aa2 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -153,8 +153,12 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
}
} while (1);
- nr_pages = min_t(unsigned long,
- folio_nr_pages(folio), page_count - i);
+ nr_pages = min_array(((unsigned long[]) {
+ folio_nr_pages(folio),
+ page_count - i,
+ max_segment / PAGE_SIZE,
+ }), 3);
+
if (!i ||
sg->length >= max_segment ||
folio_pfn(folio) != next_pfn) {
@@ -164,7 +168,9 @@ int shmem_sg_alloc_table(struct drm_i915_private *i915, struct sg_table *st,
st->nents++;
sg_set_folio(sg, folio, nr_pages * PAGE_SIZE, 0);
} else {
- /* XXX: could overflow? */
+ nr_pages = min_t(unsigned long, nr_pages,
+ (max_segment - sg->length) / PAGE_SIZE);
+
sg->length += nr_pages * PAGE_SIZE;
}
next_pfn = folio_pfn(folio) + nr_pages;
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
index d77b4774d414..e2225c5ba647 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c
@@ -78,7 +78,7 @@ static void a2xx_gpummu_destroy(struct msm_mmu *mmu)
{
struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu);
- dma_free_attrs(mmu->dev, TABLE_SIZE, gpummu->table, gpummu->pt_base,
+ dma_free_attrs(mmu->dev, TABLE_SIZE + 32, gpummu->table, gpummu->pt_base,
DMA_ATTR_FORCE_CONTIGUOUS);
kfree(gpummu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
index 550a53a7865e..38561f26837e 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_catalog.c
@@ -1759,7 +1759,7 @@ static const u32 x285_protect_regs[] = {
A6XX_PROTECT_NORDWR(0x27c06, 0x0000),
};
-DECLARE_ADRENO_PROTECT(x285_protect, 64);
+DECLARE_ADRENO_PROTECT(x285_protect, 15);
static const struct adreno_reglist_pipe a840_nonctxt_regs[] = {
{ REG_A8XX_CP_SMMU_STREAM_ID_LPAC, 0x00000101, BIT(PIPE_NONE) },
@@ -1966,5 +1966,4 @@ static inline __always_unused void __build_asserts(void)
BUILD_BUG_ON(a660_protect.count > a660_protect.count_max);
BUILD_BUG_ON(a690_protect.count > a690_protect.count_max);
BUILD_BUG_ON(a730_protect.count > a730_protect.count_max);
- BUILD_BUG_ON(a840_protect.count > a840_protect.count_max);
}
diff --git a/drivers/gpu/drm/msm/adreno/a8xx_gpu.c b/drivers/gpu/drm/msm/adreno/a8xx_gpu.c
index 5a320f5bde41..b1887e0cf698 100644
--- a/drivers/gpu/drm/msm/adreno/a8xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a8xx_gpu.c
@@ -310,11 +310,21 @@ static void a8xx_set_ubwc_config(struct msm_gpu *gpu)
hbb = cfg->highest_bank_bit - 13;
hbb_hi = hbb >> 2;
hbb_lo = hbb & 3;
- a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_GRAS_NC_MODE_CNTL, hbb << 5);
- a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_GRAS_NC_MODE_CNTL, hbb << 5);
+
+ a8xx_write_pipe(gpu, PIPE_BV, REG_A8XX_GRAS_NC_MODE_CNTL,
+ hbb << 5 |
+ level3_swizzling_dis << 4 |
+ level2_swizzling_dis << 3);
+
+ a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_GRAS_NC_MODE_CNTL,
+ hbb << 5 |
+ level3_swizzling_dis << 4 |
+ level2_swizzling_dis << 3);
a8xx_write_pipe(gpu, PIPE_BR, REG_A8XX_RB_CCU_NC_MODE_CNTL,
yuvnotcomptofc << 6 |
+ level3_swizzling_dis << 5 |
+ level2_swizzling_dis << 4 |
hbb_hi << 3 |
hbb_lo << 1);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 554d746f115b..4edfe80c5be7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -302,6 +302,7 @@ static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,kgsl-3d0" },
{}
};
+MODULE_DEVICE_TABLE(of, dt_match);
static int adreno_runtime_resume(struct device *dev)
{
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
index 303d33dc7783..9f2bceca1789 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_0_sc8280xp.h
@@ -133,7 +133,7 @@ static const struct dpu_sspp_cfg sc8280xp_sspp[] = {
static const struct dpu_lm_cfg sc8280xp_lm[] = {
{
.name = "lm_0", .id = LM_0,
- .base = 0x44000, .len = 0x320,
+ .base = 0x44000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
@@ -141,7 +141,7 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
.dspp = DSPP_0,
}, {
.name = "lm_1", .id = LM_1,
- .base = 0x45000, .len = 0x320,
+ .base = 0x45000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
@@ -149,7 +149,7 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
.dspp = DSPP_1,
}, {
.name = "lm_2", .id = LM_2,
- .base = 0x46000, .len = 0x320,
+ .base = 0x46000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
@@ -157,7 +157,7 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
.dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
- .base = 0x47000, .len = 0x320,
+ .base = 0x47000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
@@ -165,14 +165,14 @@ static const struct dpu_lm_cfg sc8280xp_lm[] = {
.dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
- .base = 0x48000, .len = 0x320,
+ .base = 0x48000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
- .base = 0x49000, .len = 0x320,
+ .base = 0x49000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
index b09a6af4c474..04b22167f93d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_1_sm8450.h
@@ -134,7 +134,7 @@ static const struct dpu_sspp_cfg sm8450_sspp[] = {
static const struct dpu_lm_cfg sm8450_lm[] = {
{
.name = "lm_0", .id = LM_0,
- .base = 0x44000, .len = 0x320,
+ .base = 0x44000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
@@ -142,7 +142,7 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
.dspp = DSPP_0,
}, {
.name = "lm_1", .id = LM_1,
- .base = 0x45000, .len = 0x320,
+ .base = 0x45000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
@@ -150,7 +150,7 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
.dspp = DSPP_1,
}, {
.name = "lm_2", .id = LM_2,
- .base = 0x46000, .len = 0x320,
+ .base = 0x46000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
@@ -158,7 +158,7 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
.dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
- .base = 0x47000, .len = 0x320,
+ .base = 0x47000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
@@ -166,14 +166,14 @@ static const struct dpu_lm_cfg sm8450_lm[] = {
.dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
- .base = 0x48000, .len = 0x320,
+ .base = 0x48000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
- .base = 0x49000, .len = 0x320,
+ .base = 0x49000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
index 0f7b4a224e4c..42cf3bd5a12a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_8_4_sa8775p.h
@@ -366,8 +366,8 @@ static const struct dpu_intf_cfg sa8775p_intf[] = {
.type = INTF_NONE,
.controller_id = MSM_DP_CONTROLLER_0, /* pair with intf_0 for DP MST */
.prog_fetch_lines_worst_case = 24,
- .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
- .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ .intr_underrun = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 16),
+ .intr_vsync = DPU_IRQ_IDX(MDP_SSPP_TOP0_INTR, 17),
}, {
.name = "intf_7", .id = INTF_7,
.base = 0x3b000, .len = 0x280,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
index 465b6460f875..4c7eb55d474c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_0_sm8550.h
@@ -131,7 +131,7 @@ static const struct dpu_sspp_cfg sm8550_sspp[] = {
static const struct dpu_lm_cfg sm8550_lm[] = {
{
.name = "lm_0", .id = LM_0,
- .base = 0x44000, .len = 0x320,
+ .base = 0x44000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
@@ -139,7 +139,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
.dspp = DSPP_0,
}, {
.name = "lm_1", .id = LM_1,
- .base = 0x45000, .len = 0x320,
+ .base = 0x45000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
@@ -147,7 +147,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
.dspp = DSPP_1,
}, {
.name = "lm_2", .id = LM_2,
- .base = 0x46000, .len = 0x320,
+ .base = 0x46000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
@@ -155,7 +155,7 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
.dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
- .base = 0x47000, .len = 0x320,
+ .base = 0x47000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
@@ -163,14 +163,14 @@ static const struct dpu_lm_cfg sm8550_lm[] = {
.dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
- .base = 0x48000, .len = 0x320,
+ .base = 0x48000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
- .base = 0x49000, .len = 0x320,
+ .base = 0x49000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
index 6caa7d40f368..dec83ea8167d 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_1_sar2130p.h
@@ -131,7 +131,7 @@ static const struct dpu_sspp_cfg sar2130p_sspp[] = {
static const struct dpu_lm_cfg sar2130p_lm[] = {
{
.name = "lm_0", .id = LM_0,
- .base = 0x44000, .len = 0x320,
+ .base = 0x44000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
@@ -139,7 +139,7 @@ static const struct dpu_lm_cfg sar2130p_lm[] = {
.dspp = DSPP_0,
}, {
.name = "lm_1", .id = LM_1,
- .base = 0x45000, .len = 0x320,
+ .base = 0x45000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
@@ -147,7 +147,7 @@ static const struct dpu_lm_cfg sar2130p_lm[] = {
.dspp = DSPP_1,
}, {
.name = "lm_2", .id = LM_2,
- .base = 0x46000, .len = 0x320,
+ .base = 0x46000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
@@ -155,7 +155,7 @@ static const struct dpu_lm_cfg sar2130p_lm[] = {
.dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
- .base = 0x47000, .len = 0x320,
+ .base = 0x47000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
@@ -163,14 +163,14 @@ static const struct dpu_lm_cfg sar2130p_lm[] = {
.dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
- .base = 0x48000, .len = 0x320,
+ .base = 0x48000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
- .base = 0x49000, .len = 0x320,
+ .base = 0x49000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
index 7243eebb85f3..52ff4baa668a 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/catalog/dpu_9_2_x1e80100.h
@@ -130,7 +130,7 @@ static const struct dpu_sspp_cfg x1e80100_sspp[] = {
static const struct dpu_lm_cfg x1e80100_lm[] = {
{
.name = "lm_0", .id = LM_0,
- .base = 0x44000, .len = 0x320,
+ .base = 0x44000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_1,
@@ -138,7 +138,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
.dspp = DSPP_0,
}, {
.name = "lm_1", .id = LM_1,
- .base = 0x45000, .len = 0x320,
+ .base = 0x45000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_0,
@@ -146,7 +146,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
.dspp = DSPP_1,
}, {
.name = "lm_2", .id = LM_2,
- .base = 0x46000, .len = 0x320,
+ .base = 0x46000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_3,
@@ -154,7 +154,7 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
.dspp = DSPP_2,
}, {
.name = "lm_3", .id = LM_3,
- .base = 0x47000, .len = 0x320,
+ .base = 0x47000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_2,
@@ -162,14 +162,14 @@ static const struct dpu_lm_cfg x1e80100_lm[] = {
.dspp = DSPP_3,
}, {
.name = "lm_4", .id = LM_4,
- .base = 0x48000, .len = 0x320,
+ .base = 0x48000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_5,
.pingpong = PINGPONG_4,
}, {
.name = "lm_5", .id = LM_5,
- .base = 0x49000, .len = 0x320,
+ .base = 0x49000, .len = 0x400,
.features = MIXER_MSM8998_MASK,
.sblk = &sdm845_lm_sblk,
.lm_pair = LM_4,
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
index 188ee0af2c90..23dcbe1ce1b8 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_dspp.c
@@ -89,7 +89,7 @@ static void dpu_setup_dspp_gc(struct dpu_hw_dspp *ctx,
base = ctx->cap->sblk->gc.base;
if (!base) {
- DRM_ERROR("invalid ctx %pK gc base\n", ctx);
+ DRM_ERROR("invalid ctx %p gc base\n", ctx);
return;
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp_v13.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp_v13.c
index e65f1fc026fd..f8f96ad971d7 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp_v13.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp_v13.c
@@ -156,11 +156,13 @@ static void dpu_hw_sspp_setup_pe_config_v13(struct dpu_hw_sspp *ctx,
u8 color;
u32 lr_pe[4], tb_pe[4];
const u32 bytemask = 0xff;
- u32 offset = ctx->cap->sblk->sspp_rec0_blk.base;
+ u32 offset;
if (!ctx || !pe_ext)
return;
+ offset = ctx->cap->sblk->sspp_rec0_blk.base;
+
c = &ctx->hw;
/* program SW pixel extension override for all pipes*/
for (color = 0; color < DPU_MAX_PLANES; color++) {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
index 451a4fcf3e65..7e77d88f8959 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -350,26 +350,28 @@ static bool _dpu_rm_check_lm_and_get_connected_blks(struct dpu_rm *rm,
return true;
}
-static bool dpu_rm_find_lms(struct dpu_rm *rm,
- struct dpu_global_state *global_state,
- uint32_t crtc_id, bool skip_dspp,
- struct msm_display_topology *topology,
- int *lm_idx, int *pp_idx, int *dspp_idx)
+static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
+ struct dpu_global_state *global_state,
+ uint32_t crtc_id,
+ struct msm_display_topology *topology)
{
+ int lm_idx[MAX_BLOCKS];
+ int pp_idx[MAX_BLOCKS];
+ int dspp_idx[MAX_BLOCKS] = {0};
int i, lm_count = 0;
+ if (!topology->num_lm) {
+ DPU_ERROR("zero LMs in topology\n");
+ return -EINVAL;
+ }
+
/* Find a primary mixer */
for (i = 0; i < ARRAY_SIZE(rm->mixer_blks) &&
lm_count < topology->num_lm; i++) {
if (!rm->mixer_blks[i])
continue;
- if (skip_dspp && to_dpu_hw_mixer(rm->mixer_blks[i])->cap->dspp) {
- DPU_DEBUG("Skipping LM_%d, skipping LMs with DSPPs\n", i);
- continue;
- }
-
/*
* Reset lm_count to an even index. This will drop the previous
* primary mixer if failed to find its peer.
@@ -408,38 +410,12 @@ static bool dpu_rm_find_lms(struct dpu_rm *rm,
}
}
- return lm_count == topology->num_lm;
-}
-
-static int _dpu_rm_reserve_lms(struct dpu_rm *rm,
- struct dpu_global_state *global_state,
- uint32_t crtc_id,
- struct msm_display_topology *topology)
-
-{
- int lm_idx[MAX_BLOCKS];
- int pp_idx[MAX_BLOCKS];
- int dspp_idx[MAX_BLOCKS] = {0};
- int i;
- bool found;
-
- if (!topology->num_lm) {
- DPU_ERROR("zero LMs in topology\n");
- return -EINVAL;
- }
-
- /* Try using non-DSPP LM blocks first */
- found = dpu_rm_find_lms(rm, global_state, crtc_id, !topology->num_dspp,
- topology, lm_idx, pp_idx, dspp_idx);
- if (!found && !topology->num_dspp)
- found = dpu_rm_find_lms(rm, global_state, crtc_id, false,
- topology, lm_idx, pp_idx, dspp_idx);
- if (!found) {
+ if (lm_count != topology->num_lm) {
DPU_DEBUG("unable to find appropriate mixers\n");
return -ENAVAIL;
}
- for (i = 0; i < topology->num_lm; i++) {
+ for (i = 0; i < lm_count; i++) {
global_state->mixer_to_crtc_id[lm_idx[i]] = crtc_id;
global_state->pingpong_to_crtc_id[pp_idx[i]] = crtc_id;
global_state->dspp_to_crtc_id[dspp_idx[i]] =
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index e0de545d4077..db6da99375a1 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -584,13 +584,30 @@ void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
* FIXME: Reconsider this if/when CMD mode handling is rewritten to use
* transfer time and data overhead as a starting point of the calculations.
*/
-static unsigned long dsi_adjust_pclk_for_compression(const struct drm_display_mode *mode,
- const struct drm_dsc_config *dsc)
+static unsigned long
+dsi_adjust_pclk_for_compression(const struct drm_display_mode *mode,
+ const struct drm_dsc_config *dsc,
+ bool is_bonded_dsi)
{
- int new_hdisplay = DIV_ROUND_UP(mode->hdisplay * drm_dsc_get_bpp_int(dsc),
- dsc->bits_per_component * 3);
+ int hdisplay, new_hdisplay, new_htotal;
- int new_htotal = mode->htotal - mode->hdisplay + new_hdisplay;
+ /*
+ * For bonded DSI, split hdisplay across two links and round up each
+ * half separately, passing the full hdisplay would only round up once.
+ * This also aligns with the hdisplay we program later in
+ * dsi_timing_setup()
+ */
+ hdisplay = mode->hdisplay;
+ if (is_bonded_dsi)
+ hdisplay /= 2;
+
+ new_hdisplay = DIV_ROUND_UP(hdisplay * drm_dsc_get_bpp_int(dsc),
+ dsc->bits_per_component * 3);
+
+ if (is_bonded_dsi)
+ new_hdisplay *= 2;
+
+ new_htotal = mode->htotal - mode->hdisplay + new_hdisplay;
return mult_frac(mode->clock * 1000u, new_htotal, mode->htotal);
}
@@ -603,7 +620,7 @@ static unsigned long dsi_get_pclk_rate(const struct drm_display_mode *mode,
pclk_rate = mode->clock * 1000u;
if (dsc)
- pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc);
+ pclk_rate = dsi_adjust_pclk_for_compression(mode, dsc, is_bonded_dsi);
/*
* For bonded DSI mode, the current DRM mode has the complete width of the
@@ -993,7 +1010,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
if (msm_host->dsc) {
struct drm_dsc_config *dsc = msm_host->dsc;
- u32 bytes_per_pclk;
+ u32 bits_per_pclk;
/* update dsc params with timing params */
if (!dsc || !mode->hdisplay || !mode->vdisplay) {
@@ -1015,7 +1032,9 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
/*
* DPU sends 3 bytes per pclk cycle to DSI. If widebus is
- * enabled, bus width is extended to 6 bytes.
+ * enabled, MDP always sends out 48-bit compressed data per
+ * pclk and on average, DSI consumes an amount of compressed
+ * data equivalent to the uncompressed pixel depth per pclk.
*
* Calculate the number of pclks needed to transmit one line of
* the compressed data.
@@ -1027,12 +1046,12 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_bonded_dsi)
* unused anyway.
*/
h_total -= hdisplay;
- if (wide_bus_enabled && !(msm_host->mode_flags & MIPI_DSI_MODE_VIDEO))
- bytes_per_pclk = 6;
+ if (wide_bus_enabled)
+ bits_per_pclk = mipi_dsi_pixel_format_to_bpp(msm_host->format);
else
- bytes_per_pclk = 3;
+ bits_per_pclk = 24;
- hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc), bytes_per_pclk);
+ hdisplay = DIV_ROUND_UP(msm_dsc_get_bytes_per_line(msm_host->dsc) * 8, bits_per_pclk);
h_total += hdisplay;
ha_end = ha_start + hdisplay;
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
index 8cb0db3a9880..01182442dfd6 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy_7nm.c
@@ -51,8 +51,8 @@
#define DSI_PHY_7NM_QUIRK_V4_3 BIT(3)
/* Hardware is V5.2 */
#define DSI_PHY_7NM_QUIRK_V5_2 BIT(4)
-/* Hardware is V7.0 */
-#define DSI_PHY_7NM_QUIRK_V7_0 BIT(5)
+/* Hardware is V7.2 */
+#define DSI_PHY_7NM_QUIRK_V7_2 BIT(5)
struct dsi_pll_config {
bool enable_ssc;
@@ -143,7 +143,7 @@ static void dsi_pll_calc_dec_frac(struct dsi_pll_7nm *pll, struct dsi_pll_config
if (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_PRE_V4_1) {
config->pll_clock_inverters = 0x28;
- } else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
+ } else if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) {
if (pll_freq < 163000000ULL)
config->pll_clock_inverters = 0xa0;
else if (pll_freq < 175000000ULL)
@@ -284,7 +284,7 @@ static void dsi_pll_config_hzindep_reg(struct dsi_pll_7nm *pll)
}
if ((pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
- (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
+ (pll->phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) {
if (pll->vco_current_rate < 1557000000ULL)
vco_config_1 = 0x08;
else
@@ -699,7 +699,7 @@ static int dsi_7nm_set_usecase(struct msm_dsi_phy *phy)
case MSM_DSI_PHY_MASTER:
pll_7nm->slave = pll_7nm_list[(pll_7nm->phy->id + 1) % DSI_MAX];
/* v7.0: Enable ATB_EN0 and alternate clock output to external phy */
- if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)
+ if (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)
writel(0x07, base + REG_DSI_7nm_PHY_CMN_CTRL_5);
break;
case MSM_DSI_PHY_SLAVE:
@@ -987,7 +987,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* Request for REFGEN READY */
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
(phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
- (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) {
writel(0x1, phy->base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10);
udelay(500);
}
@@ -1021,7 +1021,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
lane_ctrl0 = 0x1f;
}
- if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
+ if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) {
if (phy->cphy_mode) {
/* TODO: different for second phy */
vreg_ctrl_0 = 0x57;
@@ -1097,7 +1097,7 @@ static int dsi_7nm_phy_enable(struct msm_dsi_phy *phy,
/* program CMN_CTRL_4 for minor_ver 2 chipsets*/
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
- (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0) ||
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2) ||
(readl(base + REG_DSI_7nm_PHY_CMN_REVISION_ID0) & (0xf0)) == 0x20)
writel(0x04, base + REG_DSI_7nm_PHY_CMN_CTRL_4);
@@ -1213,7 +1213,7 @@ static void dsi_7nm_phy_disable(struct msm_dsi_phy *phy)
/* Turn off REFGEN Vote */
if ((phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V4_3) ||
(phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V5_2) ||
- (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_0)) {
+ (phy->cfg->quirks & DSI_PHY_7NM_QUIRK_V7_2)) {
writel(0x0, base + REG_DSI_7nm_PHY_CMN_GLBL_DIGTOP_SPARE10);
wmb();
/* Delay to ensure HW removes vote before PHY shut down */
@@ -1502,7 +1502,7 @@ const struct msm_dsi_phy_cfg dsi_phy_3nm_8750_cfgs = {
#endif
.io_start = { 0xae95000, 0xae97000 },
.num_dsi_phy = 2,
- .quirks = DSI_PHY_7NM_QUIRK_V7_0,
+ .quirks = DSI_PHY_7NM_QUIRK_V7_2,
};
const struct msm_dsi_phy_cfg dsi_phy_3nm_kaanapali_cfgs = {
@@ -1525,5 +1525,5 @@ const struct msm_dsi_phy_cfg dsi_phy_3nm_kaanapali_cfgs = {
#endif
.io_start = { 0x9ac1000, 0x9ac4000 },
.num_dsi_phy = 2,
- .quirks = DSI_PHY_7NM_QUIRK_V7_0,
+ .quirks = DSI_PHY_7NM_QUIRK_V7_2,
};
diff --git a/drivers/gpu/drm/sitronix/st7586.c b/drivers/gpu/drm/sitronix/st7586.c
index b57ebf37a664..16b6b4e368af 100644
--- a/drivers/gpu/drm/sitronix/st7586.c
+++ b/drivers/gpu/drm/sitronix/st7586.c
@@ -347,6 +347,12 @@ static int st7586_probe(struct spi_device *spi)
if (ret)
return ret;
+ /*
+ * Override value set by mipi_dbi_spi_init(). This driver is a bit
+ * non-standard, so best to set it explicitly here.
+ */
+ dbi->write_memory_bpw = 8;
+
/* Cannot read from this controller via SPI */
dbi->read_commands = NULL;
@@ -356,15 +362,6 @@ static int st7586_probe(struct spi_device *spi)
if (ret)
return ret;
- /*
- * we are using 8-bit data, so we are not actually swapping anything,
- * but setting mipi->swap_bytes makes mipi_dbi_typec3_command() do the
- * right thing and not use 16-bit transfers (which results in swapped
- * bytes on little-endian systems and causes out of order data to be
- * sent to the display).
- */
- dbi->swap_bytes = true;
-
drm_mode_config_reset(drm);
ret = drm_dev_register(drm, 0);
diff --git a/drivers/gpu/nova-core/gsp.rs b/drivers/gpu/nova-core/gsp.rs
index 174feaca0a6b..c69adaa92bbe 100644
--- a/drivers/gpu/nova-core/gsp.rs
+++ b/drivers/gpu/nova-core/gsp.rs
@@ -47,16 +47,12 @@ struct PteArray<const NUM_ENTRIES: usize>([u64; NUM_ENTRIES]);
unsafe impl<const NUM_ENTRIES: usize> AsBytes for PteArray<NUM_ENTRIES> {}
impl<const NUM_PAGES: usize> PteArray<NUM_PAGES> {
- /// Creates a new page table array mapping `NUM_PAGES` GSP pages starting at address `start`.
- fn new(start: DmaAddress) -> Result<Self> {
- let mut ptes = [0u64; NUM_PAGES];
- for (i, pte) in ptes.iter_mut().enumerate() {
- *pte = start
- .checked_add(num::usize_as_u64(i) << GSP_PAGE_SHIFT)
- .ok_or(EOVERFLOW)?;
- }
-
- Ok(Self(ptes))
+ /// Returns the page table entry for `index`, for a mapping starting at `start`.
+ // TODO: Replace with `IoView` projection once available.
+ fn entry(start: DmaAddress, index: usize) -> Result<u64> {
+ start
+ .checked_add(num::usize_as_u64(index) << GSP_PAGE_SHIFT)
+ .ok_or(EOVERFLOW)
}
}
@@ -86,16 +82,22 @@ impl LogBuffer {
NUM_PAGES * GSP_PAGE_SIZE,
GFP_KERNEL | __GFP_ZERO,
)?);
- let ptes = PteArray::<NUM_PAGES>::new(obj.0.dma_handle())?;
+
+ let start_addr = obj.0.dma_handle();
// SAFETY: `obj` has just been created and we are its sole user.
- unsafe {
- // Copy the self-mapping PTE at the expected location.
+ let pte_region = unsafe {
obj.0
- .as_slice_mut(size_of::<u64>(), size_of_val(&ptes))?
- .copy_from_slice(ptes.as_bytes())
+ .as_slice_mut(size_of::<u64>(), NUM_PAGES * size_of::<u64>())?
};
+ // Write values one by one to avoid an on-stack instance of `PteArray`.
+ for (i, chunk) in pte_region.chunks_exact_mut(size_of::<u64>()).enumerate() {
+ let pte_value = PteArray::<0>::entry(start_addr, i)?;
+
+ chunk.copy_from_slice(&pte_value.to_ne_bytes());
+ }
+
Ok(obj)
}
}
@@ -143,14 +145,14 @@ impl Gsp {
// _kgspInitLibosLoggingStructures (allocates memory for buffers)
// kgspSetupLibosInitArgs_IMPL (creates pLibosInitArgs[] array)
dma_write!(
- libos[0] = LibosMemoryRegionInitArgument::new("LOGINIT", &loginit.0)
- )?;
+ libos, [0]?, LibosMemoryRegionInitArgument::new("LOGINIT", &loginit.0)
+ );
dma_write!(
- libos[1] = LibosMemoryRegionInitArgument::new("LOGINTR", &logintr.0)
- )?;
- dma_write!(libos[2] = LibosMemoryRegionInitArgument::new("LOGRM", &logrm.0))?;
- dma_write!(rmargs[0].inner = fw::GspArgumentsCached::new(cmdq))?;
- dma_write!(libos[3] = LibosMemoryRegionInitArgument::new("RMARGS", rmargs))?;
+ libos, [1]?, LibosMemoryRegionInitArgument::new("LOGINTR", &logintr.0)
+ );
+ dma_write!(libos, [2]?, LibosMemoryRegionInitArgument::new("LOGRM", &logrm.0));
+ dma_write!(rmargs, [0]?.inner, fw::GspArgumentsCached::new(cmdq));
+ dma_write!(libos, [3]?, LibosMemoryRegionInitArgument::new("RMARGS", rmargs));
},
}))
})
diff --git a/drivers/gpu/nova-core/gsp/boot.rs b/drivers/gpu/nova-core/gsp/boot.rs
index be427fe26a58..94833f7996e8 100644
--- a/drivers/gpu/nova-core/gsp/boot.rs
+++ b/drivers/gpu/nova-core/gsp/boot.rs
@@ -157,7 +157,7 @@ impl super::Gsp {
let wpr_meta =
CoherentAllocation::<GspFwWprMeta>::alloc_coherent(dev, 1, GFP_KERNEL | __GFP_ZERO)?;
- dma_write!(wpr_meta[0] = GspFwWprMeta::new(&gsp_fw, &fb_layout))?;
+ dma_write!(wpr_meta, [0]?, GspFwWprMeta::new(&gsp_fw, &fb_layout));
self.cmdq
.send_command(bar, commands::SetSystemInfo::new(pdev))?;
diff --git a/drivers/gpu/nova-core/gsp/cmdq.rs b/drivers/gpu/nova-core/gsp/cmdq.rs
index 46819a82a51a..03a4f3599849 100644
--- a/drivers/gpu/nova-core/gsp/cmdq.rs
+++ b/drivers/gpu/nova-core/gsp/cmdq.rs
@@ -2,11 +2,7 @@
use core::{
cmp,
- mem,
- sync::atomic::{
- fence,
- Ordering, //
- }, //
+ mem, //
};
use kernel::{
@@ -146,30 +142,36 @@ static_assert!(align_of::<MsgqData>() == GSP_PAGE_SIZE);
#[repr(C)]
// There is no struct defined for this in the open-gpu-kernel-source headers.
// Instead it is defined by code in `GspMsgQueuesInit()`.
-struct Msgq {
+// TODO: Revert to private once `IoView` projections replace the `gsp_mem` module.
+pub(super) struct Msgq {
/// Header for sending messages, including the write pointer.
- tx: MsgqTxHeader,
+ pub(super) tx: MsgqTxHeader,
/// Header for receiving messages, including the read pointer.
- rx: MsgqRxHeader,
+ pub(super) rx: MsgqRxHeader,
/// The message queue proper.
msgq: MsgqData,
}
/// Structure shared between the driver and the GSP and containing the command and message queues.
#[repr(C)]
-struct GspMem {
+// TODO: Revert to private once `IoView` projections replace the `gsp_mem` module.
+pub(super) struct GspMem {
/// Self-mapping page table entries.
- ptes: PteArray<{ GSP_PAGE_SIZE / size_of::<u64>() }>,
+ ptes: PteArray<{ Self::PTE_ARRAY_SIZE }>,
/// CPU queue: the driver writes commands here, and the GSP reads them. It also contains the
/// write and read pointers that the CPU updates.
///
/// This member is read-only for the GSP.
- cpuq: Msgq,
+ pub(super) cpuq: Msgq,
/// GSP queue: the GSP writes messages here, and the driver reads them. It also contains the
/// write and read pointers that the GSP updates.
///
/// This member is read-only for the driver.
- gspq: Msgq,
+ pub(super) gspq: Msgq,
+}
+
+impl GspMem {
+ const PTE_ARRAY_SIZE: usize = GSP_PAGE_SIZE / size_of::<u64>();
}
// SAFETY: These structs don't meet the no-padding requirements of AsBytes but
@@ -201,9 +203,19 @@ impl DmaGspMem {
let gsp_mem =
CoherentAllocation::<GspMem>::alloc_coherent(dev, 1, GFP_KERNEL | __GFP_ZERO)?;
- dma_write!(gsp_mem[0].ptes = PteArray::new(gsp_mem.dma_handle())?)?;
- dma_write!(gsp_mem[0].cpuq.tx = MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES))?;
- dma_write!(gsp_mem[0].cpuq.rx = MsgqRxHeader::new())?;
+
+ let start = gsp_mem.dma_handle();
+ // Write values one by one to avoid an on-stack instance of `PteArray`.
+ for i in 0..GspMem::PTE_ARRAY_SIZE {
+ dma_write!(gsp_mem, [0]?.ptes.0[i], PteArray::<0>::entry(start, i)?);
+ }
+
+ dma_write!(
+ gsp_mem,
+ [0]?.cpuq.tx,
+ MsgqTxHeader::new(MSGQ_SIZE, RX_HDR_OFF, MSGQ_NUM_PAGES)
+ );
+ dma_write!(gsp_mem, [0]?.cpuq.rx, MsgqRxHeader::new());
Ok(Self(gsp_mem))
}
@@ -317,12 +329,7 @@ impl DmaGspMem {
//
// - The returned value is between `0` and `MSGQ_NUM_PAGES`.
fn gsp_write_ptr(&self) -> u32 {
- let gsp_mem = self.0.start_ptr();
-
- // SAFETY:
- // - The 'CoherentAllocation' contains at least one object.
- // - By the invariants of `CoherentAllocation` the pointer is valid.
- (unsafe { (*gsp_mem).gspq.tx.write_ptr() } % MSGQ_NUM_PAGES)
+ super::fw::gsp_mem::gsp_write_ptr(&self.0)
}
// Returns the index of the memory page the GSP will read the next command from.
@@ -331,12 +338,7 @@ impl DmaGspMem {
//
// - The returned value is between `0` and `MSGQ_NUM_PAGES`.
fn gsp_read_ptr(&self) -> u32 {
- let gsp_mem = self.0.start_ptr();
-
- // SAFETY:
- // - The 'CoherentAllocation' contains at least one object.
- // - By the invariants of `CoherentAllocation` the pointer is valid.
- (unsafe { (*gsp_mem).gspq.rx.read_ptr() } % MSGQ_NUM_PAGES)
+ super::fw::gsp_mem::gsp_read_ptr(&self.0)
}
// Returns the index of the memory page the CPU can read the next message from.
@@ -345,27 +347,12 @@ impl DmaGspMem {
//
// - The returned value is between `0` and `MSGQ_NUM_PAGES`.
fn cpu_read_ptr(&self) -> u32 {
- let gsp_mem = self.0.start_ptr();
-
- // SAFETY:
- // - The ['CoherentAllocation'] contains at least one object.
- // - By the invariants of CoherentAllocation the pointer is valid.
- (unsafe { (*gsp_mem).cpuq.rx.read_ptr() } % MSGQ_NUM_PAGES)
+ super::fw::gsp_mem::cpu_read_ptr(&self.0)
}
// Informs the GSP that it can send `elem_count` new pages into the message queue.
fn advance_cpu_read_ptr(&mut self, elem_count: u32) {
- let rptr = self.cpu_read_ptr().wrapping_add(elem_count) % MSGQ_NUM_PAGES;
-
- // Ensure read pointer is properly ordered.
- fence(Ordering::SeqCst);
-
- let gsp_mem = self.0.start_ptr_mut();
-
- // SAFETY:
- // - The 'CoherentAllocation' contains at least one object.
- // - By the invariants of `CoherentAllocation` the pointer is valid.
- unsafe { (*gsp_mem).cpuq.rx.set_read_ptr(rptr) };
+ super::fw::gsp_mem::advance_cpu_read_ptr(&self.0, elem_count)
}
// Returns the index of the memory page the CPU can write the next command to.
@@ -374,26 +361,12 @@ impl DmaGspMem {
//
// - The returned value is between `0` and `MSGQ_NUM_PAGES`.
fn cpu_write_ptr(&self) -> u32 {
- let gsp_mem = self.0.start_ptr();
-
- // SAFETY:
- // - The 'CoherentAllocation' contains at least one object.
- // - By the invariants of `CoherentAllocation` the pointer is valid.
- (unsafe { (*gsp_mem).cpuq.tx.write_ptr() } % MSGQ_NUM_PAGES)
+ super::fw::gsp_mem::cpu_write_ptr(&self.0)
}
// Informs the GSP that it can process `elem_count` new pages from the command queue.
fn advance_cpu_write_ptr(&mut self, elem_count: u32) {
- let wptr = self.cpu_write_ptr().wrapping_add(elem_count) & MSGQ_NUM_PAGES;
- let gsp_mem = self.0.start_ptr_mut();
-
- // SAFETY:
- // - The 'CoherentAllocation' contains at least one object.
- // - By the invariants of `CoherentAllocation` the pointer is valid.
- unsafe { (*gsp_mem).cpuq.tx.set_write_ptr(wptr) };
-
- // Ensure all command data is visible before triggering the GSP read.
- fence(Ordering::SeqCst);
+ super::fw::gsp_mem::advance_cpu_write_ptr(&self.0, elem_count)
}
}
diff --git a/drivers/gpu/nova-core/gsp/fw.rs b/drivers/gpu/nova-core/gsp/fw.rs
index 83ff91614e36..040b30ec3089 100644
--- a/drivers/gpu/nova-core/gsp/fw.rs
+++ b/drivers/gpu/nova-core/gsp/fw.rs
@@ -40,6 +40,75 @@ use crate::{
},
};
+// TODO: Replace with `IoView` projections once available; the `unwrap()` calls go away once we
+// switch to the new `dma::Coherent` API.
+pub(super) mod gsp_mem {
+ use core::sync::atomic::{
+ fence,
+ Ordering, //
+ };
+
+ use kernel::{
+ dma::CoherentAllocation,
+ dma_read,
+ dma_write,
+ prelude::*, //
+ };
+
+ use crate::gsp::cmdq::{
+ GspMem,
+ MSGQ_NUM_PAGES, //
+ };
+
+ pub(in crate::gsp) fn gsp_write_ptr(qs: &CoherentAllocation<GspMem>) -> u32 {
+ // PANIC: A `dma::CoherentAllocation` always contains at least one element.
+ || -> Result<u32> { Ok(dma_read!(qs, [0]?.gspq.tx.0.writePtr) % MSGQ_NUM_PAGES) }().unwrap()
+ }
+
+ pub(in crate::gsp) fn gsp_read_ptr(qs: &CoherentAllocation<GspMem>) -> u32 {
+ // PANIC: A `dma::CoherentAllocation` always contains at least one element.
+ || -> Result<u32> { Ok(dma_read!(qs, [0]?.gspq.rx.0.readPtr) % MSGQ_NUM_PAGES) }().unwrap()
+ }
+
+ pub(in crate::gsp) fn cpu_read_ptr(qs: &CoherentAllocation<GspMem>) -> u32 {
+ // PANIC: A `dma::CoherentAllocation` always contains at least one element.
+ || -> Result<u32> { Ok(dma_read!(qs, [0]?.cpuq.rx.0.readPtr) % MSGQ_NUM_PAGES) }().unwrap()
+ }
+
+ pub(in crate::gsp) fn advance_cpu_read_ptr(qs: &CoherentAllocation<GspMem>, count: u32) {
+ let rptr = cpu_read_ptr(qs).wrapping_add(count) % MSGQ_NUM_PAGES;
+
+ // Ensure read pointer is properly ordered.
+ fence(Ordering::SeqCst);
+
+ // PANIC: A `dma::CoherentAllocation` always contains at least one element.
+ || -> Result {
+ dma_write!(qs, [0]?.cpuq.rx.0.readPtr, rptr);
+ Ok(())
+ }()
+ .unwrap()
+ }
+
+ pub(in crate::gsp) fn cpu_write_ptr(qs: &CoherentAllocation<GspMem>) -> u32 {
+ // PANIC: A `dma::CoherentAllocation` always contains at least one element.
+ || -> Result<u32> { Ok(dma_read!(qs, [0]?.cpuq.tx.0.writePtr) % MSGQ_NUM_PAGES) }().unwrap()
+ }
+
+ pub(in crate::gsp) fn advance_cpu_write_ptr(qs: &CoherentAllocation<GspMem>, count: u32) {
+ let wptr = cpu_write_ptr(qs).wrapping_add(count) % MSGQ_NUM_PAGES;
+
+ // PANIC: A `dma::CoherentAllocation` always contains at least one element.
+ || -> Result {
+ dma_write!(qs, [0]?.cpuq.tx.0.writePtr, wptr);
+ Ok(())
+ }()
+ .unwrap();
+
+ // Ensure all command data is visible before triggering the GSP read.
+ fence(Ordering::SeqCst);
+ }
+}
+
/// Empty type to group methods related to heap parameters for running the GSP firmware.
enum GspFwHeapParams {}
@@ -708,22 +777,6 @@ impl MsgqTxHeader {
entryOff: num::usize_into_u32::<GSP_PAGE_SIZE>(),
})
}
-
- /// Returns the value of the write pointer for this queue.
- pub(crate) fn write_ptr(&self) -> u32 {
- let ptr = core::ptr::from_ref(&self.0.writePtr);
-
- // SAFETY: `ptr` is a valid pointer to a `u32`.
- unsafe { ptr.read_volatile() }
- }
-
- /// Sets the value of the write pointer for this queue.
- pub(crate) fn set_write_ptr(&mut self, val: u32) {
- let ptr = core::ptr::from_mut(&mut self.0.writePtr);
-
- // SAFETY: `ptr` is a valid pointer to a `u32`.
- unsafe { ptr.write_volatile(val) }
- }
}
// SAFETY: Padding is explicit and does not contain uninitialized data.
@@ -739,22 +792,6 @@ impl MsgqRxHeader {
pub(crate) fn new() -> Self {
Self(Default::default())
}
-
- /// Returns the value of the read pointer for this queue.
- pub(crate) fn read_ptr(&self) -> u32 {
- let ptr = core::ptr::from_ref(&self.0.readPtr);
-
- // SAFETY: `ptr` is a valid pointer to a `u32`.
- unsafe { ptr.read_volatile() }
- }
-
- /// Sets the value of the read pointer for this queue.
- pub(crate) fn set_read_ptr(&mut self, val: u32) {
- let ptr = core::ptr::from_mut(&mut self.0.readPtr);
-
- // SAFETY: `ptr` is a valid pointer to a `u32`.
- unsafe { ptr.write_volatile(val) }
- }
}
// SAFETY: Padding is explicit and does not contain uninitialized data.
diff --git a/drivers/hid/bpf/hid_bpf_dispatch.c b/drivers/hid/bpf/hid_bpf_dispatch.c
index f3d15994ca1e..50c7b45c59e3 100644
--- a/drivers/hid/bpf/hid_bpf_dispatch.c
+++ b/drivers/hid/bpf/hid_bpf_dispatch.c
@@ -444,6 +444,8 @@ hid_bpf_hw_request(struct hid_bpf_ctx *ctx, __u8 *buf, size_t buf__sz,
(u64)(long)ctx,
true); /* prevent infinite recursions */
+ if (ret > size)
+ ret = size;
if (ret > 0)
memcpy(buf, dma_data, ret);
diff --git a/drivers/hid/hid-appletb-kbd.c b/drivers/hid/hid-appletb-kbd.c
index a1db3b3d0667..0fdc0968b9ef 100644
--- a/drivers/hid/hid-appletb-kbd.c
+++ b/drivers/hid/hid-appletb-kbd.c
@@ -476,7 +476,7 @@ static int appletb_kbd_suspend(struct hid_device *hdev, pm_message_t msg)
return 0;
}
-static int appletb_kbd_reset_resume(struct hid_device *hdev)
+static int appletb_kbd_resume(struct hid_device *hdev)
{
struct appletb_kbd *kbd = hid_get_drvdata(hdev);
@@ -500,7 +500,8 @@ static struct hid_driver appletb_kbd_hid_driver = {
.event = appletb_kbd_hid_event,
.input_configured = appletb_kbd_input_configured,
.suspend = pm_ptr(appletb_kbd_suspend),
- .reset_resume = pm_ptr(appletb_kbd_reset_resume),
+ .resume = pm_ptr(appletb_kbd_resume),
+ .reset_resume = pm_ptr(appletb_kbd_resume),
.driver.dev_groups = appletb_kbd_groups,
};
module_hid_driver(appletb_kbd_hid_driver);
diff --git a/drivers/hid/hid-asus.c b/drivers/hid/hid-asus.c
index 687b785e2d0c..bc93b27f9b13 100644
--- a/drivers/hid/hid-asus.c
+++ b/drivers/hid/hid-asus.c
@@ -1498,6 +1498,9 @@ static const struct hid_device_id asus_devices[] = {
USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X),
QUIRK_USE_KBD_BACKLIGHT | QUIRK_ROG_NKEY_KEYBOARD | QUIRK_ROG_ALLY_XPAD },
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
+ USB_DEVICE_ID_ASUSTEK_XGM_2022),
+ },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
USB_DEVICE_ID_ASUSTEK_XGM_2023),
},
{ HID_USB_DEVICE(USB_VENDOR_ID_ASUSTEK,
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 840a60113868..833df14ef68f 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2057,9 +2057,10 @@ int hid_report_raw_event(struct hid_device *hid, enum hid_report_type type, u8 *
rsize = max_buffer_size;
if (csize < rsize) {
- dbg_hid("report %d is too short, (%d < %d)\n", report->id,
- csize, rsize);
- memset(cdata + csize, 0, rsize - csize);
+ hid_warn_ratelimited(hid, "Event data for report %d was too short (%d vs %d)\n",
+ report->id, rsize, csize);
+ ret = -EINVAL;
+ goto out;
}
if ((hid->claimed & HID_CLAIMED_HIDDEV) && hid->hiddev_report_event)
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 4ab7640b119a..afcee13bad61 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -229,6 +229,7 @@
#define USB_DEVICE_ID_ASUSTEK_ROG_NKEY_ALLY_X 0x1b4c
#define USB_DEVICE_ID_ASUSTEK_ROG_CLAYMORE_II_KEYBOARD 0x196b
#define USB_DEVICE_ID_ASUSTEK_FX503VD_KEYBOARD 0x1869
+#define USB_DEVICE_ID_ASUSTEK_XGM_2022 0x1970
#define USB_DEVICE_ID_ASUSTEK_XGM_2023 0x1a9a
#define USB_VENDOR_ID_ATEN 0x0557
@@ -454,8 +455,6 @@
#define USB_DEVICE_ID_TOSHIBA_CLICK_L9W 0x0401
#define USB_DEVICE_ID_HP_X2 0x074d
#define USB_DEVICE_ID_HP_X2_10_COVER 0x0755
-#define USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN 0x2544
-#define USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN 0x2706
#define I2C_DEVICE_ID_CHROMEBOOK_TROGDOR_POMPOM 0x2F81
#define USB_VENDOR_ID_ELECOM 0x056e
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index d5308adb2894..9475b7e9da43 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -354,6 +354,7 @@ static enum power_supply_property hidinput_battery_props[] = {
#define HID_BATTERY_QUIRK_FEATURE (1 << 1) /* ask for feature report */
#define HID_BATTERY_QUIRK_IGNORE (1 << 2) /* completely ignore the battery */
#define HID_BATTERY_QUIRK_AVOID_QUERY (1 << 3) /* do not query the battery */
+#define HID_BATTERY_QUIRK_DYNAMIC (1 << 4) /* report present only after life signs */
static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
@@ -386,10 +387,6 @@ static const struct hid_device_id hid_battery_quirks[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
USB_DEVICE_ID_LOGITECH_DINOVO_EDGE_KBD),
HID_BATTERY_QUIRK_IGNORE },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550_TOUCHSCREEN),
- HID_BATTERY_QUIRK_IGNORE },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ASUS_UX550VE_TOUCHSCREEN),
- HID_BATTERY_QUIRK_IGNORE },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_L),
HID_BATTERY_QUIRK_AVOID_QUERY },
{ HID_USB_DEVICE(USB_VENDOR_ID_UGEE, USB_DEVICE_ID_UGEE_XPPEN_TABLET_DECO_PRO_MW),
@@ -402,8 +399,8 @@ static const struct hid_device_id hid_battery_quirks[] = {
* Elan HID touchscreens seem to all report a non present battery,
* set HID_BATTERY_QUIRK_IGNORE for all Elan I2C and USB HID devices.
*/
- { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE },
- { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_IGNORE },
+ { HID_I2C_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_DYNAMIC },
+ { HID_USB_DEVICE(USB_VENDOR_ID_ELAN, HID_ANY_ID), HID_BATTERY_QUIRK_DYNAMIC },
{}
};
@@ -460,11 +457,14 @@ static int hidinput_get_battery_property(struct power_supply *psy,
int ret = 0;
switch (prop) {
- case POWER_SUPPLY_PROP_PRESENT:
case POWER_SUPPLY_PROP_ONLINE:
val->intval = 1;
break;
+ case POWER_SUPPLY_PROP_PRESENT:
+ val->intval = dev->battery_present;
+ break;
+
case POWER_SUPPLY_PROP_CAPACITY:
if (dev->battery_status != HID_BATTERY_REPORTED &&
!dev->battery_avoid_query) {
@@ -577,6 +577,8 @@ static int hidinput_setup_battery(struct hid_device *dev, unsigned report_type,
if (quirks & HID_BATTERY_QUIRK_AVOID_QUERY)
dev->battery_avoid_query = true;
+ dev->battery_present = (quirks & HID_BATTERY_QUIRK_DYNAMIC) ? false : true;
+
dev->battery = power_supply_register(&dev->dev, psy_desc, &psy_cfg);
if (IS_ERR(dev->battery)) {
error = PTR_ERR(dev->battery);
@@ -632,6 +634,7 @@ static void hidinput_update_battery(struct hid_device *dev, unsigned int usage,
return;
if (hidinput_update_battery_charge_status(dev, usage, value)) {
+ dev->battery_present = true;
power_supply_changed(dev->battery);
return;
}
@@ -647,6 +650,7 @@ static void hidinput_update_battery(struct hid_device *dev, unsigned int usage,
if (dev->battery_status != HID_BATTERY_REPORTED ||
capacity != dev->battery_capacity ||
ktime_after(ktime_get_coarse(), dev->battery_ratelimit_time)) {
+ dev->battery_present = true;
dev->battery_capacity = capacity;
dev->battery_status = HID_BATTERY_REPORTED;
dev->battery_ratelimit_time =
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index d40932809ce1..d1dea7297712 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -4487,10 +4487,12 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (!ret)
ret = hidpp_ff_init(hidpp, &data);
- if (ret)
+ if (ret) {
hid_warn(hidpp->hid_dev,
"Unable to initialize force feedback support, errno %d\n",
ret);
+ ret = 0;
+ }
}
/*
@@ -4668,6 +4670,8 @@ static const struct hid_device_id hidpp_devices[] = {
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb038) },
{ /* Slim Solar+ K980 Keyboard over Bluetooth */
HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb391) },
+ { /* MX Master 4 mouse over Bluetooth */
+ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, 0xb042) },
{}
};
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index b8a748bbf0fd..e82a3c4e5b44 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -526,12 +526,19 @@ static void mt_get_feature(struct hid_device *hdev, struct hid_report *report)
dev_warn(&hdev->dev, "failed to fetch feature %d\n",
report->id);
} else {
+ /* The report ID in the request and the response should match */
+ if (report->id != buf[0]) {
+ hid_err(hdev, "Returned feature report did not match the request\n");
+ goto free;
+ }
+
ret = hid_report_raw_event(hdev, HID_FEATURE_REPORT, buf,
size, 0);
if (ret)
dev_warn(&hdev->dev, "failed to report feature\n");
}
+free:
kfree(buf);
}
diff --git a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c
index f9fcb398673b..8075992e8732 100644
--- a/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c
+++ b/drivers/hid/intel-thc-hid/intel-quicki2c/quicki2c-hid.c
@@ -127,6 +127,7 @@ int quicki2c_hid_probe(struct quicki2c_device *qcdev)
hid->product = le16_to_cpu(qcdev->dev_desc.product_id);
snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "quicki2c-hid",
hid->vendor, hid->product);
+ strscpy(hid->phys, dev_name(qcdev->dev), sizeof(hid->phys));
ret = hid_add_device(hid);
if (ret) {
diff --git a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c
index 82c72bfa2795..91d5807b4a83 100644
--- a/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c
+++ b/drivers/hid/intel-thc-hid/intel-quickspi/quickspi-hid.c
@@ -118,6 +118,7 @@ int quickspi_hid_probe(struct quickspi_device *qsdev)
hid->product = le16_to_cpu(qsdev->dev_desc.product_id);
snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X", "quickspi-hid",
hid->vendor, hid->product);
+ strscpy(hid->phys, dev_name(qsdev->dev), sizeof(hid->phys));
ret = hid_add_device(hid);
if (ret) {
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 9b2c710f8da1..da1f0ea85625 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -1208,10 +1208,20 @@ static int wacom_intuos_bt_irq(struct wacom_wac *wacom, size_t len)
switch (data[0]) {
case 0x04:
+ if (len < 32) {
+ dev_warn(wacom->pen_input->dev.parent,
+ "Report 0x04 too short: %zu bytes\n", len);
+ break;
+ }
wacom_intuos_bt_process_data(wacom, data + i);
i += 10;
fallthrough;
case 0x03:
+ if (i == 1 && len < 22) {
+ dev_warn(wacom->pen_input->dev.parent,
+ "Report 0x03 too short: %zu bytes\n", len);
+ break;
+ }
wacom_intuos_bt_process_data(wacom, data + i);
i += 10;
wacom_intuos_bt_process_data(wacom, data + i);
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 486152a8ea77..328867242cb3 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1493,8 +1493,7 @@ config SENSORS_LM73
config SENSORS_LM75
tristate "National Semiconductor LM75 and compatibles"
- depends on I2C
- depends on I3C || !I3C
+ depends on I3C_OR_I2C
select REGMAP_I2C
select REGMAP_I3C if I3C
help
@@ -2382,8 +2381,7 @@ config SENSORS_TMP103
config SENSORS_TMP108
tristate "Texas Instruments TMP108"
- depends on I2C
- depends on I3C || !I3C
+ depends on I3C_OR_I2C
select REGMAP_I2C
select REGMAP_I3C if I3C
help
diff --git a/drivers/i3c/Kconfig b/drivers/i3c/Kconfig
index 30a441506f61..626c54b386d5 100644
--- a/drivers/i3c/Kconfig
+++ b/drivers/i3c/Kconfig
@@ -22,3 +22,15 @@ menuconfig I3C
if I3C
source "drivers/i3c/master/Kconfig"
endif # I3C
+
+config I3C_OR_I2C
+ tristate
+ default m if I3C=m
+ default I2C
+ help
+ Device drivers using module_i3c_i2c_driver() can use either
+ i2c or i3c hosts, but cannot be built-in for the kernel when
+ CONFIG_I3C=m.
+
+ Add 'depends on I3C_OR_I2C' in Kconfig for those drivers to
+ get the correct dependencies.
diff --git a/drivers/i3c/master/dw-i3c-master.c b/drivers/i3c/master/dw-i3c-master.c
index d87bde3f7700..d6bdb32397fb 100644
--- a/drivers/i3c/master/dw-i3c-master.c
+++ b/drivers/i3c/master/dw-i3c-master.c
@@ -1024,7 +1024,7 @@ static int dw_i3c_master_reattach_i3c_dev(struct i3c_dev_desc *dev,
master->free_pos &= ~BIT(pos);
}
- writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr),
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(dev->info.dyn_addr) | DEV_ADDR_TABLE_SIR_REJECT,
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
@@ -1053,7 +1053,7 @@ static int dw_i3c_master_attach_i3c_dev(struct i3c_dev_desc *dev)
master->free_pos &= ~BIT(pos);
i3c_dev_set_master_data(dev, data);
- writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr),
+ writel(DEV_ADDR_TABLE_DYNAMIC_ADDR(master->devs[pos].addr) | DEV_ADDR_TABLE_SIR_REJECT,
master->regs +
DEV_ADDR_TABLE_LOC(master->datstartaddr, data->index));
@@ -1659,6 +1659,8 @@ int dw_i3c_common_probe(struct dw_i3c_master *master,
pm_runtime_get_noresume(&pdev->dev);
INIT_WORK(&master->hj_work, dw_i3c_hj_work);
+
+ device_set_of_node_from_dev(&master->base.i2c.dev, &pdev->dev);
ret = i3c_master_register(&master->base, &pdev->dev,
&dw_mipi_i3c_ops, false);
if (ret)
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd.h b/drivers/i3c/master/mipi-i3c-hci/cmd.h
index 1d6dd2c5d01a..b1bf87daa651 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd.h
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd.h
@@ -17,6 +17,7 @@
#define CMD_0_TOC W0_BIT_(31)
#define CMD_0_ROC W0_BIT_(30)
#define CMD_0_ATTR W0_MASK(2, 0)
+#define CMD_0_TID W0_MASK(6, 3)
/*
* Response Descriptor Structure
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
index fe260461e7e6..75d452d7f6af 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v1.c
@@ -331,12 +331,10 @@ static int hci_cmd_v1_daa(struct i3c_hci *hci)
CMD_A0_ROC | CMD_A0_TOC;
xfer->cmd_desc[1] = 0;
xfer->completion = &done;
- hci->io->queue_xfer(hci, xfer, 1);
- if (!wait_for_completion_timeout(&done, HZ) &&
- hci->io->dequeue_xfer(hci, xfer, 1)) {
- ret = -ETIME;
+ xfer->timeout = HZ;
+ ret = i3c_hci_process_xfer(hci, xfer, 1);
+ if (ret)
break;
- }
if ((RESP_STATUS(xfer->response) == RESP_ERR_ADDR_HEADER ||
RESP_STATUS(xfer->response) == RESP_ERR_NACK) &&
RESP_DATA_LENGTH(xfer->response) == 1) {
diff --git a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
index 3729e6419581..39eec26a363c 100644
--- a/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
+++ b/drivers/i3c/master/mipi-i3c-hci/cmd_v2.c
@@ -253,6 +253,7 @@ static int hci_cmd_v2_daa(struct i3c_hci *hci)
xfer[0].rnw = true;
xfer[0].cmd_desc[1] = CMD_A1_DATA_LENGTH(8);
xfer[1].completion = &done;
+ xfer[1].timeout = HZ;
for (;;) {
ret = i3c_master_get_free_addr(&hci->master, next_addr);
@@ -272,12 +273,9 @@ static int hci_cmd_v2_daa(struct i3c_hci *hci)
CMD_A0_ASSIGN_ADDRESS(next_addr) |
CMD_A0_ROC |
CMD_A0_TOC;
- hci->io->queue_xfer(hci, xfer, 2);
- if (!wait_for_completion_timeout(&done, HZ) &&
- hci->io->dequeue_xfer(hci, xfer, 2)) {
- ret = -ETIME;
+ ret = i3c_hci_process_xfer(hci, xfer, 2);
+ if (ret)
break;
- }
if (RESP_STATUS(xfer[0].response) != RESP_SUCCESS) {
ret = 0; /* no more devices to be assigned */
break;
diff --git a/drivers/i3c/master/mipi-i3c-hci/core.c b/drivers/i3c/master/mipi-i3c-hci/core.c
index 5879bba78164..284f3ed7af8c 100644
--- a/drivers/i3c/master/mipi-i3c-hci/core.c
+++ b/drivers/i3c/master/mipi-i3c-hci/core.c
@@ -152,7 +152,11 @@ static int i3c_hci_bus_init(struct i3c_master_controller *m)
if (hci->quirks & HCI_QUIRK_RESP_BUF_THLD)
amd_set_resp_buf_thld(hci);
- reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
+ scoped_guard(spinlock_irqsave, &hci->lock)
+ hci->irq_inactive = false;
+
+ /* Enable bus with Hot-Join disabled */
+ reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE | HC_CONTROL_HOT_JOIN_CTRL);
dev_dbg(&hci->master.dev, "HC_CONTROL = %#x", reg_read(HC_CONTROL));
return 0;
@@ -177,21 +181,51 @@ static int i3c_hci_bus_disable(struct i3c_hci *hci)
return ret;
}
+static int i3c_hci_software_reset(struct i3c_hci *hci)
+{
+ u32 regval;
+ int ret;
+
+ /*
+ * SOFT_RST must be clear before we write to it.
+ * Then we must wait until it clears again.
+ */
+ ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
+ !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
+ if (ret) {
+ dev_err(&hci->master.dev, "%s: Software reset stuck\n", __func__);
+ return ret;
+ }
+
+ reg_write(RESET_CONTROL, SOFT_RST);
+
+ ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
+ !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
+ if (ret) {
+ dev_err(&hci->master.dev, "%s: Software reset failed\n", __func__);
+ return ret;
+ }
+
+ return 0;
+}
+
void i3c_hci_sync_irq_inactive(struct i3c_hci *hci)
{
struct platform_device *pdev = to_platform_device(hci->master.dev.parent);
int irq = platform_get_irq(pdev, 0);
reg_write(INTR_SIGNAL_ENABLE, 0x0);
- hci->irq_inactive = true;
synchronize_irq(irq);
+ scoped_guard(spinlock_irqsave, &hci->lock)
+ hci->irq_inactive = true;
}
static void i3c_hci_bus_cleanup(struct i3c_master_controller *m)
{
struct i3c_hci *hci = to_i3c_hci(m);
- i3c_hci_bus_disable(hci);
+ if (i3c_hci_bus_disable(hci))
+ i3c_hci_software_reset(hci);
hci->io->cleanup(hci);
}
@@ -212,6 +246,36 @@ void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci)
reg_write(DCT_SECTION, FIELD_PREP(DCT_TABLE_INDEX, 0));
}
+int i3c_hci_process_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
+{
+ struct completion *done = xfer[n - 1].completion;
+ unsigned long timeout = xfer[n - 1].timeout;
+ int ret;
+
+ ret = hci->io->queue_xfer(hci, xfer, n);
+ if (ret)
+ return ret;
+
+ if (!wait_for_completion_timeout(done, timeout)) {
+ if (hci->io->dequeue_xfer(hci, xfer, n)) {
+ dev_err(&hci->master.dev, "%s: timeout error\n", __func__);
+ return -ETIMEDOUT;
+ }
+ return 0;
+ }
+
+ if (hci->io->handle_error) {
+ bool error = false;
+
+ for (int i = 0; i < n && !error; i++)
+ error = RESP_STATUS(xfer[i].response);
+ if (error)
+ return hci->io->handle_error(hci, xfer, n);
+ }
+
+ return 0;
+}
+
static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
struct i3c_ccc_cmd *ccc)
{
@@ -252,18 +316,14 @@ static int i3c_hci_send_ccc_cmd(struct i3c_master_controller *m,
last = i - 1;
xfer[last].cmd_desc[0] |= CMD_0_TOC;
xfer[last].completion = &done;
+ xfer[last].timeout = HZ;
if (prefixed)
xfer--;
- ret = hci->io->queue_xfer(hci, xfer, nxfers);
+ ret = i3c_hci_process_xfer(hci, xfer, nxfers);
if (ret)
goto out;
- if (!wait_for_completion_timeout(&done, HZ) &&
- hci->io->dequeue_xfer(hci, xfer, nxfers)) {
- ret = -ETIME;
- goto out;
- }
for (i = prefixed; i < nxfers; i++) {
if (ccc->rnw)
ccc->dests[i - prefixed].payload.len =
@@ -334,15 +394,11 @@ static int i3c_hci_i3c_xfers(struct i3c_dev_desc *dev,
last = i - 1;
xfer[last].cmd_desc[0] |= CMD_0_TOC;
xfer[last].completion = &done;
+ xfer[last].timeout = HZ;
- ret = hci->io->queue_xfer(hci, xfer, nxfers);
+ ret = i3c_hci_process_xfer(hci, xfer, nxfers);
if (ret)
goto out;
- if (!wait_for_completion_timeout(&done, HZ) &&
- hci->io->dequeue_xfer(hci, xfer, nxfers)) {
- ret = -ETIME;
- goto out;
- }
for (i = 0; i < nxfers; i++) {
if (i3c_xfers[i].rnw)
i3c_xfers[i].len = RESP_DATA_LENGTH(xfer[i].response);
@@ -382,15 +438,11 @@ static int i3c_hci_i2c_xfers(struct i2c_dev_desc *dev,
last = i - 1;
xfer[last].cmd_desc[0] |= CMD_0_TOC;
xfer[last].completion = &done;
+ xfer[last].timeout = m->i2c.timeout;
- ret = hci->io->queue_xfer(hci, xfer, nxfers);
+ ret = i3c_hci_process_xfer(hci, xfer, nxfers);
if (ret)
goto out;
- if (!wait_for_completion_timeout(&done, m->i2c.timeout) &&
- hci->io->dequeue_xfer(hci, xfer, nxfers)) {
- ret = -ETIME;
- goto out;
- }
for (i = 0; i < nxfers; i++) {
if (RESP_STATUS(xfer[i].response) != RESP_SUCCESS) {
ret = -EIO;
@@ -566,6 +618,8 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
irqreturn_t result = IRQ_NONE;
u32 val;
+ guard(spinlock)(&hci->lock);
+
/*
* The IRQ can be shared, so the handler may be called when the IRQ is
* due to a different device. That could happen when runtime suspended,
@@ -601,34 +655,6 @@ static irqreturn_t i3c_hci_irq_handler(int irq, void *dev_id)
return result;
}
-static int i3c_hci_software_reset(struct i3c_hci *hci)
-{
- u32 regval;
- int ret;
-
- /*
- * SOFT_RST must be clear before we write to it.
- * Then we must wait until it clears again.
- */
- ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
- !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
- if (ret) {
- dev_err(&hci->master.dev, "%s: Software reset stuck\n", __func__);
- return ret;
- }
-
- reg_write(RESET_CONTROL, SOFT_RST);
-
- ret = readx_poll_timeout(reg_read, RESET_CONTROL, regval,
- !(regval & SOFT_RST), 0, 10 * USEC_PER_MSEC);
- if (ret) {
- dev_err(&hci->master.dev, "%s: Software reset failed\n", __func__);
- return ret;
- }
-
- return 0;
-}
-
static inline bool is_version_1_1_or_newer(struct i3c_hci *hci)
{
return hci->version_major > 1 || (hci->version_major == 1 && hci->version_minor > 0);
@@ -739,8 +765,12 @@ static int i3c_hci_runtime_suspend(struct device *dev)
int ret;
ret = i3c_hci_bus_disable(hci);
- if (ret)
+ if (ret) {
+ /* Fall back to software reset to disable the bus */
+ ret = i3c_hci_software_reset(hci);
+ i3c_hci_sync_irq_inactive(hci);
return ret;
+ }
hci->io->suspend(hci);
@@ -760,11 +790,13 @@ static int i3c_hci_runtime_resume(struct device *dev)
mipi_i3c_hci_dat_v1.restore(hci);
- hci->irq_inactive = false;
-
hci->io->resume(hci);
- reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE);
+ scoped_guard(spinlock_irqsave, &hci->lock)
+ hci->irq_inactive = false;
+
+ /* Enable bus with Hot-Join disabled */
+ reg_set(HC_CONTROL, HC_CONTROL_BUS_ENABLE | HC_CONTROL_HOT_JOIN_CTRL);
return 0;
}
@@ -924,6 +956,9 @@ static int i3c_hci_probe(struct platform_device *pdev)
if (!hci)
return -ENOMEM;
+ spin_lock_init(&hci->lock);
+ mutex_init(&hci->control_mutex);
+
/*
* Multi-bus instances share the same MMIO address range, but not
* necessarily in separate contiguous sub-ranges. To avoid overlapping
@@ -950,6 +985,8 @@ static int i3c_hci_probe(struct platform_device *pdev)
if (ret)
return ret;
+ hci->irq_inactive = true;
+
irq = platform_get_irq(pdev, 0);
ret = devm_request_irq(&pdev->dev, irq, i3c_hci_irq_handler,
IRQF_SHARED, NULL, hci);
diff --git a/drivers/i3c/master/mipi-i3c-hci/dma.c b/drivers/i3c/master/mipi-i3c-hci/dma.c
index b903a2da1fd1..e487ef52f6b4 100644
--- a/drivers/i3c/master/mipi-i3c-hci/dma.c
+++ b/drivers/i3c/master/mipi-i3c-hci/dma.c
@@ -129,9 +129,8 @@ struct hci_rh_data {
dma_addr_t xfer_dma, resp_dma, ibi_status_dma, ibi_data_dma;
unsigned int xfer_entries, ibi_status_entries, ibi_chunks_total;
unsigned int xfer_struct_sz, resp_struct_sz, ibi_status_sz, ibi_chunk_sz;
- unsigned int done_ptr, ibi_chunk_ptr;
+ unsigned int done_ptr, ibi_chunk_ptr, xfer_space;
struct hci_xfer **src_xfers;
- spinlock_t lock;
struct completion op_done;
};
@@ -261,6 +260,7 @@ ring_ready:
rh->done_ptr = 0;
rh->ibi_chunk_ptr = 0;
+ rh->xfer_space = rh->xfer_entries;
}
static void hci_dma_init_rings(struct i3c_hci *hci)
@@ -344,7 +344,6 @@ static int hci_dma_init(struct i3c_hci *hci)
goto err_out;
rh = &rings->headers[i];
rh->regs = hci->base_regs + offset;
- spin_lock_init(&rh->lock);
init_completion(&rh->op_done);
rh->xfer_entries = XFER_RING_ENTRIES;
@@ -439,26 +438,63 @@ static void hci_dma_unmap_xfer(struct i3c_hci *hci,
}
}
+static struct i3c_dma *hci_dma_map_xfer(struct device *dev, struct hci_xfer *xfer)
+{
+ enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+ bool need_bounce = device_iommu_mapped(dev) && xfer->rnw && (xfer->data_len & 3);
+
+ return i3c_master_dma_map_single(dev, xfer->data, xfer->data_len, need_bounce, dir);
+}
+
+static int hci_dma_map_xfer_list(struct i3c_hci *hci, struct device *dev,
+ struct hci_xfer *xfer_list, int n)
+{
+ for (int i = 0; i < n; i++) {
+ struct hci_xfer *xfer = xfer_list + i;
+
+ if (!xfer->data)
+ continue;
+
+ xfer->dma = hci_dma_map_xfer(dev, xfer);
+ if (!xfer->dma) {
+ hci_dma_unmap_xfer(hci, xfer_list, i);
+ return -ENOMEM;
+ }
+ }
+
+ return 0;
+}
+
static int hci_dma_queue_xfer(struct i3c_hci *hci,
struct hci_xfer *xfer_list, int n)
{
struct hci_rings_data *rings = hci->io_data;
struct hci_rh_data *rh;
unsigned int i, ring, enqueue_ptr;
- u32 op1_val, op2_val;
+ u32 op1_val;
+ int ret;
+
+ ret = hci_dma_map_xfer_list(hci, rings->sysdev, xfer_list, n);
+ if (ret)
+ return ret;
/* For now we only use ring 0 */
ring = 0;
rh = &rings->headers[ring];
+ spin_lock_irq(&hci->lock);
+
+ if (n > rh->xfer_space) {
+ spin_unlock_irq(&hci->lock);
+ hci_dma_unmap_xfer(hci, xfer_list, n);
+ return -EBUSY;
+ }
+
op1_val = rh_reg_read(RING_OPERATION1);
enqueue_ptr = FIELD_GET(RING_OP1_CR_ENQ_PTR, op1_val);
for (i = 0; i < n; i++) {
struct hci_xfer *xfer = xfer_list + i;
u32 *ring_data = rh->xfer + rh->xfer_struct_sz * enqueue_ptr;
- enum dma_data_direction dir = xfer->rnw ? DMA_FROM_DEVICE :
- DMA_TO_DEVICE;
- bool need_bounce;
/* store cmd descriptor */
*ring_data++ = xfer->cmd_desc[0];
@@ -477,18 +513,6 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
/* 2nd and 3rd words of Data Buffer Descriptor Structure */
if (xfer->data) {
- need_bounce = device_iommu_mapped(rings->sysdev) &&
- xfer->rnw &&
- xfer->data_len != ALIGN(xfer->data_len, 4);
- xfer->dma = i3c_master_dma_map_single(rings->sysdev,
- xfer->data,
- xfer->data_len,
- need_bounce,
- dir);
- if (!xfer->dma) {
- hci_dma_unmap_xfer(hci, xfer_list, i);
- return -ENOMEM;
- }
*ring_data++ = lower_32_bits(xfer->dma->addr);
*ring_data++ = upper_32_bits(xfer->dma->addr);
} else {
@@ -503,26 +527,14 @@ static int hci_dma_queue_xfer(struct i3c_hci *hci,
xfer->ring_entry = enqueue_ptr;
enqueue_ptr = (enqueue_ptr + 1) % rh->xfer_entries;
-
- /*
- * We may update the hardware view of the enqueue pointer
- * only if we didn't reach its dequeue pointer.
- */
- op2_val = rh_reg_read(RING_OPERATION2);
- if (enqueue_ptr == FIELD_GET(RING_OP2_CR_DEQ_PTR, op2_val)) {
- /* the ring is full */
- hci_dma_unmap_xfer(hci, xfer_list, i + 1);
- return -EBUSY;
- }
}
- /* take care to update the hardware enqueue pointer atomically */
- spin_lock_irq(&rh->lock);
- op1_val = rh_reg_read(RING_OPERATION1);
+ rh->xfer_space -= n;
+
op1_val &= ~RING_OP1_CR_ENQ_PTR;
op1_val |= FIELD_PREP(RING_OP1_CR_ENQ_PTR, enqueue_ptr);
rh_reg_write(RING_OPERATION1, op1_val);
- spin_unlock_irq(&rh->lock);
+ spin_unlock_irq(&hci->lock);
return 0;
}
@@ -534,18 +546,29 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
struct hci_rh_data *rh = &rings->headers[xfer_list[0].ring_number];
unsigned int i;
bool did_unqueue = false;
-
- /* stop the ring */
- rh_reg_write(RING_CONTROL, RING_CTRL_ABORT);
- if (wait_for_completion_timeout(&rh->op_done, HZ) == 0) {
- /*
- * We're deep in it if ever this condition is ever met.
- * Hardware might still be writing to memory, etc.
- */
- dev_crit(&hci->master.dev, "unable to abort the ring\n");
- WARN_ON(1);
+ u32 ring_status;
+
+ guard(mutex)(&hci->control_mutex);
+
+ ring_status = rh_reg_read(RING_STATUS);
+ if (ring_status & RING_STATUS_RUNNING) {
+ /* stop the ring */
+ reinit_completion(&rh->op_done);
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_ABORT);
+ wait_for_completion_timeout(&rh->op_done, HZ);
+ ring_status = rh_reg_read(RING_STATUS);
+ if (ring_status & RING_STATUS_RUNNING) {
+ /*
+ * We're deep in it if this condition is ever met.
+ * Hardware might still be writing to memory, etc.
+ */
+ dev_crit(&hci->master.dev, "unable to abort the ring\n");
+ WARN_ON(1);
+ }
}
+ spin_lock_irq(&hci->lock);
+
for (i = 0; i < n; i++) {
struct hci_xfer *xfer = xfer_list + i;
int idx = xfer->ring_entry;
@@ -559,7 +582,7 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
u32 *ring_data = rh->xfer + rh->xfer_struct_sz * idx;
/* store no-op cmd descriptor */
- *ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7);
+ *ring_data++ = FIELD_PREP(CMD_0_ATTR, 0x7) | FIELD_PREP(CMD_0_TID, xfer->cmd_tid);
*ring_data++ = 0;
if (hci->cmd == &mipi_i3c_hci_cmd_v2) {
*ring_data++ = 0;
@@ -577,15 +600,25 @@ static bool hci_dma_dequeue_xfer(struct i3c_hci *hci,
}
/* restart the ring */
+ mipi_i3c_hci_resume(hci);
rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
+ rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE | RING_CTRL_RUN_STOP);
+
+ spin_unlock_irq(&hci->lock);
return did_unqueue;
}
+static int hci_dma_handle_error(struct i3c_hci *hci, struct hci_xfer *xfer_list, int n)
+{
+ return hci_dma_dequeue_xfer(hci, xfer_list, n) ? -EIO : 0;
+}
+
static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
{
u32 op1_val, op2_val, resp, *ring_resp;
unsigned int tid, done_ptr = rh->done_ptr;
+ unsigned int done_cnt = 0;
struct hci_xfer *xfer;
for (;;) {
@@ -603,6 +636,7 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
dev_dbg(&hci->master.dev, "orphaned ring entry");
} else {
hci_dma_unmap_xfer(hci, xfer, 1);
+ rh->src_xfers[done_ptr] = NULL;
xfer->ring_entry = -1;
xfer->response = resp;
if (tid != xfer->cmd_tid) {
@@ -617,15 +651,14 @@ static void hci_dma_xfer_done(struct i3c_hci *hci, struct hci_rh_data *rh)
done_ptr = (done_ptr + 1) % rh->xfer_entries;
rh->done_ptr = done_ptr;
+ done_cnt += 1;
}
- /* take care to update the software dequeue pointer atomically */
- spin_lock(&rh->lock);
+ rh->xfer_space += done_cnt;
op1_val = rh_reg_read(RING_OPERATION1);
op1_val &= ~RING_OP1_CR_SW_DEQ_PTR;
op1_val |= FIELD_PREP(RING_OP1_CR_SW_DEQ_PTR, done_ptr);
rh_reg_write(RING_OPERATION1, op1_val);
- spin_unlock(&rh->lock);
}
static int hci_dma_request_ibi(struct i3c_hci *hci, struct i3c_dev_desc *dev,
@@ -805,13 +838,10 @@ static void hci_dma_process_ibi(struct i3c_hci *hci, struct hci_rh_data *rh)
i3c_master_queue_ibi(dev, slot);
done:
- /* take care to update the ibi dequeue pointer atomically */
- spin_lock(&rh->lock);
op1_val = rh_reg_read(RING_OPERATION1);
op1_val &= ~RING_OP1_IBI_DEQ_PTR;
op1_val |= FIELD_PREP(RING_OP1_IBI_DEQ_PTR, deq_ptr);
rh_reg_write(RING_OPERATION1, op1_val);
- spin_unlock(&rh->lock);
/* update the chunk pointer */
rh->ibi_chunk_ptr += ibi_chunks;
@@ -845,29 +875,8 @@ static bool hci_dma_irq_handler(struct i3c_hci *hci)
hci_dma_xfer_done(hci, rh);
if (status & INTR_RING_OP)
complete(&rh->op_done);
-
- if (status & INTR_TRANSFER_ABORT) {
- u32 ring_status;
-
- dev_notice_ratelimited(&hci->master.dev,
- "Ring %d: Transfer Aborted\n", i);
- mipi_i3c_hci_resume(hci);
- ring_status = rh_reg_read(RING_STATUS);
- if (!(ring_status & RING_STATUS_RUNNING) &&
- status & INTR_TRANSFER_COMPLETION &&
- status & INTR_TRANSFER_ERR) {
- /*
- * Ring stop followed by run is an Intel
- * specific required quirk after resuming the
- * halted controller. Do it only when the ring
- * is not in running state after a transfer
- * error.
- */
- rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE);
- rh_reg_write(RING_CONTROL, RING_CTRL_ENABLE |
- RING_CTRL_RUN_STOP);
- }
- }
+ if (status & INTR_TRANSFER_ABORT)
+ dev_dbg(&hci->master.dev, "Ring %d: Transfer Aborted\n", i);
if (status & INTR_IBI_RING_FULL)
dev_err_ratelimited(&hci->master.dev,
"Ring %d: IBI Ring Full Condition\n", i);
@@ -883,6 +892,7 @@ const struct hci_io_ops mipi_i3c_hci_dma = {
.cleanup = hci_dma_cleanup,
.queue_xfer = hci_dma_queue_xfer,
.dequeue_xfer = hci_dma_dequeue_xfer,
+ .handle_error = hci_dma_handle_error,
.irq_handler = hci_dma_irq_handler,
.request_ibi = hci_dma_request_ibi,
.free_ibi = hci_dma_free_ibi,
diff --git a/drivers/i3c/master/mipi-i3c-hci/hci.h b/drivers/i3c/master/mipi-i3c-hci/hci.h
index 337b7ab1cb06..9ac9d0e342f4 100644
--- a/drivers/i3c/master/mipi-i3c-hci/hci.h
+++ b/drivers/i3c/master/mipi-i3c-hci/hci.h
@@ -50,6 +50,8 @@ struct i3c_hci {
const struct hci_io_ops *io;
void *io_data;
const struct hci_cmd_ops *cmd;
+ spinlock_t lock;
+ struct mutex control_mutex;
atomic_t next_cmd_tid;
bool irq_inactive;
u32 caps;
@@ -87,6 +89,7 @@ struct hci_xfer {
unsigned int data_len;
unsigned int cmd_tid;
struct completion *completion;
+ unsigned long timeout;
union {
struct {
/* PIO specific */
@@ -120,6 +123,7 @@ struct hci_io_ops {
bool (*irq_handler)(struct i3c_hci *hci);
int (*queue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
bool (*dequeue_xfer)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
+ int (*handle_error)(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
int (*request_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev,
const struct i3c_ibi_setup *req);
void (*free_ibi)(struct i3c_hci *hci, struct i3c_dev_desc *dev);
@@ -154,5 +158,6 @@ void mipi_i3c_hci_dct_index_reset(struct i3c_hci *hci);
void amd_set_od_pp_timing(struct i3c_hci *hci);
void amd_set_resp_buf_thld(struct i3c_hci *hci);
void i3c_hci_sync_irq_inactive(struct i3c_hci *hci);
+int i3c_hci_process_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n);
#endif
diff --git a/drivers/i3c/master/mipi-i3c-hci/pio.c b/drivers/i3c/master/mipi-i3c-hci/pio.c
index f8825ac81408..8f48a81e65ab 100644
--- a/drivers/i3c/master/mipi-i3c-hci/pio.c
+++ b/drivers/i3c/master/mipi-i3c-hci/pio.c
@@ -123,7 +123,6 @@ struct hci_pio_ibi_data {
};
struct hci_pio_data {
- spinlock_t lock;
struct hci_xfer *curr_xfer, *xfer_queue;
struct hci_xfer *curr_rx, *rx_queue;
struct hci_xfer *curr_tx, *tx_queue;
@@ -212,7 +211,6 @@ static int hci_pio_init(struct i3c_hci *hci)
return -ENOMEM;
hci->io_data = pio;
- spin_lock_init(&pio->lock);
__hci_pio_init(hci, &size_val);
@@ -631,7 +629,7 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
xfer[i].data_left = xfer[i].data_len;
}
- spin_lock_irq(&pio->lock);
+ spin_lock_irq(&hci->lock);
prev_queue_tail = pio->xfer_queue;
pio->xfer_queue = &xfer[n - 1];
if (pio->curr_xfer) {
@@ -645,7 +643,7 @@ static int hci_pio_queue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int n)
pio_reg_read(INTR_STATUS),
pio_reg_read(INTR_SIGNAL_ENABLE));
}
- spin_unlock_irq(&pio->lock);
+ spin_unlock_irq(&hci->lock);
return 0;
}
@@ -716,14 +714,14 @@ static bool hci_pio_dequeue_xfer(struct i3c_hci *hci, struct hci_xfer *xfer, int
struct hci_pio_data *pio = hci->io_data;
int ret;
- spin_lock_irq(&pio->lock);
+ spin_lock_irq(&hci->lock);
dev_dbg(&hci->master.dev, "n=%d status=%#x/%#x", n,
pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
dev_dbg(&hci->master.dev, "main_status = %#x/%#x",
readl(hci->base_regs + 0x20), readl(hci->base_regs + 0x28));
ret = hci_pio_dequeue_xfer_common(hci, pio, xfer, n);
- spin_unlock_irq(&pio->lock);
+ spin_unlock_irq(&hci->lock);
return ret;
}
@@ -1016,15 +1014,12 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci)
struct hci_pio_data *pio = hci->io_data;
u32 status;
- spin_lock(&pio->lock);
status = pio_reg_read(INTR_STATUS);
dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
status, pio->enabled_irqs);
status &= pio->enabled_irqs | STAT_LATENCY_WARNINGS;
- if (!status) {
- spin_unlock(&pio->lock);
+ if (!status)
return false;
- }
if (status & STAT_IBI_STATUS_THLD)
hci_pio_process_ibi(hci, pio);
@@ -1058,7 +1053,6 @@ static bool hci_pio_irq_handler(struct i3c_hci *hci)
pio_reg_write(INTR_SIGNAL_ENABLE, pio->enabled_irqs);
dev_dbg(&hci->master.dev, "PIO_INTR_STATUS %#x/%#x",
pio_reg_read(INTR_STATUS), pio_reg_read(INTR_SIGNAL_ENABLE));
- spin_unlock(&pio->lock);
return true;
}
diff --git a/drivers/iio/adc/ad7768-1.c b/drivers/iio/adc/ad7768-1.c
index fcd8aea7152e..e16dede687d3 100644
--- a/drivers/iio/adc/ad7768-1.c
+++ b/drivers/iio/adc/ad7768-1.c
@@ -531,7 +531,7 @@ static int ad7768_reg_access(struct iio_dev *indio_dev,
return ret;
}
-static void ad7768_fill_scale_tbl(struct iio_dev *dev)
+static int ad7768_fill_scale_tbl(struct iio_dev *dev)
{
struct ad7768_state *st = iio_priv(dev);
const struct iio_scan_type *scan_type;
@@ -541,6 +541,11 @@ static void ad7768_fill_scale_tbl(struct iio_dev *dev)
u64 tmp2;
scan_type = iio_get_current_scan_type(dev, &dev->channels[0]);
+ if (IS_ERR(scan_type)) {
+ dev_err(&st->spi->dev, "Failed to get scan type.\n");
+ return PTR_ERR(scan_type);
+ }
+
if (scan_type->sign == 's')
val2 = scan_type->realbits - 1;
else
@@ -565,6 +570,8 @@ static void ad7768_fill_scale_tbl(struct iio_dev *dev)
st->scale_tbl[i][0] = tmp0; /* Integer part */
st->scale_tbl[i][1] = abs(tmp1); /* Fractional part */
}
+
+ return 0;
}
static int ad7768_set_sinc3_dec_rate(struct ad7768_state *st,
@@ -669,7 +676,9 @@ static int ad7768_configure_dig_fil(struct iio_dev *dev,
}
/* Update scale table: scale values vary according to the precision */
- ad7768_fill_scale_tbl(dev);
+ ret = ad7768_fill_scale_tbl(dev);
+ if (ret)
+ return ret;
ad7768_fill_samp_freq_tbl(st);
diff --git a/drivers/iio/chemical/bme680_core.c b/drivers/iio/chemical/bme680_core.c
index 70f81c4a96ba..24e0b59e2fdf 100644
--- a/drivers/iio/chemical/bme680_core.c
+++ b/drivers/iio/chemical/bme680_core.c
@@ -613,7 +613,7 @@ static int bme680_wait_for_eoc(struct bme680_data *data)
* + heater duration
*/
int wait_eoc_us = ((data->oversampling_temp + data->oversampling_press +
- data->oversampling_humid) * 1936) + (477 * 4) +
+ data->oversampling_humid) * 1963) + (477 * 4) +
(477 * 5) + 1000 + (data->heater_dur * 1000);
fsleep(wait_eoc_us);
diff --git a/drivers/iio/chemical/sps30_i2c.c b/drivers/iio/chemical/sps30_i2c.c
index f692c089d17b..c92f04990c34 100644
--- a/drivers/iio/chemical/sps30_i2c.c
+++ b/drivers/iio/chemical/sps30_i2c.c
@@ -171,7 +171,7 @@ static int sps30_i2c_read_meas(struct sps30_state *state, __be32 *meas, size_t n
if (!sps30_i2c_meas_ready(state))
return -ETIMEDOUT;
- return sps30_i2c_command(state, SPS30_I2C_READ_MEAS, NULL, 0, meas, sizeof(num) * num);
+ return sps30_i2c_command(state, SPS30_I2C_READ_MEAS, NULL, 0, meas, sizeof(*meas) * num);
}
static int sps30_i2c_clean_fan(struct sps30_state *state)
diff --git a/drivers/iio/chemical/sps30_serial.c b/drivers/iio/chemical/sps30_serial.c
index 008bc88590f3..a5e6bc08d5fd 100644
--- a/drivers/iio/chemical/sps30_serial.c
+++ b/drivers/iio/chemical/sps30_serial.c
@@ -303,7 +303,7 @@ static int sps30_serial_read_meas(struct sps30_state *state, __be32 *meas, size_
if (msleep_interruptible(1000))
return -EINTR;
- ret = sps30_serial_command(state, SPS30_SERIAL_READ_MEAS, NULL, 0, meas, num * sizeof(num));
+ ret = sps30_serial_command(state, SPS30_SERIAL_READ_MEAS, NULL, 0, meas, num * sizeof(*meas));
if (ret < 0)
return ret;
/* if measurements aren't ready sensor returns empty frame */
diff --git a/drivers/iio/dac/ds4424.c b/drivers/iio/dac/ds4424.c
index 6dda8918975a..c61868f2de31 100644
--- a/drivers/iio/dac/ds4424.c
+++ b/drivers/iio/dac/ds4424.c
@@ -140,7 +140,7 @@ static int ds4424_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (val < S8_MIN || val > S8_MAX)
+ if (val <= S8_MIN || val > S8_MAX)
return -EINVAL;
if (val > 0) {
diff --git a/drivers/iio/frequency/adf4377.c b/drivers/iio/frequency/adf4377.c
index fa686f785fa4..8e2da218d48a 100644
--- a/drivers/iio/frequency/adf4377.c
+++ b/drivers/iio/frequency/adf4377.c
@@ -508,7 +508,7 @@ static int adf4377_soft_reset(struct adf4377_state *st)
return ret;
return regmap_read_poll_timeout(st->regmap, 0x0, read_val,
- !(read_val & (ADF4377_0000_SOFT_RESET_R_MSK |
+ !(read_val & (ADF4377_0000_SOFT_RESET_MSK |
ADF4377_0000_SOFT_RESET_R_MSK)), 200, 200 * 100);
}
diff --git a/drivers/iio/gyro/mpu3050-core.c b/drivers/iio/gyro/mpu3050-core.c
index ee2fcd20545d..317e7b217ec6 100644
--- a/drivers/iio/gyro/mpu3050-core.c
+++ b/drivers/iio/gyro/mpu3050-core.c
@@ -322,7 +322,9 @@ static int mpu3050_read_raw(struct iio_dev *indio_dev,
}
case IIO_CHAN_INFO_RAW:
/* Resume device */
- pm_runtime_get_sync(mpu3050->dev);
+ ret = pm_runtime_resume_and_get(mpu3050->dev);
+ if (ret)
+ return ret;
mutex_lock(&mpu3050->lock);
ret = mpu3050_set_8khz_samplerate(mpu3050);
@@ -647,14 +649,20 @@ out_trigger_unlock:
static int mpu3050_buffer_preenable(struct iio_dev *indio_dev)
{
struct mpu3050 *mpu3050 = iio_priv(indio_dev);
+ int ret;
- pm_runtime_get_sync(mpu3050->dev);
+ ret = pm_runtime_resume_and_get(mpu3050->dev);
+ if (ret)
+ return ret;
/* Unless we have OUR trigger active, run at full speed */
- if (!mpu3050->hw_irq_trigger)
- return mpu3050_set_8khz_samplerate(mpu3050);
+ if (!mpu3050->hw_irq_trigger) {
+ ret = mpu3050_set_8khz_samplerate(mpu3050);
+ if (ret)
+ pm_runtime_put_autosuspend(mpu3050->dev);
+ }
- return 0;
+ return ret;
}
static int mpu3050_buffer_postdisable(struct iio_dev *indio_dev)
diff --git a/drivers/iio/gyro/mpu3050-i2c.c b/drivers/iio/gyro/mpu3050-i2c.c
index 092878f2c886..6549b22e643d 100644
--- a/drivers/iio/gyro/mpu3050-i2c.c
+++ b/drivers/iio/gyro/mpu3050-i2c.c
@@ -19,8 +19,7 @@ static int mpu3050_i2c_bypass_select(struct i2c_mux_core *mux, u32 chan_id)
struct mpu3050 *mpu3050 = i2c_mux_priv(mux);
/* Just power up the device, that is all that is needed */
- pm_runtime_get_sync(mpu3050->dev);
- return 0;
+ return pm_runtime_resume_and_get(mpu3050->dev);
}
static int mpu3050_i2c_bypass_deselect(struct i2c_mux_core *mux, u32 chan_id)
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index d160147cce0b..a2bc1d14ed91 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -526,7 +526,7 @@ int adis_init(struct adis *adis, struct iio_dev *indio_dev,
adis->spi = spi;
adis->data = data;
- if (!adis->ops->write && !adis->ops->read && !adis->ops->reset)
+ if (!adis->ops)
adis->ops = &adis_default_ops;
else if (!adis->ops->write || !adis->ops->read || !adis->ops->reset)
return -EINVAL;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
index 54760d8f92a2..0ab6eddf0543 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_accel.c
@@ -651,6 +651,8 @@ static int inv_icm42600_accel_write_odr(struct iio_dev *indio_dev,
return -EINVAL;
conf.odr = inv_icm42600_accel_odr_conv[idx / 2];
+ if (conf.odr == st->conf.accel.odr)
+ return 0;
pm_runtime_get_sync(dev);
mutex_lock(&st->lock);
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
index ada968be954d..68a395758031 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_buffer.c
@@ -371,6 +371,8 @@ static int inv_icm42600_buffer_predisable(struct iio_dev *indio_dev)
static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
{
struct inv_icm42600_state *st = iio_device_get_drvdata(indio_dev);
+ struct inv_icm42600_sensor_state *sensor_st = iio_priv(indio_dev);
+ struct inv_sensors_timestamp *ts = &sensor_st->ts;
struct device *dev = regmap_get_device(st->map);
unsigned int sensor;
unsigned int *watermark;
@@ -392,6 +394,8 @@ static int inv_icm42600_buffer_postdisable(struct iio_dev *indio_dev)
mutex_lock(&st->lock);
+ inv_sensors_timestamp_apply_odr(ts, 0, 0, 0);
+
ret = inv_icm42600_buffer_set_fifo_en(st, st->fifo.en & ~sensor);
if (ret)
goto out_unlock;
diff --git a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
index 7ef0a25ec74f..11339ddf1da3 100644
--- a/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
+++ b/drivers/iio/imu/inv_icm42600/inv_icm42600_gyro.c
@@ -358,6 +358,8 @@ static int inv_icm42600_gyro_write_odr(struct iio_dev *indio_dev,
return -EINVAL;
conf.odr = inv_icm42600_gyro_odr_conv[idx / 2];
+ if (conf.odr == st->conf.gyro.odr)
+ return 0;
pm_runtime_get_sync(dev);
mutex_lock(&st->lock);
diff --git a/drivers/iio/imu/inv_icm45600/inv_icm45600.h b/drivers/iio/imu/inv_icm45600/inv_icm45600.h
index c5b5446f6c3b..1c796d4b2a40 100644
--- a/drivers/iio/imu/inv_icm45600/inv_icm45600.h
+++ b/drivers/iio/imu/inv_icm45600/inv_icm45600.h
@@ -205,7 +205,7 @@ struct inv_icm45600_sensor_state {
#define INV_ICM45600_SPI_SLEW_RATE_38NS 0
#define INV_ICM45600_REG_INT1_CONFIG2 0x0018
-#define INV_ICM45600_INT1_CONFIG2_PUSH_PULL BIT(2)
+#define INV_ICM45600_INT1_CONFIG2_OPEN_DRAIN BIT(2)
#define INV_ICM45600_INT1_CONFIG2_LATCHED BIT(1)
#define INV_ICM45600_INT1_CONFIG2_ACTIVE_HIGH BIT(0)
#define INV_ICM45600_INT1_CONFIG2_ACTIVE_LOW 0x00
diff --git a/drivers/iio/imu/inv_icm45600/inv_icm45600_core.c b/drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
index 25bd9757a594..d49053161a65 100644
--- a/drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
+++ b/drivers/iio/imu/inv_icm45600/inv_icm45600_core.c
@@ -637,8 +637,8 @@ static int inv_icm45600_irq_init(struct inv_icm45600_state *st, int irq,
break;
}
- if (!open_drain)
- val |= INV_ICM45600_INT1_CONFIG2_PUSH_PULL;
+ if (open_drain)
+ val |= INV_ICM45600_INT1_CONFIG2_OPEN_DRAIN;
ret = regmap_write(st->map, INV_ICM45600_REG_INT1_CONFIG2, val);
if (ret)
@@ -744,6 +744,11 @@ int inv_icm45600_core_probe(struct regmap *regmap, const struct inv_icm45600_chi
*/
fsleep(5 * USEC_PER_MSEC);
+	/* set pm_runtime active early so vddio can be disabled during resource cleanup */
+ ret = pm_runtime_set_active(dev);
+ if (ret)
+ return ret;
+
ret = inv_icm45600_enable_regulator_vddio(st);
if (ret)
return ret;
@@ -776,7 +781,7 @@ int inv_icm45600_core_probe(struct regmap *regmap, const struct inv_icm45600_chi
if (ret)
return ret;
- ret = devm_pm_runtime_set_active_enabled(dev);
+ ret = devm_pm_runtime_enable(dev);
if (ret)
return ret;
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index b2fa1f4957a5..5796896d54cd 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -1943,6 +1943,14 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
irq_type);
return -EINVAL;
}
+
+ /*
+ * Acking interrupts by status register does not work reliably
+	 * but seems to work when this bit is set.
+ */
+ if (st->chip_type == INV_MPU9150)
+ st->irq_mask |= INV_MPU6050_INT_RD_CLEAR;
+
device_set_wakeup_capable(dev, true);
st->vdd_supply = devm_regulator_get(dev, "vdd");
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index 211901f8b8eb..6239b1a803f7 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -390,6 +390,8 @@ struct inv_mpu6050_state {
/* enable level triggering */
#define INV_MPU6050_LATCH_INT_EN 0x20
#define INV_MPU6050_BIT_BYPASS_EN 0x2
+/* allow acking interrupts by any register read */
+#define INV_MPU6050_INT_RD_CLEAR 0x10
/* Allowed timestamp period jitter in percent */
#define INV_MPU6050_TS_PERIOD_JITTER 4
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
index 10a473342075..22c1ce66f99e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_trigger.c
@@ -248,7 +248,6 @@ static irqreturn_t inv_mpu6050_interrupt_handle(int irq, void *p)
switch (st->chip_type) {
case INV_MPU6000:
case INV_MPU6050:
- case INV_MPU9150:
/*
* WoM is not supported and interrupt status read seems to be broken for
* some chips. Since data ready is the only interrupt, bypass interrupt
@@ -257,6 +256,10 @@ static irqreturn_t inv_mpu6050_interrupt_handle(int irq, void *p)
wom_bits = 0;
int_status = INV_MPU6050_BIT_RAW_DATA_RDY_INT;
goto data_ready_interrupt;
+ case INV_MPU9150:
+ /* IRQ needs to be acked */
+ wom_bits = 0;
+ break;
case INV_MPU6500:
case INV_MPU6515:
case INV_MPU6880:
diff --git a/drivers/iio/industrialio-buffer.c b/drivers/iio/industrialio-buffer.c
index f15a180dc49e..46f36a6ed271 100644
--- a/drivers/iio/industrialio-buffer.c
+++ b/drivers/iio/industrialio-buffer.c
@@ -228,8 +228,10 @@ static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
written = 0;
add_wait_queue(&rb->pollq, &wait);
do {
- if (!indio_dev->info)
- return -ENODEV;
+ if (!indio_dev->info) {
+ ret = -ENODEV;
+ break;
+ }
if (!iio_buffer_space_available(rb)) {
if (signal_pending(current)) {
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
index 5d3c6d5276ba..a740d1f992a8 100644
--- a/drivers/iio/light/bh1780.c
+++ b/drivers/iio/light/bh1780.c
@@ -109,9 +109,9 @@ static int bh1780_read_raw(struct iio_dev *indio_dev,
case IIO_LIGHT:
pm_runtime_get_sync(&bh1780->client->dev);
value = bh1780_read_word(bh1780, BH1780_REG_DLOW);
+ pm_runtime_put_autosuspend(&bh1780->client->dev);
if (value < 0)
return value;
- pm_runtime_put_autosuspend(&bh1780->client->dev);
*val = value;
return IIO_VAL_INT;
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 9345fb6d5317..fb313e591e85 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -143,8 +143,7 @@ config MMC5633
tristate "MEMSIC MMC5633 3-axis magnetic sensor"
select REGMAP_I2C
select REGMAP_I3C if I3C
- depends on I2C
- depends on I3C || !I3C
+ depends on I3C_OR_I2C
help
Say yes here to build support for the MEMSIC MMC5633 3-axis
magnetic sensor.
diff --git a/drivers/iio/magnetometer/tlv493d.c b/drivers/iio/magnetometer/tlv493d.c
index ec53fd40277b..e5e050af2b74 100644
--- a/drivers/iio/magnetometer/tlv493d.c
+++ b/drivers/iio/magnetometer/tlv493d.c
@@ -171,7 +171,7 @@ static s16 tlv493d_get_channel_data(u8 *b, enum tlv493d_channels ch)
switch (ch) {
case TLV493D_AXIS_X:
val = FIELD_GET(TLV493D_BX_MAG_X_AXIS_MSB, b[TLV493D_RD_REG_BX]) << 4 |
- FIELD_GET(TLV493D_BX2_MAG_X_AXIS_LSB, b[TLV493D_RD_REG_BX2]) >> 4;
+ FIELD_GET(TLV493D_BX2_MAG_X_AXIS_LSB, b[TLV493D_RD_REG_BX2]);
break;
case TLV493D_AXIS_Y:
val = FIELD_GET(TLV493D_BY_MAG_Y_AXIS_MSB, b[TLV493D_RD_REG_BY]) << 4 |
diff --git a/drivers/iio/potentiometer/mcp4131.c b/drivers/iio/potentiometer/mcp4131.c
index ad082827aad5..56c9111ef5e8 100644
--- a/drivers/iio/potentiometer/mcp4131.c
+++ b/drivers/iio/potentiometer/mcp4131.c
@@ -221,7 +221,7 @@ static int mcp4131_write_raw(struct iio_dev *indio_dev,
mutex_lock(&data->lock);
- data->buf[0] = address << MCP4131_WIPER_SHIFT;
+ data->buf[0] = address;
data->buf[0] |= MCP4131_WRITE | (val >> 8);
data->buf[1] = val & 0xFF; /* 8 bits here */
diff --git a/drivers/iio/proximity/hx9023s.c b/drivers/iio/proximity/hx9023s.c
index 2918dfc0df54..17e00ee2b6f8 100644
--- a/drivers/iio/proximity/hx9023s.c
+++ b/drivers/iio/proximity/hx9023s.c
@@ -719,6 +719,9 @@ static int hx9023s_set_samp_freq(struct hx9023s_data *data, int val, int val2)
struct device *dev = regmap_get_device(data->regmap);
unsigned int i, period_ms;
+ if (!val && !val2)
+ return -EINVAL;
+
period_ms = div_u64(NANO, (val * MEGA + val2));
for (i = 0; i < ARRAY_SIZE(hx9023s_samp_freq_table); i++) {
@@ -1034,9 +1037,8 @@ static int hx9023s_send_cfg(const struct firmware *fw, struct hx9023s_data *data
if (!bin)
return -ENOMEM;
- memcpy(bin->data, fw->data, fw->size);
-
bin->fw_size = fw->size;
+ memcpy(bin->data, fw->data, bin->fw_size);
bin->fw_ver = bin->data[FW_VER_OFFSET];
bin->reg_count = get_unaligned_le16(bin->data + FW_REG_CNT_OFFSET);
diff --git a/drivers/irqchip/irq-riscv-aplic-main.c b/drivers/irqchip/irq-riscv-aplic-main.c
index 4495ca26abf5..9f53979b6962 100644
--- a/drivers/irqchip/irq-riscv-aplic-main.c
+++ b/drivers/irqchip/irq-riscv-aplic-main.c
@@ -116,6 +116,16 @@ static struct syscore aplic_syscore = {
.ops = &aplic_syscore_ops,
};
+static bool aplic_syscore_registered __ro_after_init;
+
+static void aplic_syscore_init(void)
+{
+ if (!aplic_syscore_registered) {
+ register_syscore(&aplic_syscore);
+ aplic_syscore_registered = true;
+ }
+}
+
static int aplic_pm_notifier(struct notifier_block *nb, unsigned long action, void *data)
{
struct aplic_priv *priv = container_of(nb, struct aplic_priv, genpd_nb);
@@ -372,18 +382,21 @@ static int aplic_probe(struct platform_device *pdev)
rc = aplic_msi_setup(dev, regs);
else
rc = aplic_direct_setup(dev, regs);
- if (rc)
+
+ if (rc) {
dev_err_probe(dev, rc, "failed to setup APLIC in %s mode\n",
msi_mode ? "MSI" : "direct");
- else
- register_syscore(&aplic_syscore);
+ return rc;
+ }
+
+ aplic_syscore_init();
#ifdef CONFIG_ACPI
if (!acpi_disabled)
acpi_dev_clear_dependencies(ACPI_COMPANION(dev));
#endif
- return rc;
+ return 0;
}
static const struct of_device_id aplic_match[] = {
diff --git a/drivers/misc/amd-sbi/Kconfig b/drivers/misc/amd-sbi/Kconfig
index be022c71a90c..30e7fad7356c 100644
--- a/drivers/misc/amd-sbi/Kconfig
+++ b/drivers/misc/amd-sbi/Kconfig
@@ -1,10 +1,9 @@
# SPDX-License-Identifier: GPL-2.0-only
config AMD_SBRMI_I2C
tristate "AMD side band RMI support"
- depends on I2C
+ depends on I3C_OR_I2C
depends on ARM || ARM64 || COMPILE_TEST
select REGMAP_I2C
- depends on I3C || !I3C
select REGMAP_I3C if I3C
help
Side band RMI over I2C/I3C support for AMD out of band management.
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index bd9621d3f73c..45b7d756e39a 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -486,14 +486,15 @@ EXPORT_SYMBOL_GPL(nd_synchronize);
static void nd_async_device_register(void *d, async_cookie_t cookie)
{
struct device *dev = d;
+ struct device *parent = dev->parent;
if (device_add(dev) != 0) {
dev_err(dev, "%s: failed\n", __func__);
put_device(dev);
}
put_device(dev);
- if (dev->parent)
- put_device(dev->parent);
+ if (parent)
+ put_device(parent);
}
static void nd_async_device_unregister(void *d, async_cookie_t cookie)
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 58bf432ec5e6..766e9cc4ffca 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -4834,7 +4834,6 @@ EXPORT_SYMBOL_GPL(nvme_complete_async_event);
int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
const struct blk_mq_ops *ops, unsigned int cmd_size)
{
- struct queue_limits lim = {};
int ret;
memset(set, 0, sizeof(*set));
@@ -4861,7 +4860,7 @@ int nvme_alloc_admin_tag_set(struct nvme_ctrl *ctrl, struct blk_mq_tag_set *set,
if (ctrl->admin_q)
blk_put_queue(ctrl->admin_q);
- ctrl->admin_q = blk_mq_alloc_queue(set, &lim, NULL);
+ ctrl->admin_q = blk_mq_alloc_queue(set, NULL, NULL);
if (IS_ERR(ctrl->admin_q)) {
ret = PTR_ERR(ctrl->admin_q);
goto out_free_tagset;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 8d09fa7d7ff9..b78ba239c8ea 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -544,7 +544,7 @@ static void nvme_dbbuf_set(struct nvme_dev *dev)
/* Free memory and continue on */
nvme_dbbuf_dma_free(dev);
- for (i = 1; i <= dev->online_queues; i++)
+ for (i = 1; i < dev->online_queues; i++)
nvme_dbbuf_free(&dev->queues[i]);
}
}
@@ -1625,14 +1625,16 @@ static irqreturn_t nvme_irq_check(int irq, void *data)
static void nvme_poll_irqdisable(struct nvme_queue *nvmeq)
{
struct pci_dev *pdev = to_pci_dev(nvmeq->dev->dev);
+ int irq;
WARN_ON_ONCE(test_bit(NVMEQ_POLLED, &nvmeq->flags));
- disable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+ irq = pci_irq_vector(pdev, nvmeq->cq_vector);
+ disable_irq(irq);
spin_lock(&nvmeq->cq_poll_lock);
nvme_poll_cq(nvmeq, NULL);
spin_unlock(&nvmeq->cq_poll_lock);
- enable_irq(pci_irq_vector(pdev, nvmeq->cq_vector));
+ enable_irq(irq);
}
static int nvme_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob)
diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
index 9de93f65d7d7..ca5b08ce1211 100644
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -1585,7 +1585,7 @@ void nvmet_execute_async_event(struct nvmet_req *req)
ctrl->async_event_cmds[ctrl->nr_async_event_cmds++] = req;
mutex_unlock(&ctrl->lock);
- queue_work(nvmet_wq, &ctrl->async_event_work);
+ queue_work(nvmet_aen_wq, &ctrl->async_event_work);
}
void nvmet_execute_keep_alive(struct nvmet_req *req)
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index 5e43d0acc86e..9238e13bd480 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -27,6 +27,8 @@ static DEFINE_IDA(cntlid_ida);
struct workqueue_struct *nvmet_wq;
EXPORT_SYMBOL_GPL(nvmet_wq);
+struct workqueue_struct *nvmet_aen_wq;
+EXPORT_SYMBOL_GPL(nvmet_aen_wq);
/*
* This read/write semaphore is used to synchronize access to configuration
@@ -206,7 +208,7 @@ void nvmet_add_async_event(struct nvmet_ctrl *ctrl, u8 event_type,
list_add_tail(&aen->entry, &ctrl->async_events);
mutex_unlock(&ctrl->lock);
- queue_work(nvmet_wq, &ctrl->async_event_work);
+ queue_work(nvmet_aen_wq, &ctrl->async_event_work);
}
static void nvmet_add_to_changed_ns_log(struct nvmet_ctrl *ctrl, __le32 nsid)
@@ -1956,9 +1958,14 @@ static int __init nvmet_init(void)
if (!nvmet_wq)
goto out_free_buffered_work_queue;
+ nvmet_aen_wq = alloc_workqueue("nvmet-aen-wq",
+ WQ_MEM_RECLAIM | WQ_UNBOUND, 0);
+ if (!nvmet_aen_wq)
+ goto out_free_nvmet_work_queue;
+
error = nvmet_init_debugfs();
if (error)
- goto out_free_nvmet_work_queue;
+ goto out_free_nvmet_aen_work_queue;
error = nvmet_init_discovery();
if (error)
@@ -1974,6 +1981,8 @@ out_exit_discovery:
nvmet_exit_discovery();
out_exit_debugfs:
nvmet_exit_debugfs();
+out_free_nvmet_aen_work_queue:
+ destroy_workqueue(nvmet_aen_wq);
out_free_nvmet_work_queue:
destroy_workqueue(nvmet_wq);
out_free_buffered_work_queue:
@@ -1991,6 +2000,7 @@ static void __exit nvmet_exit(void)
nvmet_exit_discovery();
nvmet_exit_debugfs();
ida_destroy(&cntlid_ida);
+ destroy_workqueue(nvmet_aen_wq);
destroy_workqueue(nvmet_wq);
destroy_workqueue(buffered_io_wq);
destroy_workqueue(zbd_wq);
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index b664b584fdc8..319d6a5e9cf0 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -501,6 +501,7 @@ extern struct kmem_cache *nvmet_bvec_cache;
extern struct workqueue_struct *buffered_io_wq;
extern struct workqueue_struct *zbd_wq;
extern struct workqueue_struct *nvmet_wq;
+extern struct workqueue_struct *nvmet_aen_wq;
static inline void nvmet_set_result(struct nvmet_req *req, u32 result)
{
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 2d6eb89f98af..e6e2c3f9afdf 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -2087,6 +2087,7 @@ static void nvmet_rdma_remove_one(struct ib_device *ib_device, void *client_data
mutex_unlock(&nvmet_rdma_queue_mutex);
flush_workqueue(nvmet_wq);
+ flush_workqueue(nvmet_aen_wq);
}
static struct ib_client nvmet_rdma_ib_client = {
diff --git a/drivers/power/sequencing/pwrseq-pcie-m2.c b/drivers/power/sequencing/pwrseq-pcie-m2.c
index d31a7dd8b35c..dadb4aad9d5d 100644
--- a/drivers/power/sequencing/pwrseq-pcie-m2.c
+++ b/drivers/power/sequencing/pwrseq-pcie-m2.c
@@ -109,7 +109,7 @@ static int pwrseq_pcie_m2_probe(struct platform_device *pdev)
if (!ctx)
return -ENOMEM;
- ctx->of_node = of_node_get(dev->of_node);
+ ctx->of_node = dev_of_node(dev);
ctx->pdata = device_get_match_data(dev);
if (!ctx->pdata)
return dev_err_probe(dev, -ENODEV,
diff --git a/drivers/regulator/pca9450-regulator.c b/drivers/regulator/pca9450-regulator.c
index 5fa868264250..45d7dc44c2cd 100644
--- a/drivers/regulator/pca9450-regulator.c
+++ b/drivers/regulator/pca9450-regulator.c
@@ -1293,6 +1293,7 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
struct regulator_dev *ldo5;
struct pca9450 *pca9450;
unsigned int device_id, i;
+ const char *type_name;
int ret;
pca9450 = devm_kzalloc(&i2c->dev, sizeof(struct pca9450), GFP_KERNEL);
@@ -1303,15 +1304,22 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
case PCA9450_TYPE_PCA9450A:
regulator_desc = pca9450a_regulators;
pca9450->rcnt = ARRAY_SIZE(pca9450a_regulators);
+ type_name = "pca9450a";
break;
case PCA9450_TYPE_PCA9450BC:
regulator_desc = pca9450bc_regulators;
pca9450->rcnt = ARRAY_SIZE(pca9450bc_regulators);
+ type_name = "pca9450bc";
break;
case PCA9450_TYPE_PCA9451A:
+ regulator_desc = pca9451a_regulators;
+ pca9450->rcnt = ARRAY_SIZE(pca9451a_regulators);
+ type_name = "pca9451a";
+ break;
case PCA9450_TYPE_PCA9452:
regulator_desc = pca9451a_regulators;
pca9450->rcnt = ARRAY_SIZE(pca9451a_regulators);
+ type_name = "pca9452";
break;
default:
dev_err(&i2c->dev, "Unknown device type");
@@ -1369,7 +1377,7 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
if (pca9450->irq) {
ret = devm_request_threaded_irq(pca9450->dev, pca9450->irq, NULL,
pca9450_irq_handler,
- (IRQF_TRIGGER_FALLING | IRQF_ONESHOT),
+ (IRQF_TRIGGER_LOW | IRQF_ONESHOT),
"pca9450-irq", pca9450);
if (ret != 0)
return dev_err_probe(pca9450->dev, ret, "Failed to request IRQ: %d\n",
@@ -1413,9 +1421,7 @@ static int pca9450_i2c_probe(struct i2c_client *i2c)
pca9450_i2c_restart_handler, pca9450))
dev_warn(&i2c->dev, "Failed to register restart handler\n");
- dev_info(&i2c->dev, "%s probed.\n",
- type == PCA9450_TYPE_PCA9450A ? "pca9450a" :
- (type == PCA9450_TYPE_PCA9451A ? "pca9451a" : "pca9450bc"));
+ dev_info(&i2c->dev, "%s probed.\n", type_name);
return 0;
}
diff --git a/drivers/reset/reset-rzg2l-usbphy-ctrl.c b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
index 32bc268c9149..05dd9b4a02df 100644
--- a/drivers/reset/reset-rzg2l-usbphy-ctrl.c
+++ b/drivers/reset/reset-rzg2l-usbphy-ctrl.c
@@ -136,6 +136,9 @@ static int rzg2l_usbphy_ctrl_set_pwrrdy(struct regmap_field *pwrrdy,
{
u32 val = power_on ? 0 : 1;
+ if (!pwrrdy)
+ return 0;
+
/* The initialization path guarantees that the mask is 1 bit long. */
return regmap_field_update_bits(pwrrdy, 1, val);
}
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cb068d5e2145..14e58c336baa 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -6135,6 +6135,7 @@ static void copy_pair_set_active(struct dasd_copy_relation *copy, char *new_busi
static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid,
char *sec_busid)
{
+ struct dasd_eckd_private *prim_priv, *sec_priv;
struct dasd_device *primary, *secondary;
struct dasd_copy_relation *copy;
struct dasd_block *block;
@@ -6155,6 +6156,9 @@ static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid
if (!secondary)
return DASD_COPYPAIRSWAP_SECONDARY;
+ prim_priv = primary->private;
+ sec_priv = secondary->private;
+
/*
* usually the device should be quiesced for swap
* for paranoia stop device and requeue requests again
@@ -6182,6 +6186,18 @@ static int dasd_eckd_copy_pair_swap(struct dasd_device *device, char *prim_busid
dev_name(&secondary->cdev->dev), rc);
}
+ if (primary->stopped & DASD_STOPPED_QUIESCE) {
+ dasd_device_set_stop_bits(secondary, DASD_STOPPED_QUIESCE);
+ dasd_device_remove_stop_bits(primary, DASD_STOPPED_QUIESCE);
+ }
+
+ /*
+ * The secondary device never got through format detection, but since it
+ * is a copy of the primary device, the format is exactly the same;
+ * therefore, the detected layout can simply be copied.
+ */
+ sec_priv->uses_cdl = prim_priv->uses_cdl;
+
/* re-enable device */
dasd_device_remove_stop_bits(primary, DASD_STOPPED_PPRC);
dasd_device_remove_stop_bits(secondary, DASD_STOPPED_PPRC);
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 573bad1d6d86..37a157a1d969 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -1639,11 +1639,13 @@ int cca_get_info(u16 cardnr, u16 domain, struct cca_info *ci, u32 xflags)
memset(ci, 0, sizeof(*ci));
- /* get first info from zcrypt device driver about this apqn */
- rc = zcrypt_device_status_ext(cardnr, domain, &devstat);
- if (rc)
- return rc;
- ci->hwtype = devstat.hwtype;
+ /* if specific domain given, fetch status and hw info for this apqn */
+ if (domain != AUTOSEL_DOM) {
+ rc = zcrypt_device_status_ext(cardnr, domain, &devstat);
+ if (rc)
+ return rc;
+ ci->hwtype = devstat.hwtype;
+ }
/*
* Prep memory for rule array and var array use.
diff --git a/drivers/s390/crypto/zcrypt_cex4.c b/drivers/s390/crypto/zcrypt_cex4.c
index e9a984903bff..e7b0ed26a9ec 100644
--- a/drivers/s390/crypto/zcrypt_cex4.c
+++ b/drivers/s390/crypto/zcrypt_cex4.c
@@ -85,8 +85,7 @@ static ssize_t cca_serialnr_show(struct device *dev,
memset(&ci, 0, sizeof(ci));
- if (ap_domain_index >= 0)
- cca_get_info(ac->id, ap_domain_index, &ci, 0);
+ cca_get_info(ac->id, AUTOSEL_DOM, &ci, 0);
return sysfs_emit(buf, "%s\n", ci.serial);
}
diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c
index 30a9c6612651..c2b082f1252c 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_main.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_main.c
@@ -2578,7 +2578,7 @@ int hisi_sas_probe(struct platform_device *pdev,
shost->transportt = hisi_sas_stt;
shost->max_id = HISI_SAS_MAX_DEVICES;
shost->max_lun = ~0;
- shost->max_channel = 1;
+ shost->max_channel = 0;
shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN;
if (hisi_hba->hw->slot_index_alloc) {
shost->can_queue = HISI_SAS_MAX_COMMANDS;
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
index 2f9e01717ef3..f69efc6494b8 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v3_hw.c
@@ -4993,7 +4993,7 @@ hisi_sas_v3_probe(struct pci_dev *pdev, const struct pci_device_id *id)
shost->transportt = hisi_sas_stt;
shost->max_id = HISI_SAS_MAX_DEVICES;
shost->max_lun = ~0;
- shost->max_channel = 1;
+ shost->max_channel = 0;
shost->max_cmd_len = HISI_SAS_MAX_CDB_LEN;
shost->can_queue = HISI_SAS_UNRESERVED_IPTT;
shost->cmd_per_lun = HISI_SAS_UNRESERVED_IPTT;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 9038f6723444..dbe3cd4e274c 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -2751,7 +2751,6 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
if (!elsio->u.els_logo.els_logo_pyld) {
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
- qla2x00_free_fcport(fcport);
return QLA_FUNCTION_FAILED;
}
@@ -2776,7 +2775,6 @@ qla24xx_els_dcmd_iocb(scsi_qla_host_t *vha, int els_opcode,
if (rval != QLA_SUCCESS) {
/* ref: INIT */
kref_put(&sp->cmd_kref, qla2x00_sp_release);
- qla2x00_free_fcport(fcport);
return QLA_FUNCTION_FAILED;
}
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 2cfcf1f5d6a4..7b11bc7de0e3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -360,12 +360,8 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget,
* default device queue depth to figure out sbitmap shift
* since we use this queue depth most of times.
*/
- if (scsi_realloc_sdev_budget_map(sdev, depth)) {
- kref_put(&sdev->host->tagset_refcnt, scsi_mq_free_tags);
- put_device(&starget->dev);
- kfree(sdev);
- goto out;
- }
+ if (scsi_realloc_sdev_budget_map(sdev, depth))
+ goto out_device_destroy;
scsi_change_queue_depth(sdev, depth);
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 411381f1a1c4..9ddafcb18f1c 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1827,6 +1827,8 @@ EXPORT_SYMBOL(qman_create_fq);
void qman_destroy_fq(struct qman_fq *fq)
{
+ int leaked;
+
/*
* We don't need to lock the FQ as it is a pre-condition that the FQ be
* quiesced. Instead, run some checks.
@@ -1834,11 +1836,29 @@ void qman_destroy_fq(struct qman_fq *fq)
switch (fq->state) {
case qman_fq_state_parked:
case qman_fq_state_oos:
- if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID))
- qman_release_fqid(fq->fqid);
+ /*
+ * There's a race condition here on releasing the fqid,
+ * setting the fq_table to NULL, and freeing the fqid.
+ * To prevent it, this order should be respected:
+ */
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID)) {
+ leaked = qman_shutdown_fq(fq->fqid);
+ if (leaked)
+ pr_debug("FQID %d leaked\n", fq->fqid);
+ }
DPAA_ASSERT(fq_table[fq->idx]);
fq_table[fq->idx] = NULL;
+
+ if (fq_isset(fq, QMAN_FQ_FLAG_DYNAMIC_FQID) && !leaked) {
+ /*
+ * fq_table[fq->idx] should be set to null before
+ * freeing fq->fqid otherwise it could by allocated by
+ * qman_alloc_fqid() while still being !NULL
+ */
+ smp_wmb();
+ gen_pool_free(qm_fqalloc, fq->fqid | DPAA_GENALLOC_OFF, 1);
+ }
return;
default:
break;
diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
index c4587b32a59b..672adff8e35f 100644
--- a/drivers/soc/fsl/qe/qmc.c
+++ b/drivers/soc/fsl/qe/qmc.c
@@ -1790,8 +1790,8 @@ static int qmc_qe_init_resources(struct qmc *qmc, struct platform_device *pdev)
return -EINVAL;
qmc->dpram_offset = res->start - qe_muram_dma(qe_muram_addr(0));
qmc->dpram = devm_ioremap_resource(qmc->dev, res);
- if (IS_ERR(qmc->scc_pram))
- return PTR_ERR(qmc->scc_pram);
+ if (IS_ERR(qmc->dpram))
+ return PTR_ERR(qmc->dpram);
return 0;
}
diff --git a/drivers/soc/microchip/mpfs-sys-controller.c b/drivers/soc/microchip/mpfs-sys-controller.c
index 8e7ae3cb92ff..10b2fc39da66 100644
--- a/drivers/soc/microchip/mpfs-sys-controller.c
+++ b/drivers/soc/microchip/mpfs-sys-controller.c
@@ -142,8 +142,10 @@ static int mpfs_sys_controller_probe(struct platform_device *pdev)
sys_controller->flash = of_get_mtd_device_by_node(np);
of_node_put(np);
- if (IS_ERR(sys_controller->flash))
- return dev_err_probe(dev, PTR_ERR(sys_controller->flash), "Failed to get flash\n");
+ if (IS_ERR(sys_controller->flash)) {
+ ret = dev_err_probe(dev, PTR_ERR(sys_controller->flash), "Failed to get flash\n");
+ goto out_free;
+ }
no_flash:
sys_controller->client.dev = dev;
@@ -155,8 +157,7 @@ no_flash:
if (IS_ERR(sys_controller->chan)) {
ret = dev_err_probe(dev, PTR_ERR(sys_controller->chan),
"Failed to get mbox channel\n");
- kfree(sys_controller);
- return ret;
+ goto out_free;
}
init_completion(&sys_controller->c);
@@ -174,6 +175,10 @@ no_flash:
dev_info(&pdev->dev, "Registered MPFS system controller\n");
return 0;
+
+out_free:
+ kfree(sys_controller);
+ return ret;
}
static void mpfs_sys_controller_remove(struct platform_device *pdev)
diff --git a/drivers/soc/rockchip/grf.c b/drivers/soc/rockchip/grf.c
index 04937c40da47..b459607c118a 100644
--- a/drivers/soc/rockchip/grf.c
+++ b/drivers/soc/rockchip/grf.c
@@ -231,6 +231,7 @@ static int __init rockchip_grf_init(void)
grf = syscon_node_to_regmap(np);
if (IS_ERR(grf)) {
pr_err("%s: could not get grf syscon\n", __func__);
+ of_node_put(np);
return PTR_ERR(grf);
}
diff --git a/drivers/spi/spi-amlogic-spifc-a4.c b/drivers/spi/spi-amlogic-spifc-a4.c
index 2aef528cfc1b..3956869cfec1 100644
--- a/drivers/spi/spi-amlogic-spifc-a4.c
+++ b/drivers/spi/spi-amlogic-spifc-a4.c
@@ -411,7 +411,7 @@ static int aml_sfc_dma_buffer_setup(struct aml_sfc *sfc, void *databuf,
ret = dma_mapping_error(sfc->dev, sfc->daddr);
if (ret) {
dev_err(sfc->dev, "DMA mapping error\n");
- goto out_map_data;
+ return ret;
}
cmd = CMD_DATA_ADDRL(sfc->daddr);
@@ -429,7 +429,6 @@ static int aml_sfc_dma_buffer_setup(struct aml_sfc *sfc, void *databuf,
ret = dma_mapping_error(sfc->dev, sfc->iaddr);
if (ret) {
dev_err(sfc->dev, "DMA mapping error\n");
- dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir);
goto out_map_data;
}
@@ -448,7 +447,7 @@ static int aml_sfc_dma_buffer_setup(struct aml_sfc *sfc, void *databuf,
return 0;
out_map_info:
- dma_unmap_single(sfc->dev, sfc->iaddr, datalen, dir);
+ dma_unmap_single(sfc->dev, sfc->iaddr, infolen, dir);
out_map_data:
dma_unmap_single(sfc->dev, sfc->daddr, datalen, dir);
diff --git a/drivers/spi/spi-atcspi200.c b/drivers/spi/spi-atcspi200.c
index 60a37ff5c6f5..2665f31a49ce 100644
--- a/drivers/spi/spi-atcspi200.c
+++ b/drivers/spi/spi-atcspi200.c
@@ -195,7 +195,15 @@ static void atcspi_set_trans_ctl(struct atcspi_dev *spi,
if (op->addr.buswidth > 1)
tc |= TRANS_ADDR_FMT;
if (op->data.nbytes) {
- tc |= TRANS_DUAL_QUAD(ffs(op->data.buswidth) - 1);
+ unsigned int width_code;
+
+ width_code = ffs(op->data.buswidth) - 1;
+ if (unlikely(width_code > 3)) {
+ WARN_ON_ONCE(1);
+ width_code = 0;
+ }
+ tc |= TRANS_DUAL_QUAD(width_code);
+
if (op->data.dir == SPI_MEM_DATA_IN) {
if (op->dummy.nbytes)
tc |= TRANS_MODE_DMY_READ |
@@ -497,31 +505,17 @@ static int atcspi_init_resources(struct platform_device *pdev,
static int atcspi_configure_dma(struct atcspi_dev *spi)
{
- struct dma_chan *dma_chan;
- int ret = 0;
+ spi->host->dma_rx = devm_dma_request_chan(spi->dev, "rx");
+ if (IS_ERR(spi->host->dma_rx))
+ return PTR_ERR(spi->host->dma_rx);
- dma_chan = devm_dma_request_chan(spi->dev, "rx");
- if (IS_ERR(dma_chan)) {
- ret = PTR_ERR(dma_chan);
- goto err_exit;
- }
- spi->host->dma_rx = dma_chan;
+ spi->host->dma_tx = devm_dma_request_chan(spi->dev, "tx");
+ if (IS_ERR(spi->host->dma_tx))
+ return PTR_ERR(spi->host->dma_tx);
- dma_chan = devm_dma_request_chan(spi->dev, "tx");
- if (IS_ERR(dma_chan)) {
- ret = PTR_ERR(dma_chan);
- goto free_rx;
- }
- spi->host->dma_tx = dma_chan;
init_completion(&spi->dma_completion);
- return ret;
-
-free_rx:
- dma_release_channel(spi->host->dma_rx);
- spi->host->dma_rx = NULL;
-err_exit:
- return ret;
+ return 0;
}
static int atcspi_enable_clk(struct atcspi_dev *spi)
diff --git a/drivers/spi/spi-cadence-quadspi.c b/drivers/spi/spi-cadence-quadspi.c
index 649ff55333f0..5fb0cb07c110 100644
--- a/drivers/spi/spi-cadence-quadspi.c
+++ b/drivers/spi/spi-cadence-quadspi.c
@@ -76,6 +76,11 @@ struct cqspi_flash_pdata {
u8 cs;
};
+static const struct clk_bulk_data cqspi_clks[CLK_QSPI_NUM] = {
+ [CLK_QSPI_APB] = { .id = "apb" },
+ [CLK_QSPI_AHB] = { .id = "ahb" },
+};
+
struct cqspi_st {
struct platform_device *pdev;
struct spi_controller *host;
@@ -1823,6 +1828,7 @@ static int cqspi_probe(struct platform_device *pdev)
}
/* Obtain QSPI clocks. */
+ memcpy(&cqspi->clks, &cqspi_clks, sizeof(cqspi->clks));
ret = devm_clk_bulk_get_optional(dev, CLK_QSPI_NUM, cqspi->clks);
if (ret)
return dev_err_probe(dev, ret, "Failed to get clocks\n");
diff --git a/drivers/spi/spi-intel-pci.c b/drivers/spi/spi-intel-pci.c
index bce3d149bea1..d8ef8f89330a 100644
--- a/drivers/spi/spi-intel-pci.c
+++ b/drivers/spi/spi-intel-pci.c
@@ -96,6 +96,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
{ PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xa823), (unsigned long)&cnl_info },
+ { PCI_VDEVICE(INTEL, 0xd323), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xe323), (unsigned long)&cnl_info },
{ PCI_VDEVICE(INTEL, 0xe423), (unsigned long)&cnl_info },
{ },
diff --git a/drivers/spi/spi-rockchip-sfc.c b/drivers/spi/spi-rockchip-sfc.c
index 2990bf85ee47..174995042f53 100644
--- a/drivers/spi/spi-rockchip-sfc.c
+++ b/drivers/spi/spi-rockchip-sfc.c
@@ -711,7 +711,7 @@ static int rockchip_sfc_probe(struct platform_device *pdev)
}
}
- ret = devm_spi_register_controller(dev, host);
+ ret = spi_register_controller(host);
if (ret)
goto err_register;
diff --git a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
index 6cf217e21593..3e2b5e6b07f9 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ieee80211.c
@@ -186,20 +186,25 @@ u8 *rtw_get_ie_ex(u8 *in_ie, uint in_len, u8 eid, u8 *oui, u8 oui_len, u8 *ie, u
cnt = 0;
- while (cnt < in_len) {
+ while (cnt + 2 <= in_len) {
+ u8 ie_len = in_ie[cnt + 1];
+
+ if (cnt + 2 + ie_len > in_len)
+ break;
+
if (eid == in_ie[cnt]
- && (!oui || !memcmp(&in_ie[cnt+2], oui, oui_len))) {
+ && (!oui || (ie_len >= oui_len && !memcmp(&in_ie[cnt + 2], oui, oui_len)))) {
target_ie = &in_ie[cnt];
if (ie)
- memcpy(ie, &in_ie[cnt], in_ie[cnt+1]+2);
+ memcpy(ie, &in_ie[cnt], ie_len + 2);
if (ielen)
- *ielen = in_ie[cnt+1]+2;
+ *ielen = ie_len + 2;
break;
}
- cnt += in_ie[cnt+1]+2; /* goto next */
+ cnt += ie_len + 2; /* goto next */
}
return target_ie;
diff --git a/drivers/staging/rtl8723bs/core/rtw_mlme.c b/drivers/staging/rtl8723bs/core/rtw_mlme.c
index 7df651708381..1ef48bf6581c 100644
--- a/drivers/staging/rtl8723bs/core/rtw_mlme.c
+++ b/drivers/staging/rtl8723bs/core/rtw_mlme.c
@@ -1988,7 +1988,10 @@ int rtw_restruct_wmm_ie(struct adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_
while (i < in_len) {
ielength = initial_out_len;
- if (in_ie[i] == 0xDD && in_ie[i + 2] == 0x00 && in_ie[i + 3] == 0x50 && in_ie[i + 4] == 0xF2 && in_ie[i + 5] == 0x02 && i + 5 < in_len) { /* WMM element ID and OUI */
+ if (i + 5 < in_len &&
+ in_ie[i] == 0xDD && in_ie[i + 2] == 0x00 &&
+ in_ie[i + 3] == 0x50 && in_ie[i + 4] == 0xF2 &&
+ in_ie[i + 5] == 0x02) {
for (j = i; j < i + 9; j++) {
out_ie[ielength] = in_ie[j];
ielength++;
diff --git a/drivers/staging/sm750fb/sm750.c b/drivers/staging/sm750fb/sm750.c
index dec1f6b88a7d..62f6e0cdff4d 100644
--- a/drivers/staging/sm750fb/sm750.c
+++ b/drivers/staging/sm750fb/sm750.c
@@ -1123,6 +1123,7 @@ static void lynxfb_pci_remove(struct pci_dev *pdev)
iounmap(sm750_dev->pvReg);
iounmap(sm750_dev->pvMem);
+ pci_release_region(pdev, 1);
kfree(g_settings);
}
diff --git a/drivers/staging/sm750fb/sm750_hw.c b/drivers/staging/sm750fb/sm750_hw.c
index a29faee91c78..f60b152a647d 100644
--- a/drivers/staging/sm750fb/sm750_hw.c
+++ b/drivers/staging/sm750fb/sm750_hw.c
@@ -36,16 +36,11 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
pr_info("mmio phyAddr = %lx\n", sm750_dev->vidreg_start);
- /*
- * reserve the vidreg space of smi adaptor
- * if you do this, you need to add release region code
- * in lynxfb_remove, or memory will not be mapped again
- * successfully
- */
+ /* reserve the vidreg space of smi adaptor */
ret = pci_request_region(pdev, 1, "sm750fb");
if (ret) {
pr_err("Can not request PCI regions.\n");
- goto exit;
+ return ret;
}
/* now map mmio and vidmem */
@@ -54,7 +49,7 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
if (!sm750_dev->pvReg) {
pr_err("mmio failed\n");
ret = -EFAULT;
- goto exit;
+ goto err_release_region;
}
pr_info("mmio virtual addr = %p\n", sm750_dev->pvReg);
@@ -79,13 +74,18 @@ int hw_sm750_map(struct sm750_dev *sm750_dev, struct pci_dev *pdev)
sm750_dev->pvMem =
ioremap_wc(sm750_dev->vidmem_start, sm750_dev->vidmem_size);
if (!sm750_dev->pvMem) {
- iounmap(sm750_dev->pvReg);
pr_err("Map video memory failed\n");
ret = -EFAULT;
- goto exit;
+ goto err_unmap_reg;
}
pr_info("video memory vaddr = %p\n", sm750_dev->pvMem);
-exit:
+
+ return 0;
+
+err_unmap_reg:
+ iounmap(sm750_dev->pvReg);
+err_release_region:
+ pci_release_region(pdev, 1);
return ret;
}
diff --git a/drivers/tee/tee_shm.c b/drivers/tee/tee_shm.c
index 6c5b9e352e5e..e9ea9f80cfd9 100644
--- a/drivers/tee/tee_shm.c
+++ b/drivers/tee/tee_shm.c
@@ -23,29 +23,11 @@ struct tee_shm_dma_mem {
struct page *page;
};
-static void shm_put_kernel_pages(struct page **pages, size_t page_count)
-{
- size_t n;
-
- for (n = 0; n < page_count; n++)
- put_page(pages[n]);
-}
-
-static void shm_get_kernel_pages(struct page **pages, size_t page_count)
-{
- size_t n;
-
- for (n = 0; n < page_count; n++)
- get_page(pages[n]);
-}
-
static void release_registered_pages(struct tee_shm *shm)
{
if (shm->pages) {
if (shm->flags & TEE_SHM_USER_MAPPED)
unpin_user_pages(shm->pages, shm->num_pages);
- else
- shm_put_kernel_pages(shm->pages, shm->num_pages);
kfree(shm->pages);
}
@@ -477,13 +459,6 @@ register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
goto err_put_shm_pages;
}
- /*
- * iov_iter_extract_kvec_pages does not get reference on the pages,
- * get a reference on them.
- */
- if (iov_iter_is_kvec(iter))
- shm_get_kernel_pages(shm->pages, num_pages);
-
shm->offset = off;
shm->size = len;
shm->num_pages = num_pages;
@@ -499,8 +474,6 @@ register_shm_helper(struct tee_context *ctx, struct iov_iter *iter, u32 flags,
err_put_shm_pages:
if (!iov_iter_is_kvec(iter))
unpin_user_pages(shm->pages, shm->num_pages);
- else
- shm_put_kernel_pages(shm->pages, shm->num_pages);
err_free_shm_pages:
kfree(shm->pages);
err_free_shm:
diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c
index 899e663fea6e..9ceb6d6d479d 100644
--- a/drivers/ufs/core/ufshcd.c
+++ b/drivers/ufs/core/ufshcd.c
@@ -10066,6 +10066,7 @@ static int __ufshcd_wl_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
}
flush_work(&hba->eeh_work);
+ cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
ret = ufshcd_vops_suspend(hba, pm_op, PRE_CHANGE);
if (ret)
@@ -10120,7 +10121,6 @@ vops_suspend:
if (ret)
goto set_link_active;
- cancel_delayed_work_sync(&hba->ufs_rtc_update_work);
goto out;
set_link_active:
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index ad38c746270a..7ede29d4c7c1 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1379,6 +1379,8 @@ made_compressed_probe:
acm->ctrl_caps = h.usb_cdc_acm_descriptor->bmCapabilities;
if (quirks & NO_CAP_LINE)
acm->ctrl_caps &= ~USB_CDC_CAP_LINE;
+ if (quirks & MISSING_CAP_BRK)
+ acm->ctrl_caps |= USB_CDC_CAP_BRK;
acm->ctrlsize = ctrlsize;
acm->readsize = readsize;
acm->rx_buflimit = num_rx_buf;
@@ -2002,6 +2004,9 @@ static const struct usb_device_id acm_ids[] = {
.driver_info = IGNORE_DEVICE,
},
+ /* CH343 supports CAP_BRK, but doesn't advertise it */
+ { USB_DEVICE(0x1a86, 0x55d3), .driver_info = MISSING_CAP_BRK, },
+
/* control interfaces without any protocol set */
{ USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM,
USB_CDC_PROTO_NONE) },
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index 759ac15631d3..76f73853a60b 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -113,3 +113,4 @@ struct acm {
#define CLEAR_HALT_CONDITIONS BIT(5)
#define SEND_ZERO_PACKET BIT(6)
#define DISABLE_ECHO BIT(7)
+#define MISSING_CAP_BRK BIT(8)
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index f2d94cfc70af..7556c0dac908 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -225,7 +225,8 @@ static void wdm_in_callback(struct urb *urb)
/* we may already be in overflow */
if (!test_bit(WDM_OVERFLOW, &desc->flags)) {
memmove(desc->ubuf + desc->length, desc->inbuf, length);
- desc->length += length;
+ smp_wmb(); /* against wdm_read() */
+ WRITE_ONCE(desc->length, desc->length + length);
}
}
skip_error:
@@ -533,6 +534,7 @@ static ssize_t wdm_read
return -ERESTARTSYS;
cntr = READ_ONCE(desc->length);
+ smp_rmb(); /* against wdm_in_callback() */
if (cntr == 0) {
desc->read = 0;
retry:
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 2526a0e03cde..d39bbfd7fd18 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -727,7 +727,7 @@ static int usbtmc488_ioctl_trigger(struct usbtmc_file_data *file_data)
buffer[1] = data->bTag;
buffer[2] = ~data->bTag;
- retval = usb_bulk_msg(data->usb_dev,
+ retval = usb_bulk_msg_killable(data->usb_dev,
usb_sndbulkpipe(data->usb_dev,
data->bulk_out),
buffer, USBTMC_HEADER_SIZE,
@@ -1347,7 +1347,7 @@ static int send_request_dev_dep_msg_in(struct usbtmc_file_data *file_data,
buffer[11] = 0; /* Reserved */
/* Send bulk URB */
- retval = usb_bulk_msg(data->usb_dev,
+ retval = usb_bulk_msg_killable(data->usb_dev,
usb_sndbulkpipe(data->usb_dev,
data->bulk_out),
buffer, USBTMC_HEADER_SIZE,
@@ -1419,7 +1419,7 @@ static ssize_t usbtmc_read(struct file *filp, char __user *buf,
actual = 0;
/* Send bulk URB */
- retval = usb_bulk_msg(data->usb_dev,
+ retval = usb_bulk_msg_killable(data->usb_dev,
usb_rcvbulkpipe(data->usb_dev,
data->bulk_in),
buffer, bufsize, &actual,
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 1cd5fa61dc76..6a1fd967e0a6 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -927,7 +927,11 @@ int usb_get_configuration(struct usb_device *dev)
dev->descriptor.bNumConfigurations = ncfg = USB_MAXCONFIG;
}
- if (ncfg < 1) {
+ if (ncfg < 1 && dev->quirks & USB_QUIRK_FORCE_ONE_CONFIG) {
+ dev_info(ddev, "Device claims zero configurations, forcing to 1\n");
+ dev->descriptor.bNumConfigurations = 1;
+ ncfg = 1;
+ } else if (ncfg < 1) {
dev_err(ddev, "no configurations\n");
return -EINVAL;
}
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index ea970ddf8879..2ab120ce2fa8 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -42,16 +42,19 @@ static void usb_api_blocking_completion(struct urb *urb)
/*
- * Starts urb and waits for completion or timeout. Note that this call
- * is NOT interruptible. Many device driver i/o requests should be
- * interruptible and therefore these drivers should implement their
- * own interruptible routines.
+ * Starts urb and waits for completion or timeout.
+ * Whether or not the wait is killable depends on the flag passed in.
+ * For example, compare usb_bulk_msg() and usb_bulk_msg_killable().
+ *
+ * For non-killable waits, we enforce a maximum limit on the timeout value.
*/
-static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length)
+static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length,
+ bool killable)
{
struct api_context ctx;
unsigned long expire;
int retval;
+ long rc;
init_completion(&ctx.done);
urb->context = &ctx;
@@ -60,13 +63,24 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int *actual_length)
if (unlikely(retval))
goto out;
- expire = timeout ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT;
- if (!wait_for_completion_timeout(&ctx.done, expire)) {
+ if (!killable && (timeout <= 0 || timeout > USB_MAX_SYNCHRONOUS_TIMEOUT))
+ timeout = USB_MAX_SYNCHRONOUS_TIMEOUT;
+ expire = (timeout > 0) ? msecs_to_jiffies(timeout) : MAX_SCHEDULE_TIMEOUT;
+ if (killable)
+ rc = wait_for_completion_killable_timeout(&ctx.done, expire);
+ else
+ rc = wait_for_completion_timeout(&ctx.done, expire);
+ if (rc <= 0) {
usb_kill_urb(urb);
- retval = (ctx.status == -ENOENT ? -ETIMEDOUT : ctx.status);
+ if (ctx.status != -ENOENT)
+ retval = ctx.status;
+ else if (rc == 0)
+ retval = -ETIMEDOUT;
+ else
+ retval = rc;
dev_dbg(&urb->dev->dev,
- "%s timed out on ep%d%s len=%u/%u\n",
+ "%s timed out or killed on ep%d%s len=%u/%u\n",
current->comm,
usb_endpoint_num(&urb->ep->desc),
usb_urb_dir_in(urb) ? "in" : "out",
@@ -100,7 +114,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
usb_fill_control_urb(urb, usb_dev, pipe, (unsigned char *)cmd, data,
len, usb_api_blocking_completion, NULL);
- retv = usb_start_wait_urb(urb, timeout, &length);
+ retv = usb_start_wait_urb(urb, timeout, &length, false);
if (retv < 0)
return retv;
else
@@ -117,8 +131,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
* @index: USB message index value
* @data: pointer to the data to send
* @size: length in bytes of the data to send
- * @timeout: time in msecs to wait for the message to complete before timing
- * out (if 0 the wait is forever)
+ * @timeout: time in msecs to wait for the message to complete before timing out
*
* Context: task context, might sleep.
*
@@ -173,8 +186,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg);
* @index: USB message index value
* @driver_data: pointer to the data to send
* @size: length in bytes of the data to send
- * @timeout: time in msecs to wait for the message to complete before timing
- * out (if 0 the wait is forever)
+ * @timeout: time in msecs to wait for the message to complete before timing out
* @memflags: the flags for memory allocation for buffers
*
* Context: !in_interrupt ()
@@ -232,8 +244,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg_send);
* @index: USB message index value
* @driver_data: pointer to the data to be filled in by the message
* @size: length in bytes of the data to be received
- * @timeout: time in msecs to wait for the message to complete before timing
- * out (if 0 the wait is forever)
+ * @timeout: time in msecs to wait for the message to complete before timing out
* @memflags: the flags for memory allocation for buffers
*
* Context: !in_interrupt ()
@@ -304,8 +315,7 @@ EXPORT_SYMBOL_GPL(usb_control_msg_recv);
* @len: length in bytes of the data to send
* @actual_length: pointer to a location to put the actual length transferred
* in bytes
- * @timeout: time in msecs to wait for the message to complete before
- * timing out (if 0 the wait is forever)
+ * @timeout: time in msecs to wait for the message to complete before timing out
*
* Context: task context, might sleep.
*
@@ -337,8 +347,7 @@ EXPORT_SYMBOL_GPL(usb_interrupt_msg);
* @len: length in bytes of the data to send
* @actual_length: pointer to a location to put the actual length transferred
* in bytes
- * @timeout: time in msecs to wait for the message to complete before
- * timing out (if 0 the wait is forever)
+ * @timeout: time in msecs to wait for the message to complete before timing out
*
* Context: task context, might sleep.
*
@@ -385,10 +394,59 @@ int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
usb_fill_bulk_urb(urb, usb_dev, pipe, data, len,
usb_api_blocking_completion, NULL);
- return usb_start_wait_urb(urb, timeout, actual_length);
+ return usb_start_wait_urb(urb, timeout, actual_length, false);
}
EXPORT_SYMBOL_GPL(usb_bulk_msg);
+/**
+ * usb_bulk_msg_killable - Builds a bulk urb, sends it off and waits for completion in a killable state
+ * @usb_dev: pointer to the usb device to send the message to
+ * @pipe: endpoint "pipe" to send the message to
+ * @data: pointer to the data to send
+ * @len: length in bytes of the data to send
+ * @actual_length: pointer to a location to put the actual length transferred
+ * in bytes
+ * @timeout: time in msecs to wait for the message to complete before
+ * timing out (if <= 0, the wait is as long as possible)
+ *
+ * Context: task context, might sleep.
+ *
+ * This function is just like usb_bulk_msg(), except that it waits in a
+ * killable state and there is no limit on the timeout length.
+ *
+ * Return:
+ * If successful, 0. Otherwise a negative error number. The number of actual
+ * bytes transferred will be stored in the @actual_length parameter.
+ *
+ */
+int usb_bulk_msg_killable(struct usb_device *usb_dev, unsigned int pipe,
+ void *data, int len, int *actual_length, int timeout)
+{
+ struct urb *urb;
+ struct usb_host_endpoint *ep;
+
+ ep = usb_pipe_endpoint(usb_dev, pipe);
+ if (!ep || len < 0)
+ return -EINVAL;
+
+ urb = usb_alloc_urb(0, GFP_KERNEL);
+ if (!urb)
+ return -ENOMEM;
+
+ if ((ep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) ==
+ USB_ENDPOINT_XFER_INT) {
+ pipe = (pipe & ~(3 << 30)) | (PIPE_INTERRUPT << 30);
+ usb_fill_int_urb(urb, usb_dev, pipe, data, len,
+ usb_api_blocking_completion, NULL,
+ ep->desc.bInterval);
+ } else
+ usb_fill_bulk_urb(urb, usb_dev, pipe, data, len,
+ usb_api_blocking_completion, NULL);
+
+ return usb_start_wait_urb(urb, timeout, actual_length, true);
+}
+EXPORT_SYMBOL_GPL(usb_bulk_msg_killable);
+
/*-------------------------------------------------------------------*/
static void sg_clean(struct usb_sg_request *io)
diff --git a/drivers/usb/core/phy.c b/drivers/usb/core/phy.c
index faa20054ad5a..4bba1c275740 100644
--- a/drivers/usb/core/phy.c
+++ b/drivers/usb/core/phy.c
@@ -200,16 +200,10 @@ int usb_phy_roothub_set_mode(struct usb_phy_roothub *phy_roothub,
list_for_each_entry(roothub_entry, head, list) {
err = phy_set_mode(roothub_entry->phy, mode);
if (err)
- goto err_out;
+ return err;
}
return 0;
-
-err_out:
- list_for_each_entry_continue_reverse(roothub_entry, head, list)
- phy_power_off(roothub_entry->phy);
-
- return err;
}
EXPORT_SYMBOL_GPL(usb_phy_roothub_set_mode);
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 9e7e49712739..5523a8e29021 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -140,6 +140,8 @@ static int quirks_param_set(const char *value, const struct kernel_param *kp)
case 'p':
flags |= USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT;
break;
+ case 'q':
+ flags |= USB_QUIRK_FORCE_ONE_CONFIG;
/* Ignore unrecognized flag characters */
}
}
@@ -207,6 +209,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* HP v222w 16GB Mini USB Drive */
{ USB_DEVICE(0x03f0, 0x3f40), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* Huawei 4G LTE module ME906S */
+ { USB_DEVICE(0x03f0, 0xa31d), .driver_info =
+ USB_QUIRK_DISCONNECT_SUSPEND },
+
/* Creative SB Audigy 2 NX */
{ USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -376,6 +382,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* SanDisk Extreme 55AE */
{ USB_DEVICE(0x0781, 0x55ae), .driver_info = USB_QUIRK_NO_LPM },
+ /* Avermedia Live Gamer Ultra 2.1 (GC553G2) - BOS descriptor fetch hangs at SuperSpeed Plus */
+ { USB_DEVICE(0x07ca, 0x2553), .driver_info = USB_QUIRK_NO_BOS },
+
/* Realforce 87U Keyboard */
{ USB_DEVICE(0x0853, 0x011b), .driver_info = USB_QUIRK_NO_LPM },
@@ -436,6 +445,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ /* ASUS TUF 4K PRO - BOS descriptor fetch hangs at SuperSpeed Plus */
+ { USB_DEVICE(0x0b05, 0x1ab9), .driver_info = USB_QUIRK_NO_BOS },
+
/* Realtek Semiconductor Corp. Mass Storage Device (Multicard Reader)*/
{ USB_DEVICE(0x0bda, 0x0151), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -564,6 +576,9 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x2386, 0x350e), .driver_info = USB_QUIRK_NO_LPM },
+ /* UGREEN 35871 - BOS descriptor fetch hangs at SuperSpeed Plus */
+ { USB_DEVICE(0x2b89, 0x5871), .driver_info = USB_QUIRK_NO_BOS },
+
/* APTIV AUTOMOTIVE HUB */
{ USB_DEVICE(0x2c48, 0x0132), .driver_info =
USB_QUIRK_SHORT_SET_ADDRESS_REQ_TIMEOUT },
@@ -574,12 +589,18 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Alcor Link AK9563 SC Reader used in 2022 Lenovo ThinkPads */
{ USB_DEVICE(0x2ce3, 0x9563), .driver_info = USB_QUIRK_NO_LPM },
+ /* ezcap401 - BOS descriptor fetch hangs at SuperSpeed Plus */
+ { USB_DEVICE(0x32ed, 0x0401), .driver_info = USB_QUIRK_NO_BOS },
+
/* DELL USB GEN2 */
{ USB_DEVICE(0x413c, 0xb062), .driver_info = USB_QUIRK_NO_LPM | USB_QUIRK_RESET_RESUME },
/* VCOM device */
{ USB_DEVICE(0x4296, 0x7570), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS },
+ /* Noji-MCS SmartCard Reader */
+ { USB_DEVICE(0x5131, 0x2007), .driver_info = USB_QUIRK_FORCE_ONE_CONFIG },
+
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 6ecadc81bd6b..6c1cbb722ca8 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -56,6 +56,7 @@
#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
#define PCI_DEVICE_ID_INTEL_CNPV 0xa3b0
#define PCI_DEVICE_ID_INTEL_RPL 0xa70e
+#define PCI_DEVICE_ID_INTEL_NVLH 0xd37f
#define PCI_DEVICE_ID_INTEL_PTLH 0xe332
#define PCI_DEVICE_ID_INTEL_PTLH_PCH 0xe37e
#define PCI_DEVICE_ID_INTEL_PTLU 0xe432
@@ -447,6 +448,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE_DATA(INTEL, CNPH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, CNPV, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, RPL, &dwc3_pci_intel_swnode) },
+ { PCI_DEVICE_DATA(INTEL, NVLH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, PTLH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, PTLH_PCH, &dwc3_pci_intel_swnode) },
{ PCI_DEVICE_DATA(INTEL, PTLU, &dwc3_pci_intel_swnode) },
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 8c855c00b887..8812ebf33d14 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -1207,9 +1207,11 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
if (!hidg->interval_user_set) {
hidg_fs_in_ep_desc.bInterval = 10;
hidg_hs_in_ep_desc.bInterval = 4;
+ hidg_ss_in_ep_desc.bInterval = 4;
} else {
hidg_fs_in_ep_desc.bInterval = hidg->interval;
hidg_hs_in_ep_desc.bInterval = hidg->interval;
+ hidg_ss_in_ep_desc.bInterval = hidg->interval;
}
hidg_ss_out_comp_desc.wBytesPerInterval =
@@ -1239,9 +1241,11 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
if (!hidg->interval_user_set) {
hidg_fs_out_ep_desc.bInterval = 10;
hidg_hs_out_ep_desc.bInterval = 4;
+ hidg_ss_out_ep_desc.bInterval = 4;
} else {
hidg_fs_out_ep_desc.bInterval = hidg->interval;
hidg_hs_out_ep_desc.bInterval = hidg->interval;
+ hidg_ss_out_ep_desc.bInterval = hidg->interval;
}
status = usb_assign_descriptors(f,
hidg_fs_descriptors_intout,
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 6af96e2b44eb..b7b06cb79ff5 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -180,6 +180,7 @@
#include <linux/kthread.h>
#include <linux/sched/signal.h>
#include <linux/limits.h>
+#include <linux/overflow.h>
#include <linux/pagemap.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
@@ -1853,8 +1854,15 @@ static int check_command_size_in_blocks(struct fsg_common *common,
int cmnd_size, enum data_direction data_dir,
unsigned int mask, int needs_medium, const char *name)
{
- if (common->curlun)
- common->data_size_from_cmnd <<= common->curlun->blkbits;
+ if (common->curlun) {
+ if (check_shl_overflow(common->data_size_from_cmnd,
+ common->curlun->blkbits,
+ &common->data_size_from_cmnd)) {
+ common->phase_error = 1;
+ return -EINVAL;
+ }
+ }
+
return check_command(common, cmnd_size, data_dir,
mask, needs_medium, name);
}
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 14fc7dce6f39..a6fa5ed3d6cb 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -83,11 +83,6 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f)
return container_of(f, struct f_ncm, port.func);
}
-static inline struct f_ncm_opts *func_to_ncm_opts(struct usb_function *f)
-{
- return container_of(f->fi, struct f_ncm_opts, func_inst);
-}
-
/*-------------------------------------------------------------------------*/
/*
@@ -864,7 +859,6 @@ invalid:
static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_ncm *ncm = func_to_ncm(f);
- struct f_ncm_opts *opts = func_to_ncm_opts(f);
struct usb_composite_dev *cdev = f->config->cdev;
/* Control interface has only altsetting 0 */
@@ -887,13 +881,12 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
if (alt > 1)
goto fail;
- scoped_guard(mutex, &opts->lock)
- if (opts->net) {
- DBG(cdev, "reset ncm\n");
- opts->net = NULL;
- gether_disconnect(&ncm->port);
- ncm_reset_values(ncm);
- }
+ if (ncm->netdev) {
+ DBG(cdev, "reset ncm\n");
+ ncm->netdev = NULL;
+ gether_disconnect(&ncm->port);
+ ncm_reset_values(ncm);
+ }
/*
* CDC Network only sends data in non-default altsettings.
@@ -926,8 +919,7 @@ static int ncm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
net = gether_connect(&ncm->port);
if (IS_ERR(net))
return PTR_ERR(net);
- scoped_guard(mutex, &opts->lock)
- opts->net = net;
+ ncm->netdev = net;
}
spin_lock(&ncm->lock);
@@ -1374,16 +1366,14 @@ err:
static void ncm_disable(struct usb_function *f)
{
struct f_ncm *ncm = func_to_ncm(f);
- struct f_ncm_opts *opts = func_to_ncm_opts(f);
struct usb_composite_dev *cdev = f->config->cdev;
DBG(cdev, "ncm deactivated\n");
- scoped_guard(mutex, &opts->lock)
- if (opts->net) {
- opts->net = NULL;
- gether_disconnect(&ncm->port);
- }
+ if (ncm->netdev) {
+ ncm->netdev = NULL;
+ gether_disconnect(&ncm->port);
+ }
if (ncm->notify->enabled) {
usb_ep_disable(ncm->notify);
@@ -1443,44 +1433,41 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
{
struct usb_composite_dev *cdev = c->cdev;
struct f_ncm *ncm = func_to_ncm(f);
- struct f_ncm_opts *ncm_opts = func_to_ncm_opts(f);
struct usb_string *us;
int status = 0;
struct usb_ep *ep;
+ struct f_ncm_opts *ncm_opts;
struct usb_os_desc_table *os_desc_table __free(kfree) = NULL;
- struct net_device *netdev __free(free_gether_netdev) = NULL;
+ struct net_device *net __free(detach_gadget) = NULL;
struct usb_request *request __free(free_usb_request) = NULL;
if (!can_support_ecm(cdev->gadget))
return -EINVAL;
+ ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst);
+
if (cdev->use_os_string) {
os_desc_table = kzalloc(sizeof(*os_desc_table), GFP_KERNEL);
if (!os_desc_table)
return -ENOMEM;
}
- netdev = gether_setup_default();
- if (IS_ERR(netdev))
- return -ENOMEM;
-
- scoped_guard(mutex, &ncm_opts->lock) {
- gether_apply_opts(netdev, &ncm_opts->net_opts);
- netdev->mtu = ncm_opts->max_segment_size - ETH_HLEN;
- }
-
- gether_set_gadget(netdev, cdev->gadget);
- status = gether_register_netdev(netdev);
- if (status)
- return status;
+ scoped_guard(mutex, &ncm_opts->lock)
+ if (ncm_opts->bind_count == 0) {
+ if (!device_is_registered(&ncm_opts->net->dev)) {
+ ncm_opts->net->mtu = (ncm_opts->max_segment_size - ETH_HLEN);
+ gether_set_gadget(ncm_opts->net, cdev->gadget);
+ status = gether_register_netdev(ncm_opts->net);
+ } else
+ status = gether_attach_gadget(ncm_opts->net, cdev->gadget);
+
+ if (status)
+ return status;
+ net = ncm_opts->net;
+ }
- /* export host's Ethernet address in CDC format */
- status = gether_get_host_addr_cdc(netdev, ncm->ethaddr,
- sizeof(ncm->ethaddr));
- if (status < 12)
- return -EINVAL;
- ncm_string_defs[STRING_MAC_IDX].s = ncm->ethaddr;
+ ncm_string_defs[1].s = ncm->ethaddr;
us = usb_gstrings_attach(cdev, ncm_strings,
ARRAY_SIZE(ncm_string_defs));
@@ -1578,8 +1565,9 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
f->os_desc_n = 1;
}
ncm->notify_req = no_free_ptr(request);
- ncm->netdev = no_free_ptr(netdev);
- ncm->port.ioport = netdev_priv(ncm->netdev);
+
+ ncm_opts->bind_count++;
+ retain_and_null_ptr(net);
DBG(cdev, "CDC Network: IN/%s OUT/%s NOTIFY/%s\n",
ncm->port.in_ep->name, ncm->port.out_ep->name,
@@ -1594,19 +1582,19 @@ static inline struct f_ncm_opts *to_f_ncm_opts(struct config_item *item)
}
/* f_ncm_item_ops */
-USB_ETHER_OPTS_ITEM(ncm);
+USB_ETHERNET_CONFIGFS_ITEM(ncm);
/* f_ncm_opts_dev_addr */
-USB_ETHER_OPTS_ATTR_DEV_ADDR(ncm);
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_DEV_ADDR(ncm);
/* f_ncm_opts_host_addr */
-USB_ETHER_OPTS_ATTR_HOST_ADDR(ncm);
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_HOST_ADDR(ncm);
/* f_ncm_opts_qmult */
-USB_ETHER_OPTS_ATTR_QMULT(ncm);
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_QMULT(ncm);
/* f_ncm_opts_ifname */
-USB_ETHER_OPTS_ATTR_IFNAME(ncm);
+USB_ETHERNET_CONFIGFS_ITEM_ATTR_IFNAME(ncm);
static ssize_t ncm_opts_max_segment_size_show(struct config_item *item,
char *page)
@@ -1672,27 +1660,34 @@ static void ncm_free_inst(struct usb_function_instance *f)
struct f_ncm_opts *opts;
opts = container_of(f, struct f_ncm_opts, func_inst);
+ if (device_is_registered(&opts->net->dev))
+ gether_cleanup(netdev_priv(opts->net));
+ else
+ free_netdev(opts->net);
kfree(opts->ncm_interf_group);
kfree(opts);
}
static struct usb_function_instance *ncm_alloc_inst(void)
{
- struct usb_function_instance *ret;
+ struct f_ncm_opts *opts;
struct usb_os_desc *descs[1];
char *names[1];
struct config_group *ncm_interf_group;
- struct f_ncm_opts *opts __free(kfree) = kzalloc_obj(*opts);
+ opts = kzalloc_obj(*opts);
if (!opts)
return ERR_PTR(-ENOMEM);
-
- opts->net = NULL;
opts->ncm_os_desc.ext_compat_id = opts->ncm_ext_compat_id;
- gether_setup_opts_default(&opts->net_opts, "usb");
mutex_init(&opts->lock);
opts->func_inst.free_func_inst = ncm_free_inst;
+ opts->net = gether_setup_default();
+ if (IS_ERR(opts->net)) {
+ struct net_device *net = opts->net;
+ kfree(opts);
+ return ERR_CAST(net);
+ }
opts->max_segment_size = ETH_FRAME_LEN;
INIT_LIST_HEAD(&opts->ncm_os_desc.ext_prop);
@@ -1703,30 +1698,37 @@ static struct usb_function_instance *ncm_alloc_inst(void)
ncm_interf_group =
usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
names, THIS_MODULE);
- if (IS_ERR(ncm_interf_group))
+ if (IS_ERR(ncm_interf_group)) {
+ ncm_free_inst(&opts->func_inst);
return ERR_CAST(ncm_interf_group);
+ }
opts->ncm_interf_group = ncm_interf_group;
- ret = &opts->func_inst;
- retain_and_null_ptr(opts);
- return ret;
+ return &opts->func_inst;
}
static void ncm_free(struct usb_function *f)
{
- struct f_ncm_opts *opts = func_to_ncm_opts(f);
+ struct f_ncm *ncm;
+ struct f_ncm_opts *opts;
- scoped_guard(mutex, &opts->lock)
- opts->refcnt--;
- kfree(func_to_ncm(f));
+ ncm = func_to_ncm(f);
+ opts = container_of(f->fi, struct f_ncm_opts, func_inst);
+ kfree(ncm);
+ mutex_lock(&opts->lock);
+ opts->refcnt--;
+ mutex_unlock(&opts->lock);
}
static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
{
struct f_ncm *ncm = func_to_ncm(f);
+ struct f_ncm_opts *ncm_opts;
DBG(c->cdev, "ncm unbind\n");
+ ncm_opts = container_of(f->fi, struct f_ncm_opts, func_inst);
+
hrtimer_cancel(&ncm->task_timer);
kfree(f->os_desc_table);
@@ -1743,14 +1745,16 @@ static void ncm_unbind(struct usb_configuration *c, struct usb_function *f)
kfree(ncm->notify_req->buf);
usb_ep_free_request(ncm->notify, ncm->notify_req);
- ncm->port.ioport = NULL;
- gether_cleanup(netdev_priv(ncm->netdev));
+ ncm_opts->bind_count--;
+ if (ncm_opts->bind_count == 0)
+ gether_detach_gadget(ncm_opts->net);
}
static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
{
struct f_ncm *ncm;
struct f_ncm_opts *opts;
+ int status;
/* allocate and initialize one new instance */
ncm = kzalloc(sizeof(*ncm), GFP_KERNEL);
@@ -1758,12 +1762,22 @@ static struct usb_function *ncm_alloc(struct usb_function_instance *fi)
return ERR_PTR(-ENOMEM);
opts = container_of(fi, struct f_ncm_opts, func_inst);
+ mutex_lock(&opts->lock);
+ opts->refcnt++;
- scoped_guard(mutex, &opts->lock)
- opts->refcnt++;
+ /* export host's Ethernet address in CDC format */
+ status = gether_get_host_addr_cdc(opts->net, ncm->ethaddr,
+ sizeof(ncm->ethaddr));
+ if (status < 12) { /* strlen("01234567890a") */
+ kfree(ncm);
+ mutex_unlock(&opts->lock);
+ return ERR_PTR(-EINVAL);
+ }
spin_lock_init(&ncm->lock);
ncm_reset_values(ncm);
+ ncm->port.ioport = netdev_priv(opts->net);
+ mutex_unlock(&opts->lock);
ncm->port.is_fixed = true;
ncm->port.supports_multi_frame = true;
diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c
index ec050d8f99f1..a7853dcbb14c 100644
--- a/drivers/usb/gadget/function/f_tcm.c
+++ b/drivers/usb/gadget/function/f_tcm.c
@@ -1222,6 +1222,13 @@ static void usbg_submit_cmd(struct usbg_cmd *cmd)
se_cmd = &cmd->se_cmd;
tpg = cmd->fu->tpg;
tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus) {
+ struct usb_gadget *gadget = fuas_to_gadget(cmd->fu);
+
+ dev_err(&gadget->dev, "Missing nexus, ignoring command\n");
+ return;
+ }
+
dir = get_cmd_dir(cmd->cmd_buf);
if (dir < 0)
goto out;
@@ -1483,6 +1490,13 @@ static void bot_cmd_work(struct work_struct *work)
se_cmd = &cmd->se_cmd;
tpg = cmd->fu->tpg;
tv_nexus = tpg->tpg_nexus;
+ if (!tv_nexus) {
+ struct usb_gadget *gadget = fuas_to_gadget(cmd->fu);
+
+ dev_err(&gadget->dev, "Missing nexus, ignoring command\n");
+ return;
+ }
+
dir = get_cmd_dir(cmd->cmd_buf);
if (dir < 0)
goto out;
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 338f6e2a85a9..1a9e7c495e2e 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -897,6 +897,28 @@ void gether_set_gadget(struct net_device *net, struct usb_gadget *g)
}
EXPORT_SYMBOL_GPL(gether_set_gadget);
+int gether_attach_gadget(struct net_device *net, struct usb_gadget *g)
+{
+ int ret;
+
+ ret = device_move(&net->dev, &g->dev, DPM_ORDER_DEV_AFTER_PARENT);
+ if (ret)
+ return ret;
+
+ gether_set_gadget(net, g);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gether_attach_gadget);
+
+void gether_detach_gadget(struct net_device *net)
+{
+ struct eth_dev *dev = netdev_priv(net);
+
+ device_move(&net->dev, NULL, DPM_ORDER_NONE);
+ dev->gadget = NULL;
+}
+EXPORT_SYMBOL_GPL(gether_detach_gadget);
+
int gether_set_dev_addr(struct net_device *net, const char *dev_addr)
{
struct eth_dev *dev;
@@ -1040,36 +1062,6 @@ int gether_set_ifname(struct net_device *net, const char *name, int len)
}
EXPORT_SYMBOL_GPL(gether_set_ifname);
-void gether_setup_opts_default(struct gether_opts *opts, const char *name)
-{
- opts->qmult = QMULT_DEFAULT;
- snprintf(opts->name, sizeof(opts->name), "%s%%d", name);
- eth_random_addr(opts->dev_mac);
- opts->addr_assign_type = NET_ADDR_RANDOM;
- eth_random_addr(opts->host_mac);
-}
-EXPORT_SYMBOL_GPL(gether_setup_opts_default);
-
-void gether_apply_opts(struct net_device *net, struct gether_opts *opts)
-{
- struct eth_dev *dev = netdev_priv(net);
-
- dev->qmult = opts->qmult;
-
- if (opts->ifname_set) {
- strscpy(net->name, opts->name, sizeof(net->name));
- dev->ifname_set = true;
- }
-
- memcpy(dev->host_mac, opts->host_mac, sizeof(dev->host_mac));
-
- if (opts->addr_assign_type == NET_ADDR_SET) {
- memcpy(dev->dev_mac, opts->dev_mac, sizeof(dev->dev_mac));
- net->addr_assign_type = opts->addr_assign_type;
- }
-}
-EXPORT_SYMBOL_GPL(gether_apply_opts);
-
void gether_suspend(struct gether *link)
{
struct eth_dev *dev = link->ioport;
@@ -1126,21 +1118,6 @@ void gether_cleanup(struct eth_dev *dev)
}
EXPORT_SYMBOL_GPL(gether_cleanup);
-void gether_unregister_free_netdev(struct net_device *net)
-{
- if (!net)
- return;
-
- struct eth_dev *dev = netdev_priv(net);
-
- if (net->reg_state == NETREG_REGISTERED) {
- unregister_netdev(net);
- flush_work(&dev->work);
- }
- free_netdev(net);
-}
-EXPORT_SYMBOL_GPL(gether_unregister_free_netdev);
-
/**
* gether_connect - notify network layer that USB link is active
* @link: the USB link, set up with endpoints, descriptors matching
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
index a212a8ec5eb1..c85a1cf3c115 100644
--- a/drivers/usb/gadget/function/u_ether.h
+++ b/drivers/usb/gadget/function/u_ether.h
@@ -38,31 +38,6 @@
struct eth_dev;
-/**
- * struct gether_opts - Options for Ethernet gadget function instances
- * @name: Pattern for the network interface name (e.g., "usb%d").
- * Used to generate the net device name.
- * @qmult: Queue length multiplier for high/super speed.
- * @host_mac: The MAC address to be used by the host side.
- * @dev_mac: The MAC address to be used by the device side.
- * @ifname_set: True if the interface name pattern has been set by userspace.
- * @addr_assign_type: The method used for assigning the device MAC address
- * (e.g., NET_ADDR_RANDOM, NET_ADDR_SET).
- *
- * This structure caches network-related settings provided through configfs
- * before the net_device is fully instantiated. This allows for early
- * configuration while deferring net_device allocation until the function
- * is bound.
- */
-struct gether_opts {
- char name[IFNAMSIZ];
- unsigned int qmult;
- u8 host_mac[ETH_ALEN];
- u8 dev_mac[ETH_ALEN];
- bool ifname_set;
- unsigned char addr_assign_type;
-};
-
/*
* This represents the USB side of an "ethernet" link, managed by a USB
* function which provides control and (maybe) framing. Two functions
@@ -176,6 +151,32 @@ static inline struct net_device *gether_setup_default(void)
void gether_set_gadget(struct net_device *net, struct usb_gadget *g);
/**
+ * gether_attach_gadget - Reparent net_device to the gadget device.
+ * @net: The network device to reparent.
+ * @g: The target USB gadget device to parent to.
+ *
+ * This function moves the network device to be a child of the USB gadget
+ * device in the device hierarchy. This is typically done when the function
+ * is bound to a configuration.
+ *
+ * Returns 0 on success, or a negative error code on failure.
+ */
+int gether_attach_gadget(struct net_device *net, struct usb_gadget *g);
+
+/**
+ * gether_detach_gadget - Detach net_device from its gadget parent.
+ * @net: The network device to detach.
+ *
+ * This function moves the network device to be a child of the virtual
+ * device's parent, effectively detaching it from the USB gadget device
+ * hierarchy. This is typically done when the function is unbound
+ * from a configuration but the instance is not yet freed.
+ */
+void gether_detach_gadget(struct net_device *net);
+
+DEFINE_FREE(detach_gadget, struct net_device *, if (_T) gether_detach_gadget(_T))
+
+/**
* gether_set_dev_addr - initialize an ethernet-over-usb link with eth address
* @net: device representing this link
* @dev_addr: eth address of this device
@@ -283,11 +284,6 @@ int gether_get_ifname(struct net_device *net, char *name, int len);
int gether_set_ifname(struct net_device *net, const char *name, int len);
void gether_cleanup(struct eth_dev *dev);
-void gether_unregister_free_netdev(struct net_device *net);
-DEFINE_FREE(free_gether_netdev, struct net_device *, gether_unregister_free_netdev(_T));
-
-void gether_setup_opts_default(struct gether_opts *opts, const char *name);
-void gether_apply_opts(struct net_device *net, struct gether_opts *opts);
void gether_suspend(struct gether *link);
void gether_resume(struct gether *link);
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index 217990a266b2..51f0d79e5eca 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -13,13 +13,6 @@
#ifndef __U_ETHER_CONFIGFS_H
#define __U_ETHER_CONFIGFS_H
-#include <linux/cleanup.h>
-#include <linux/hex.h>
-#include <linux/if_ether.h>
-#include <linux/mutex.h>
-#include <linux/netdevice.h>
-#include <linux/rtnetlink.h>
-
#define USB_ETHERNET_CONFIGFS_ITEM(_f_) \
static void _f_##_attr_release(struct config_item *item) \
{ \
@@ -204,174 +197,4 @@ out: \
\
CONFIGFS_ATTR(_f_##_opts_, _n_)
-#define USB_ETHER_OPTS_ITEM(_f_) \
- static void _f_##_attr_release(struct config_item *item) \
- { \
- struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
- \
- usb_put_function_instance(&opts->func_inst); \
- } \
- \
- static struct configfs_item_operations _f_##_item_ops = { \
- .release = _f_##_attr_release, \
- }
-
-#define USB_ETHER_OPTS_ATTR_DEV_ADDR(_f_) \
- static ssize_t _f_##_opts_dev_addr_show(struct config_item *item, \
- char *page) \
- { \
- struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
- \
- guard(mutex)(&opts->lock); \
- return sysfs_emit(page, "%pM\n", opts->net_opts.dev_mac); \
- } \
- \
- static ssize_t _f_##_opts_dev_addr_store(struct config_item *item, \
- const char *page, size_t len) \
- { \
- struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
- u8 new_addr[ETH_ALEN]; \
- const char *p = page; \
- \
- guard(mutex)(&opts->lock); \
- if (opts->refcnt) \
- return -EBUSY; \
- \
- for (int i = 0; i < ETH_ALEN; i++) { \
- unsigned char num; \
- if ((*p == '.') || (*p == ':')) \
- p++; \
- num = hex_to_bin(*p++) << 4; \
- num |= hex_to_bin(*p++); \
- new_addr[i] = num; \
- } \
- if (!is_valid_ether_addr(new_addr)) \
- return -EINVAL; \
- memcpy(opts->net_opts.dev_mac, new_addr, ETH_ALEN); \
- opts->net_opts.addr_assign_type = NET_ADDR_SET; \
- return len; \
- } \
- \
- CONFIGFS_ATTR(_f_##_opts_, dev_addr)
-
-#define USB_ETHER_OPTS_ATTR_HOST_ADDR(_f_) \
- static ssize_t _f_##_opts_host_addr_show(struct config_item *item, \
- char *page) \
- { \
- struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
- \
- guard(mutex)(&opts->lock); \
- return sysfs_emit(page, "%pM\n", opts->net_opts.host_mac); \
- } \
- \
- static ssize_t _f_##_opts_host_addr_store(struct config_item *item, \
- const char *page, size_t len) \
- { \
- struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
- u8 new_addr[ETH_ALEN]; \
- const char *p = page; \
- \
- guard(mutex)(&opts->lock); \
- if (opts->refcnt) \
- return -EBUSY; \
- \
- for (int i = 0; i < ETH_ALEN; i++) { \
- unsigned char num; \
- if ((*p == '.') || (*p == ':')) \
- p++; \
- num = hex_to_bin(*p++) << 4; \
- num |= hex_to_bin(*p++); \
- new_addr[i] = num; \
- } \
- if (!is_valid_ether_addr(new_addr)) \
- return -EINVAL; \
- memcpy(opts->net_opts.host_mac, new_addr, ETH_ALEN); \
- return len; \
- } \
- \
- CONFIGFS_ATTR(_f_##_opts_, host_addr)
-
-#define USB_ETHER_OPTS_ATTR_QMULT(_f_) \
- static ssize_t _f_##_opts_qmult_show(struct config_item *item, \
- char *page) \
- { \
- struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
- \
- guard(mutex)(&opts->lock); \
- return sysfs_emit(page, "%u\n", opts->net_opts.qmult); \
- } \
- \
- static ssize_t _f_##_opts_qmult_store(struct config_item *item, \
- const char *page, size_t len) \
- { \
- struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
- u32 val; \
- int ret; \
- \
- guard(mutex)(&opts->lock); \
- if (opts->refcnt) \
- return -EBUSY; \
- \
- ret = kstrtou32(page, 0, &val); \
- if (ret) \
- return ret; \
- \
- opts->net_opts.qmult = val; \
- return len; \
- } \
- \
- CONFIGFS_ATTR(_f_##_opts_, qmult)
-
-#define USB_ETHER_OPTS_ATTR_IFNAME(_f_) \
- static ssize_t _f_##_opts_ifname_show(struct config_item *item, \
- char *page) \
- { \
- struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
- const char *name; \
- \
- guard(mutex)(&opts->lock); \
- rtnl_lock(); \
- if (opts->net_opts.ifname_set) \
- name = opts->net_opts.name; \
- else if (opts->net) \
- name = netdev_name(opts->net); \
- else \
- name = "(inactive net_device)"; \
- rtnl_unlock(); \
- return sysfs_emit(page, "%s\n", name); \
- } \
- \
- static ssize_t _f_##_opts_ifname_store(struct config_item *item, \
- const char *page, size_t len) \
- { \
- struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
- char tmp[IFNAMSIZ]; \
- const char *p; \
- size_t c_len = len; \
- \
- if (c_len > 0 && page[c_len - 1] == '\n') \
- c_len--; \
- \
- if (c_len >= sizeof(tmp)) \
- return -E2BIG; \
- \
- strscpy(tmp, page, c_len + 1); \
- if (!dev_valid_name(tmp)) \
- return -EINVAL; \
- \
- /* Require exactly one %d */ \
- p = strchr(tmp, '%'); \
- if (!p || p[1] != 'd' || strchr(p + 2, '%')) \
- return -EINVAL; \
- \
- guard(mutex)(&opts->lock); \
- if (opts->refcnt) \
- return -EBUSY; \
- strscpy(opts->net_opts.name, tmp, sizeof(opts->net_opts.name)); \
- opts->net_opts.ifname_set = true; \
- return len; \
- } \
- \
- CONFIGFS_ATTR(_f_##_opts_, ifname)
-
#endif /* __U_ETHER_CONFIGFS_H */
diff --git a/drivers/usb/gadget/function/u_ncm.h b/drivers/usb/gadget/function/u_ncm.h
index d99330fe31e8..b1f3db8b68c1 100644
--- a/drivers/usb/gadget/function/u_ncm.h
+++ b/drivers/usb/gadget/function/u_ncm.h
@@ -15,13 +15,11 @@
#include <linux/usb/composite.h>
-#include "u_ether.h"
-
struct f_ncm_opts {
struct usb_function_instance func_inst;
struct net_device *net;
+ int bind_count;
- struct gether_opts net_opts;
struct config_group *ncm_interf_group;
struct usb_os_desc ncm_os_desc;
char ncm_ext_compat_id[16];
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index 7cea641b06b4..2f9700b3f1b6 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -513,7 +513,7 @@ uvc_video_prep_requests(struct uvc_video *video)
return;
}
- interval_duration = 2 << (video->ep->desc->bInterval - 1);
+ interval_duration = 1 << (video->ep->desc->bInterval - 1);
if (cdev->gadget->speed < USB_SPEED_HIGH)
interval_duration *= 10000;
else
diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c
index 890fc5e892f1..ade178ab34a7 100644
--- a/drivers/usb/host/xhci-debugfs.c
+++ b/drivers/usb/host/xhci-debugfs.c
@@ -386,11 +386,19 @@ static const struct file_operations port_fops = {
static int xhci_portli_show(struct seq_file *s, void *unused)
{
struct xhci_port *port = s->private;
- struct xhci_hcd *xhci = hcd_to_xhci(port->rhub->hcd);
+ struct xhci_hcd *xhci;
u32 portli;
portli = readl(&port->port_reg->portli);
+ /* port without protocol capability isn't added to a roothub */
+ if (!port->rhub) {
+ seq_printf(s, "0x%08x\n", portli);
+ return 0;
+ }
+
+ xhci = hcd_to_xhci(port->rhub->hcd);
+
/* PORTLI fields are valid if port is a USB3 or eUSB2V2 port */
if (port->rhub == &xhci->usb3_rhub)
seq_printf(s, "0x%08x LEC=%u RLC=%u TLC=%u\n", portli,
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 9315ba18310d..1cbefee3c4ca 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -3195,6 +3195,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
if (status & STS_HCE) {
xhci_warn(xhci, "WARNING: Host Controller Error\n");
+ xhci_halt(xhci);
goto out;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index c36ab323d68e..ef6d8662adec 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -4146,7 +4146,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING) ||
(xhci->xhc_state & XHCI_STATE_HALTED)) {
spin_unlock_irqrestore(&xhci->lock, flags);
- kfree(command);
+ xhci_free_command(xhci, command);
return -ENODEV;
}
@@ -4154,7 +4154,7 @@ int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id)
slot_id);
if (ret) {
spin_unlock_irqrestore(&xhci->lock, flags);
- kfree(command);
+ xhci_free_command(xhci, command);
return ret;
}
xhci_ring_cmd_db(xhci);
diff --git a/drivers/usb/image/mdc800.c b/drivers/usb/image/mdc800.c
index 8d8e79afa600..ca287b770e8c 100644
--- a/drivers/usb/image/mdc800.c
+++ b/drivers/usb/image/mdc800.c
@@ -707,7 +707,7 @@ static ssize_t mdc800_device_read (struct file *file, char __user *buf, size_t l
if (signal_pending (current))
{
mutex_unlock(&mdc800->io_lock);
- return -EINTR;
+ return len == left ? -EINTR : len-left;
}
sts=left > (mdc800->out_count-mdc800->out_ptr)?mdc800->out_count-mdc800->out_ptr:left;
@@ -730,9 +730,11 @@ static ssize_t mdc800_device_read (struct file *file, char __user *buf, size_t l
mutex_unlock(&mdc800->io_lock);
return len-left;
}
- wait_event_timeout(mdc800->download_wait,
+ retval = wait_event_timeout(mdc800->download_wait,
mdc800->downloaded,
msecs_to_jiffies(TO_DOWNLOAD_GET_READY));
+ if (!retval)
+ usb_kill_urb(mdc800->download_urb);
mdc800->downloaded = 0;
if (mdc800->download_urb->status != 0)
{
diff --git a/drivers/usb/misc/uss720.c b/drivers/usb/misc/uss720.c
index ec8bd968c4de..a8af7615b1bf 100644
--- a/drivers/usb/misc/uss720.c
+++ b/drivers/usb/misc/uss720.c
@@ -736,7 +736,7 @@ static int uss720_probe(struct usb_interface *intf,
ret = get_1284_register(pp, 0, &reg, GFP_KERNEL);
dev_dbg(&intf->dev, "reg: %7ph\n", priv->reg);
if (ret < 0)
- return ret;
+ goto probe_abort;
ret = usb_find_last_int_in_endpoint(interface, &epd);
if (!ret) {
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 9189e4bb213a..7a482cdee1e9 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -272,6 +272,7 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
dev->int_buffer, YUREX_BUF_SIZE, yurex_interrupt,
dev, 1);
dev->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
+ dev->bbu = -1;
if (usb_submit_urb(dev->urb, GFP_KERNEL)) {
retval = -EIO;
dev_err(&interface->dev, "Could not submitting URB\n");
@@ -280,7 +281,6 @@ static int yurex_probe(struct usb_interface *interface, const struct usb_device_
/* save our data pointer in this interface device */
usb_set_intfdata(interface, dev);
- dev->bbu = -1;
/* we can register the device now, as it is ready */
retval = usb_register_dev(interface, &yurex_class);
diff --git a/drivers/usb/renesas_usbhs/common.c b/drivers/usb/renesas_usbhs/common.c
index cf4a0367d6d6..8c93bde4b816 100644
--- a/drivers/usb/renesas_usbhs/common.c
+++ b/drivers/usb/renesas_usbhs/common.c
@@ -815,6 +815,15 @@ static void usbhs_remove(struct platform_device *pdev)
usbhs_platform_call(priv, hardware_exit, pdev);
reset_control_assert(priv->rsts);
+
+ /*
+ * Explicitly free the IRQ to ensure the interrupt handler is
+ * disabled and synchronized before freeing resources.
+ * devm_free_irq() calls free_irq() which waits for any running
+ * ISR to complete, preventing UAF.
+ */
+ devm_free_irq(&pdev->dev, priv->irq, priv);
+
usbhs_mod_remove(priv);
usbhs_fifo_remove(priv);
usbhs_pipe_remove(priv);
diff --git a/drivers/usb/roles/class.c b/drivers/usb/roles/class.c
index b8e28ceca51e..edec139b68b5 100644
--- a/drivers/usb/roles/class.c
+++ b/drivers/usb/roles/class.c
@@ -139,9 +139,14 @@ static void *usb_role_switch_match(const struct fwnode_handle *fwnode, const cha
static struct usb_role_switch *
usb_role_switch_is_parent(struct fwnode_handle *fwnode)
{
- struct fwnode_handle *parent = fwnode_get_parent(fwnode);
+ struct fwnode_handle *parent;
struct device *dev;
+ if (!fwnode_device_is_compatible(fwnode, "usb-b-connector"))
+ return NULL;
+
+ parent = fwnode_get_parent(fwnode);
+
if (!fwnode_property_present(parent, "usb-role-switch")) {
fwnode_handle_put(parent);
return NULL;
diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c
index d185688a16b1..35d9c3086990 100644
--- a/drivers/usb/typec/altmodes/displayport.c
+++ b/drivers/usb/typec/altmodes/displayport.c
@@ -100,9 +100,14 @@ static int dp_altmode_configure(struct dp_altmode *dp, u8 con)
{
u8 pin_assign = 0;
u32 conf;
+ u32 signal;
/* DP Signalling */
- conf = (dp->data.conf & DP_CONF_SIGNALLING_MASK) >> DP_CONF_SIGNALLING_SHIFT;
+ signal = DP_CAP_DP_SIGNALLING(dp->port->vdo) & DP_CAP_DP_SIGNALLING(dp->alt->vdo);
+ if (dp->plug_prime)
+ signal &= DP_CAP_DP_SIGNALLING(dp->plug_prime->vdo);
+
+ conf = signal << DP_CONF_SIGNALLING_SHIFT;
switch (con) {
case DP_STATUS_CON_DISABLED:
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 1d2f3af034c5..8e0e14a2704e 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -7890,7 +7890,7 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc)
port->partner_desc.identity = &port->partner_ident;
port->role_sw = fwnode_usb_role_switch_get(tcpc->fwnode);
- if (IS_ERR_OR_NULL(port->role_sw))
+ if (!port->role_sw)
port->role_sw = usb_role_switch_get(port->dev);
if (IS_ERR(port->role_sw)) {
err = PTR_ERR(port->role_sw);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e4fad777b034..407830d86d0d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3594,7 +3594,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
}
}
- btrfs_zoned_reserve_data_reloc_bg(fs_info);
btrfs_free_zone_cache(fs_info);
btrfs_check_active_zone_reservation(fs_info);
@@ -3622,6 +3621,12 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
goto fail_cleaner;
}
+ /*
+ * Starts a transaction, must be called after the transaction kthread
+ * is initialized.
+ */
+ btrfs_zoned_reserve_data_reloc_bg(fs_info);
+
ret = btrfs_read_qgroup_config(fs_info);
if (ret)
goto fail_trans_kthread;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 744a1fff6eef..5f97a3d2a8d7 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4507,6 +4507,7 @@ static int try_release_subpage_extent_buffer(struct folio *folio)
*/
if (!test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) {
spin_unlock(&eb->refs_lock);
+ rcu_read_lock();
break;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index a11fcc9e9f50..a6da98435ef7 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6612,6 +6612,25 @@ int btrfs_create_new_inode(struct btrfs_trans_handle *trans,
int ret;
bool xa_reserved = false;
+ if (!args->orphan && !args->subvol) {
+ /*
+ * Before anything else, check if we can add the name to the
+ * parent directory. We want to avoid a dir item overflow in
+ * case we have an existing dir item due to existing name
+ * hash collisions. We do this check here before we call
+ * btrfs_add_link() down below so that we can avoid a
+ * transaction abort (which could be exploited by malicious
+ * users).
+ *
+ * For subvolumes we already do this in btrfs_mksubvol().
+ */
+ ret = btrfs_check_dir_item_collision(BTRFS_I(dir)->root,
+ btrfs_ino(BTRFS_I(dir)),
+ name);
+ if (ret < 0)
+ return ret;
+ }
+
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a1fd44c44ecf..b805dd9227ef 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -672,6 +672,13 @@ static noinline int create_subvol(struct mnt_idmap *idmap,
goto out;
}
+ /*
+ * Subvolumes have orphans cleaned on first dentry lookup. A new
+ * subvolume cannot have any orphans, so we should set the bit before we
+ * add the subvolume dentry to the dentry cache, so that it is in the
+ * same state as a subvolume after first lookup.
+ */
+ set_bit(BTRFS_ROOT_ORPHAN_CLEANUP, &new_root->state);
d_instantiate_new(dentry, new_inode_args.inode);
new_inode_args.inode = NULL;
@@ -3852,6 +3859,25 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
goto out;
}
+ received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
+ BTRFS_UUID_SIZE);
+
+ /*
+ * Before we attempt to add the new received uuid, check if we have room
+ * for it in case there's already an item. If the size of the existing
+ * item plus this root's ID (u64) exceeds the maximum item size, we can
+ * return here without the need to abort a transaction. If we don't do
+ * this check, the btrfs_uuid_tree_add() call below would fail with
+ * -EOVERFLOW and result in a transaction abort. Malicious users could
+ * exploit this to turn the fs into RO mode.
+ */
+ if (received_uuid_changed && !btrfs_is_empty_uuid(sa->uuid)) {
+ ret = btrfs_uuid_tree_check_overflow(fs_info, sa->uuid,
+ BTRFS_UUID_KEY_RECEIVED_SUBVOL);
+ if (ret < 0)
+ goto out;
+ }
+
/*
* 1 - root item
* 2 - uuid items (received uuid + subvol uuid)
@@ -3867,15 +3893,12 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
sa->rtime.sec = ct.tv_sec;
sa->rtime.nsec = ct.tv_nsec;
- received_uuid_changed = memcmp(root_item->received_uuid, sa->uuid,
- BTRFS_UUID_SIZE);
if (received_uuid_changed &&
!btrfs_is_empty_uuid(root_item->received_uuid)) {
ret = btrfs_uuid_tree_remove(trans, root_item->received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
btrfs_root_id(root));
if (unlikely(ret && ret != -ENOENT)) {
- btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
goto out;
}
@@ -3890,7 +3913,8 @@ static long _btrfs_ioctl_set_received_subvol(struct file *file,
ret = btrfs_update_root(trans, fs_info->tree_root,
&root->root_key, &root->root_item);
- if (ret < 0) {
+ if (unlikely(ret < 0)) {
+ btrfs_abort_transaction(trans, ret);
btrfs_end_transaction(trans);
goto out;
}
diff --git a/fs/btrfs/messages.h b/fs/btrfs/messages.h
index 943e53980945..c8e92efce405 100644
--- a/fs/btrfs/messages.h
+++ b/fs/btrfs/messages.h
@@ -31,9 +31,6 @@ void _btrfs_printk(const struct btrfs_fs_info *fs_info, unsigned int level, cons
#define btrfs_printk_in_rcu(fs_info, level, fmt, args...) \
btrfs_no_printk(fs_info, fmt, ##args)
-#define btrfs_printk_in_rcu(fs_info, level, fmt, args...) \
- btrfs_no_printk(fs_info, fmt, ##args)
-
#define btrfs_printk_rl_in_rcu(fs_info, level, fmt, args...) \
btrfs_no_printk(fs_info, fmt, ##args)
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index f189bf09ce6a..b7dfe877cf8d 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -38,6 +38,7 @@ static const struct root_name_map root_map[] = {
{ BTRFS_BLOCK_GROUP_TREE_OBJECTID, "BLOCK_GROUP_TREE" },
{ BTRFS_DATA_RELOC_TREE_OBJECTID, "DATA_RELOC_TREE" },
{ BTRFS_RAID_STRIPE_TREE_OBJECTID, "RAID_STRIPE_TREE" },
+ { BTRFS_REMAP_TREE_OBJECTID, "REMAP_TREE" },
};
const char *btrfs_root_name(const struct btrfs_key *key, char *buf)
@@ -415,6 +416,9 @@ static void key_type_string(const struct btrfs_key *key, char *buf, int buf_size
[BTRFS_UUID_KEY_SUBVOL] = "UUID_KEY_SUBVOL",
[BTRFS_UUID_KEY_RECEIVED_SUBVOL] = "UUID_KEY_RECEIVED_SUBVOL",
[BTRFS_RAID_STRIPE_KEY] = "RAID_STRIPE",
+ [BTRFS_IDENTITY_REMAP_KEY] = "IDENTITY_REMAP",
+ [BTRFS_REMAP_KEY] = "REMAP",
+ [BTRFS_REMAP_BACKREF_KEY] = "REMAP_BACKREF",
};
if (key->type == 0 && key->objectid == BTRFS_FREE_SPACE_OBJECTID)
@@ -435,6 +439,7 @@ void btrfs_print_leaf(const struct extent_buffer *l)
struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref;
struct btrfs_dev_extent *dev_extent;
+ struct btrfs_remap_item *remap;
struct btrfs_key key;
if (!l)
@@ -569,6 +574,11 @@ void btrfs_print_leaf(const struct extent_buffer *l)
print_raid_stripe_key(l, btrfs_item_size(l, i),
btrfs_item_ptr(l, i, struct btrfs_stripe_extent));
break;
+ case BTRFS_REMAP_KEY:
+ case BTRFS_REMAP_BACKREF_KEY:
+ remap = btrfs_item_ptr(l, i, struct btrfs_remap_item);
+ pr_info("\t\taddress %llu\n", btrfs_remap_address(l, remap));
+ break;
}
}
}
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index a330d8624b83..b2343aed7a5d 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -4399,6 +4399,8 @@ static int move_existing_remaps(struct btrfs_fs_info *fs_info,
leaf = path->nodes[0];
}
+
+ btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
}
remap = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_remap_item);
diff --git a/fs/btrfs/space-info.c b/fs/btrfs/space-info.c
index 52a267a5dd80..87cbc051cb12 100644
--- a/fs/btrfs/space-info.c
+++ b/fs/btrfs/space-info.c
@@ -2194,8 +2194,11 @@ void btrfs_reclaim_sweep(const struct btrfs_fs_info *fs_info)
if (!btrfs_should_periodic_reclaim(space_info))
continue;
for (raid = 0; raid < BTRFS_NR_RAID_TYPES; raid++) {
- if (do_reclaim_sweep(space_info, raid))
+ if (do_reclaim_sweep(space_info, raid)) {
+ spin_lock(&space_info->lock);
btrfs_set_periodic_reclaim_ready(space_info, false);
+ spin_unlock(&space_info->lock);
+ }
}
}
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 7ef8c9b7dfc1..8dd77c431974 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -1905,6 +1905,22 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
ret = btrfs_uuid_tree_add(trans, new_root_item->received_uuid,
BTRFS_UUID_KEY_RECEIVED_SUBVOL,
objectid);
+ /*
+ * We are creating a lot of snapshots of the same root that was
+ * received (has a received UUID) and reached a leaf's limit for
+ * an item. We can safely ignore this and avoid a transaction
+ * abort. A deletion of this snapshot will still work since we
+ * ignore if an item with a BTRFS_UUID_KEY_RECEIVED_SUBVOL key
+ * is missing (see btrfs_delete_subvolume()). Send/receive will
+ * work too since it peeks the first root id from the existing
+ * item (it could peek any), and in case it's missing it
+ * falls back to search by BTRFS_UUID_KEY_SUBVOL keys.
+ * Creation of a snapshot does not require CAP_SYS_ADMIN, so
+ * we don't want users triggering transaction aborts, either
+ * intentionally or not.
+ */
+ if (ret == -EOVERFLOW)
+ ret = 0;
if (unlikely(ret && ret != -EEXIST)) {
btrfs_abort_transaction(trans, ret);
goto fail;
diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c
index ac4c4573ee39..516ef62c8f43 100644
--- a/fs/btrfs/tree-checker.c
+++ b/fs/btrfs/tree-checker.c
@@ -1284,7 +1284,7 @@ static int check_root_item(struct extent_buffer *leaf, struct btrfs_key *key,
}
if (unlikely(btrfs_root_drop_level(&ri) >= BTRFS_MAX_LEVEL)) {
generic_err(leaf, slot,
- "invalid root level, have %u expect [0, %u]",
+ "invalid root drop_level, have %u expect [0, %u]",
btrfs_root_drop_level(&ri), BTRFS_MAX_LEVEL - 1);
return -EUCLEAN;
}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 780a06d59240..552fef3c385a 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -6195,6 +6195,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_log_ctx *ctx)
{
+ const bool orig_log_new_dentries = ctx->log_new_dentries;
int ret = 0;
/*
@@ -6256,7 +6257,11 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
* dir index key range logged for the directory. So we
* must make sure the deletion is recorded.
*/
+ ctx->log_new_dentries = false;
ret = btrfs_log_inode(trans, inode, LOG_INODE_ALL, ctx);
+ if (!ret && ctx->log_new_dentries)
+ ret = log_new_dir_dentries(trans, inode, ctx);
+
btrfs_add_delayed_iput(inode);
if (ret)
break;
@@ -6291,6 +6296,7 @@ static int log_conflicting_inodes(struct btrfs_trans_handle *trans,
break;
}
+ ctx->log_new_dentries = orig_log_new_dentries;
ctx->logging_conflict_inodes = false;
if (ret)
free_conflicting_inodes(ctx);
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index f24c14b9bb2f..43c17a1d3451 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -199,6 +199,44 @@ int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8
return 0;
}
+/*
+ * Check if we can add one root ID to a UUID key.
+ * If the key does not yet exist, we can; otherwise only if the extended
+ * item does not exceed the maximum item size permitted by the leaf size.
+ *
+ * Returns 0 on success, negative value on error.
+ */
+int btrfs_uuid_tree_check_overflow(struct btrfs_fs_info *fs_info,
+ const u8 *uuid, u8 type)
+{
+ BTRFS_PATH_AUTO_FREE(path);
+ int ret;
+ u32 item_size;
+ struct btrfs_key key;
+
+ if (WARN_ON_ONCE(!fs_info->uuid_root))
+ return -EINVAL;
+
+ path = btrfs_alloc_path();
+ if (!path)
+ return -ENOMEM;
+
+ btrfs_uuid_to_key(uuid, type, &key);
+ ret = btrfs_search_slot(NULL, fs_info->uuid_root, &key, path, 0, 0);
+ if (ret < 0)
+ return ret;
+ if (ret > 0)
+ return 0;
+
+ item_size = btrfs_item_size(path->nodes[0], path->slots[0]);
+
+ if (sizeof(struct btrfs_item) + item_size + sizeof(u64) >
+ BTRFS_LEAF_DATA_SIZE(fs_info))
+ return -EOVERFLOW;
+
+ return 0;
+}
+
static int btrfs_uuid_iter_rem(struct btrfs_root *uuid_root, u8 *uuid, u8 type,
u64 subid)
{
diff --git a/fs/btrfs/uuid-tree.h b/fs/btrfs/uuid-tree.h
index c60ad20325cc..02b235a3653f 100644
--- a/fs/btrfs/uuid-tree.h
+++ b/fs/btrfs/uuid-tree.h
@@ -12,6 +12,8 @@ int btrfs_uuid_tree_add(struct btrfs_trans_handle *trans, const u8 *uuid, u8 typ
u64 subid);
int btrfs_uuid_tree_remove(struct btrfs_trans_handle *trans, const u8 *uuid, u8 type,
u64 subid);
+int btrfs_uuid_tree_check_overflow(struct btrfs_fs_info *fs_info,
+ const u8 *uuid, u8 type);
int btrfs_uuid_tree_iterate(struct btrfs_fs_info *fs_info);
int btrfs_create_uuid_tree(struct btrfs_fs_info *fs_info);
int btrfs_uuid_scan_kthread(void *data);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 648bb09fc416..be8975ef8b24 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3587,7 +3587,7 @@ int btrfs_relocate_chunk(struct btrfs_fs_info *fs_info, u64 chunk_offset, bool v
/* step one, relocate all the extents inside this chunk */
btrfs_scrub_pause(fs_info);
- ret = btrfs_relocate_block_group(fs_info, chunk_offset, true);
+ ret = btrfs_relocate_block_group(fs_info, chunk_offset, verbose);
btrfs_scrub_continue(fs_info);
if (ret) {
/*
diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
index 39930d99943c..817ca4fb9efa 100644
--- a/fs/btrfs/zoned.c
+++ b/fs/btrfs/zoned.c
@@ -337,7 +337,10 @@ int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
if (!btrfs_fs_incompat(fs_info, ZONED))
return 0;
- mutex_lock(&fs_devices->device_list_mutex);
+ /*
+ * No need to take the device_list mutex here, we're still in the mount
+ * path and devices cannot be added to or removed from the list yet.
+ */
list_for_each_entry(device, &fs_devices->devices, dev_list) {
/* We can skip reading of zone info for missing devices */
if (!device->bdev)
@@ -347,7 +350,6 @@ int btrfs_get_dev_zone_info_all_devices(struct btrfs_fs_info *fs_info)
if (ret)
break;
}
- mutex_unlock(&fs_devices->device_list_mutex);
return ret;
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index e87b3bb94ee8..2090fc78529c 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1326,7 +1326,6 @@ void ceph_process_folio_batch(struct address_space *mapping,
continue;
} else if (rc == -E2BIG) {
folio_unlock(folio);
- ceph_wbc->fbatch.folios[i] = NULL;
break;
}
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index f3fe786b4143..7dc307790240 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -79,7 +79,7 @@ static int mdsc_show(struct seq_file *s, void *p)
if (req->r_inode) {
seq_printf(s, " #%llx", ceph_ino(req->r_inode));
} else if (req->r_dentry) {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, req->r_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
@@ -98,7 +98,7 @@ static int mdsc_show(struct seq_file *s, void *p)
}
if (req->r_old_dentry) {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, req->r_old_dentry, &path_info, 0);
if (IS_ERR(path))
path = NULL;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 86d7aa594ea9..bac9cfb6b982 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1339,6 +1339,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
struct ceph_client *cl = fsc->client;
struct ceph_mds_client *mdsc = fsc->mdsc;
struct inode *inode = d_inode(dentry);
+ struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_mds_request *req;
bool try_async = ceph_test_mount_opt(fsc, ASYNC_DIROPS);
struct dentry *dn;
@@ -1363,7 +1364,7 @@ static int ceph_unlink(struct inode *dir, struct dentry *dentry)
if (!dn) {
try_async = false;
} else {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0);
if (IS_ERR(path)) {
try_async = false;
@@ -1424,7 +1425,19 @@ retry:
* We have enough caps, so we assume that the unlink
* will succeed. Fix up the target inode and dcache.
*/
- drop_nlink(inode);
+
+ /*
+ * Protect the i_nlink update with i_ceph_lock
+ * to prevent racing against ceph_fill_inode()
+ * handling our completion on a worker thread
+ * and don't decrement if i_nlink has already
+ * been updated to zero by this completion.
+ */
+ spin_lock(&ci->i_ceph_lock);
+ if (inode->i_nlink > 0)
+ drop_nlink(inode);
+ spin_unlock(&ci->i_ceph_lock);
+
d_delete(dentry);
} else {
spin_lock(&fsc->async_unlink_conflict_lock);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index 66bbf6d517a9..5e7c73a29aa3 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -397,7 +397,7 @@ int ceph_open(struct inode *inode, struct file *file)
if (!dentry) {
do_sync = true;
} else {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
if (IS_ERR(path)) {
do_sync = true;
@@ -807,7 +807,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
if (!dn) {
try_async = false;
} else {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, dn, &path_info, 0);
if (IS_ERR(path)) {
try_async = false;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index d76f9a79dc0c..d99e12d1100b 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -2551,7 +2551,7 @@ int __ceph_setattr(struct mnt_idmap *idmap, struct inode *inode,
if (!dentry) {
do_sync = true;
} else {
- struct ceph_path_info path_info;
+ struct ceph_path_info path_info = {0};
path = ceph_mdsc_build_path(mdsc, dentry, &path_info, 0);
if (IS_ERR(path)) {
do_sync = true;
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 23b6d00643c9..b1746273f186 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -2768,6 +2768,7 @@ retry:
if (ret < 0) {
dput(parent);
dput(cur);
+ __putname(path);
return ERR_PTR(ret);
}
@@ -2777,6 +2778,7 @@ retry:
if (len < 0) {
dput(parent);
dput(cur);
+ __putname(path);
return ERR_PTR(len);
}
}
@@ -2813,6 +2815,7 @@ retry:
* cannot ever succeed. Creating paths that long is
* possible with Ceph, but Linux cannot use them.
*/
+ __putname(path);
return ERR_PTR(-ENAMETOOLONG);
}
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 12cb0ca738af..6bb30543eff0 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -87,7 +87,7 @@ config NFS_V4
space programs which can be found in the Linux nfs-utils package,
available from http://linux-nfs.org/.
- If unsure, say Y.
+ If unsure, say N.
config NFS_SWAP
bool "Provide swap over NFS support"
@@ -100,6 +100,7 @@ config NFS_SWAP
config NFS_V4_0
bool "NFS client support for NFSv4.0"
depends on NFS_V4
+ default y
help
This option enables support for minor version 0 of the NFSv4 protocol
(RFC 3530) in the kernel's NFS client.
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 3e2de45c95fe..be2aebf62056 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -392,8 +392,13 @@ nfs3_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
if (status != 0)
goto out_release_acls;
- if (d_alias)
+ if (d_alias) {
+ if (d_is_dir(d_alias)) {
+ status = -EISDIR;
+ goto out_dput;
+ }
dentry = d_alias;
+ }
/* When we created the file with exclusive semantics, make
* sure we set the attributes afterwards. */
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 8fdbba7cad96..8e8a76a44ff0 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -36,19 +36,30 @@
* second map contains a reference to the entry in the first map.
*/
+static struct workqueue_struct *nfsd_export_wq;
+
#define EXPKEY_HASHBITS 8
#define EXPKEY_HASHMAX (1 << EXPKEY_HASHBITS)
#define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1)
-static void expkey_put(struct kref *ref)
+static void expkey_release(struct work_struct *work)
{
- struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
+ struct svc_expkey *key = container_of(to_rcu_work(work),
+ struct svc_expkey, ek_rwork);
if (test_bit(CACHE_VALID, &key->h.flags) &&
!test_bit(CACHE_NEGATIVE, &key->h.flags))
path_put(&key->ek_path);
auth_domain_put(key->ek_client);
- kfree_rcu(key, ek_rcu);
+ kfree(key);
+}
+
+static void expkey_put(struct kref *ref)
+{
+ struct svc_expkey *key = container_of(ref, struct svc_expkey, h.ref);
+
+ INIT_RCU_WORK(&key->ek_rwork, expkey_release);
+ queue_rcu_work(nfsd_export_wq, &key->ek_rwork);
}
static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
@@ -353,11 +364,13 @@ static void export_stats_destroy(struct export_stats *stats)
EXP_STATS_COUNTERS_NUM);
}
-static void svc_export_release(struct rcu_head *rcu_head)
+static void svc_export_release(struct work_struct *work)
{
- struct svc_export *exp = container_of(rcu_head, struct svc_export,
- ex_rcu);
+ struct svc_export *exp = container_of(to_rcu_work(work),
+ struct svc_export, ex_rwork);
+ path_put(&exp->ex_path);
+ auth_domain_put(exp->ex_client);
nfsd4_fslocs_free(&exp->ex_fslocs);
export_stats_destroy(exp->ex_stats);
kfree(exp->ex_stats);
@@ -369,9 +382,8 @@ static void svc_export_put(struct kref *ref)
{
struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
- path_put(&exp->ex_path);
- auth_domain_put(exp->ex_client);
- call_rcu(&exp->ex_rcu, svc_export_release);
+ INIT_RCU_WORK(&exp->ex_rwork, svc_export_release);
+ queue_rcu_work(nfsd_export_wq, &exp->ex_rwork);
}
static int svc_export_upcall(struct cache_detail *cd, struct cache_head *h)
@@ -1479,6 +1491,36 @@ const struct seq_operations nfs_exports_op = {
.show = e_show,
};
+/**
+ * nfsd_export_wq_init - allocate the export release workqueue
+ *
+ * Called once at module load. The workqueue runs deferred svc_export and
+ * svc_expkey release work scheduled by queue_rcu_work() in the cache put
+ * callbacks.
+ *
+ * Return values:
+ * %0: workqueue allocated
+ * %-ENOMEM: allocation failed
+ */
+int nfsd_export_wq_init(void)
+{
+ nfsd_export_wq = alloc_workqueue("nfsd_export", WQ_UNBOUND, 0);
+ if (!nfsd_export_wq)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * nfsd_export_wq_shutdown - drain and free the export release workqueue
+ *
+ * Called once at module unload. Per-namespace teardown in
+ * nfsd_export_shutdown() has already drained all deferred work.
+ */
+void nfsd_export_wq_shutdown(void)
+{
+ destroy_workqueue(nfsd_export_wq);
+}
+
/*
* Initialize the exports module.
*/
@@ -1540,6 +1582,9 @@ nfsd_export_shutdown(struct net *net)
cache_unregister_net(nn->svc_expkey_cache, net);
cache_unregister_net(nn->svc_export_cache, net);
+ /* Drain deferred export and expkey release work. */
+ rcu_barrier();
+ flush_workqueue(nfsd_export_wq);
cache_destroy_net(nn->svc_expkey_cache, net);
cache_destroy_net(nn->svc_export_cache, net);
svcauth_unix_purge(net);
diff --git a/fs/nfsd/export.h b/fs/nfsd/export.h
index d2b09cd76145..b05399374574 100644
--- a/fs/nfsd/export.h
+++ b/fs/nfsd/export.h
@@ -7,6 +7,7 @@
#include <linux/sunrpc/cache.h>
#include <linux/percpu_counter.h>
+#include <linux/workqueue.h>
#include <uapi/linux/nfsd/export.h>
#include <linux/nfs4.h>
@@ -75,7 +76,7 @@ struct svc_export {
u32 ex_layout_types;
struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
- struct rcu_head ex_rcu;
+ struct rcu_work ex_rwork;
unsigned long ex_xprtsec_modes;
struct export_stats *ex_stats;
};
@@ -92,7 +93,7 @@ struct svc_expkey {
u32 ek_fsid[6];
struct path ek_path;
- struct rcu_head ek_rcu;
+ struct rcu_work ek_rwork;
};
#define EX_ISSYNC(exp) (!((exp)->ex_flags & NFSEXP_ASYNC))
@@ -110,6 +111,8 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp,
/*
* Function declarations
*/
+int nfsd_export_wq_init(void);
+void nfsd_export_wq_shutdown(void);
int nfsd_export_init(struct net *);
void nfsd_export_shutdown(struct net *);
void nfsd_export_flush(struct net *);
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 41dfba5ab8b8..9d234913100b 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -6281,9 +6281,14 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
int len = xdr->buf->len - (op_status_offset + XDR_UNIT);
so->so_replay.rp_status = op->status;
- so->so_replay.rp_buflen = len;
- read_bytes_from_xdr_buf(xdr->buf, op_status_offset + XDR_UNIT,
+ if (len <= NFSD4_REPLAY_ISIZE) {
+ so->so_replay.rp_buflen = len;
+ read_bytes_from_xdr_buf(xdr->buf,
+ op_status_offset + XDR_UNIT,
so->so_replay.rp_buf, len);
+ } else {
+ so->so_replay.rp_buflen = 0;
+ }
}
status:
op->status = nfsd4_map_status(op->status,
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 4cc8a58fa56a..71aabdaa1d15 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -149,9 +149,19 @@ static int exports_net_open(struct net *net, struct file *file)
seq = file->private_data;
seq->private = nn->svc_export_cache;
+ get_net(net);
return 0;
}
+static int exports_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+ struct cache_detail *cd = seq->private;
+
+ put_net(cd->net);
+ return seq_release(inode, file);
+}
+
static int exports_nfsd_open(struct inode *inode, struct file *file)
{
return exports_net_open(inode->i_sb->s_fs_info, file);
@@ -161,7 +171,7 @@ static const struct file_operations exports_nfsd_operations = {
.open = exports_nfsd_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = exports_release,
};
static int export_features_show(struct seq_file *m, void *v)
@@ -1376,7 +1386,7 @@ static const struct proc_ops exports_proc_ops = {
.proc_open = exports_proc_open,
.proc_read = seq_read,
.proc_lseek = seq_lseek,
- .proc_release = seq_release,
+ .proc_release = exports_release,
};
static int create_proc_exports_entry(void)
@@ -2259,9 +2269,12 @@ static int __init init_nfsd(void)
if (retval)
goto out_free_pnfs;
nfsd_lockd_init(); /* lockd->nfsd callbacks */
+ retval = nfsd_export_wq_init();
+ if (retval)
+ goto out_free_lockd;
retval = register_pernet_subsys(&nfsd_net_ops);
if (retval < 0)
- goto out_free_lockd;
+ goto out_free_export_wq;
retval = register_cld_notifier();
if (retval)
goto out_free_subsys;
@@ -2290,6 +2303,8 @@ out_free_cld:
unregister_cld_notifier();
out_free_subsys:
unregister_pernet_subsys(&nfsd_net_ops);
+out_free_export_wq:
+ nfsd_export_wq_shutdown();
out_free_lockd:
nfsd_lockd_shutdown();
nfsd_drc_slab_free();
@@ -2310,6 +2325,7 @@ static void __exit exit_nfsd(void)
nfsd4_destroy_laundry_wq();
unregister_cld_notifier();
unregister_pernet_subsys(&nfsd_net_ops);
+ nfsd_export_wq_shutdown();
nfsd_drc_slab_free();
nfsd_lockd_shutdown();
nfsd4_free_slabs();
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 6fcbf1e427d4..c0ca115c3b74 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -541,11 +541,18 @@ struct nfs4_client_reclaim {
struct xdr_netobj cr_princhash;
};
-/* A reasonable value for REPLAY_ISIZE was estimated as follows:
- * The OPEN response, typically the largest, requires
- * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 8(verifier) +
- * 4(deleg. type) + 8(deleg. stateid) + 4(deleg. recall flag) +
- * 20(deleg. space limit) + ~32(deleg. ace) = 112 bytes
+/*
+ * REPLAY_ISIZE is sized for an OPEN response with delegation:
+ * 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) +
+ * 8(verifier) + 4(deleg. type) + 8(deleg. stateid) +
+ * 4(deleg. recall flag) + 20(deleg. space limit) +
+ * ~32(deleg. ace) = 112 bytes
+ *
+ * Some responses can exceed this. A LOCK denial includes the conflicting
+ * lock owner, which can be up to 1024 bytes (NFS4_OPAQUE_LIMIT). Responses
+ * larger than REPLAY_ISIZE are not cached in rp_ibuf; only rp_status is
+ * saved. Enlarging this constant increases the size of every
+ * nfs4_stateowner.
*/
#define NFSD4_REPLAY_ISIZE 112
diff --git a/fs/smb/client/cifsacl.c b/fs/smb/client/cifsacl.c
index f4cb3018a358..c920039d733c 100644
--- a/fs/smb/client/cifsacl.c
+++ b/fs/smb/client/cifsacl.c
@@ -1489,7 +1489,7 @@ struct smb_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
struct cifsFileInfo *open_file = NULL;
if (inode)
- open_file = find_readable_file(CIFS_I(inode), true);
+ open_file = find_readable_file(CIFS_I(inode), FIND_FSUID_ONLY);
if (!open_file)
return get_cifs_acl_by_path(cifs_sb, path, pacllen, info);
diff --git a/fs/smb/client/cifsfs.c b/fs/smb/client/cifsfs.c
index b6e3db993cc6..32d0305a1239 100644
--- a/fs/smb/client/cifsfs.c
+++ b/fs/smb/client/cifsfs.c
@@ -1269,7 +1269,7 @@ static int cifs_precopy_set_eof(struct inode *src_inode, struct cifsInodeInfo *s
struct cifsFileInfo *writeable_srcfile;
int rc = -EINVAL;
- writeable_srcfile = find_writable_file(src_cifsi, FIND_WR_FSUID_ONLY);
+ writeable_srcfile = find_writable_file(src_cifsi, FIND_FSUID_ONLY);
if (writeable_srcfile) {
if (src_tcon->ses->server->ops->set_file_size)
rc = src_tcon->ses->server->ops->set_file_size(
diff --git a/fs/smb/client/cifsglob.h b/fs/smb/client/cifsglob.h
index 6f9b6c72962b..7877d327dbb0 100644
--- a/fs/smb/client/cifsglob.h
+++ b/fs/smb/client/cifsglob.h
@@ -20,6 +20,7 @@
#include <linux/utsname.h>
#include <linux/sched/mm.h>
#include <linux/netfs.h>
+#include <linux/fcntl.h>
#include "cifs_fs_sb.h"
#include "cifsacl.h"
#include <crypto/internal/hash.h>
@@ -1884,12 +1885,12 @@ static inline bool is_replayable_error(int error)
}
-/* cifs_get_writable_file() flags */
-enum cifs_writable_file_flags {
- FIND_WR_ANY = 0U,
- FIND_WR_FSUID_ONLY = (1U << 0),
- FIND_WR_WITH_DELETE = (1U << 1),
- FIND_WR_NO_PENDING_DELETE = (1U << 2),
+enum cifs_find_flags {
+ FIND_ANY = 0U,
+ FIND_FSUID_ONLY = (1U << 0),
+ FIND_WITH_DELETE = (1U << 1),
+ FIND_NO_PENDING_DELETE = (1U << 2),
+ FIND_OPEN_FLAGS = (1U << 3),
};
#define MID_FREE 0
@@ -2375,4 +2376,14 @@ static inline bool cifs_forced_shutdown(const struct cifs_sb_info *sbi)
return cifs_sb_flags(sbi) & CIFS_MOUNT_SHUTDOWN;
}
+static inline int cifs_open_create_options(unsigned int oflags, int opts)
+{
+ /* O_SYNC also has bit for O_DSYNC so following check picks up either */
+ if (oflags & O_SYNC)
+ opts |= CREATE_WRITE_THROUGH;
+ if (oflags & O_DIRECT)
+ opts |= CREATE_NO_BUFFER;
+ return opts;
+}
+
#endif /* _CIFS_GLOB_H */
diff --git a/fs/smb/client/cifsproto.h b/fs/smb/client/cifsproto.h
index 800a7e418c32..884bfa1cf0b4 100644
--- a/fs/smb/client/cifsproto.h
+++ b/fs/smb/client/cifsproto.h
@@ -138,12 +138,14 @@ void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata,
ssize_t result);
struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
int flags);
-int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
- struct cifsFileInfo **ret_file);
+int __cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
+ unsigned int find_flags, unsigned int open_flags,
+ struct cifsFileInfo **ret_file);
int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name, int flags,
struct cifsFileInfo **ret_file);
-struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
- bool fsuid_only);
+struct cifsFileInfo *__find_readable_file(struct cifsInodeInfo *cifs_inode,
+ unsigned int find_flags,
+ unsigned int open_flags);
int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
struct cifsFileInfo **ret_file);
int cifs_get_hardlink_path(struct cifs_tcon *tcon, struct inode *inode,
@@ -596,4 +598,20 @@ static inline void cifs_sg_set_buf(struct sg_table *sgtable,
}
}
+static inline int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
+ unsigned int find_flags,
+ struct cifsFileInfo **ret_file)
+{
+ find_flags &= ~FIND_OPEN_FLAGS;
+ return __cifs_get_writable_file(cifs_inode, find_flags, 0, ret_file);
+}
+
+static inline struct cifsFileInfo *
+find_readable_file(struct cifsInodeInfo *cinode, unsigned int find_flags)
+{
+ find_flags &= ~FIND_OPEN_FLAGS;
+ find_flags |= FIND_NO_PENDING_DELETE;
+ return __find_readable_file(cinode, find_flags, 0);
+}
+
#endif /* _CIFSPROTO_H */
diff --git a/fs/smb/client/dir.c b/fs/smb/client/dir.c
index 953f1fee8cb8..6d2378eeb7f6 100644
--- a/fs/smb/client/dir.c
+++ b/fs/smb/client/dir.c
@@ -187,7 +187,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
const char *full_path;
void *page = alloc_dentry_path();
struct inode *newinode = NULL;
- unsigned int sbflags;
+ unsigned int sbflags = cifs_sb_flags(cifs_sb);
int disposition;
struct TCP_Server_Info *server = tcon->ses->server;
struct cifs_open_parms oparms;
@@ -308,6 +308,7 @@ static int cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned
goto out;
}
+ create_options |= cifs_open_create_options(oflags, create_options);
/*
* if we're not using unix extensions, see if we need to set
* ATTR_READONLY on the create call
@@ -367,7 +368,6 @@ retry_open:
* If Open reported that we actually created a file then we now have to
* set the mode if possible.
*/
- sbflags = cifs_sb_flags(cifs_sb);
if ((tcon->unix_ext) && (*oplock & CIFS_CREATE_ACTION)) {
struct cifs_unix_set_info_args args = {
.mode = mode,
diff --git a/fs/smb/client/file.c b/fs/smb/client/file.c
index cffcf82c1b69..27f61fe7e4e2 100644
--- a/fs/smb/client/file.c
+++ b/fs/smb/client/file.c
@@ -255,7 +255,7 @@ static void cifs_begin_writeback(struct netfs_io_request *wreq)
struct cifs_io_request *req = container_of(wreq, struct cifs_io_request, rreq);
int ret;
- ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_WR_ANY, &req->cfile);
+ ret = cifs_get_writable_file(CIFS_I(wreq->inode), FIND_ANY, &req->cfile);
if (ret) {
cifs_dbg(VFS, "No writable handle in writepages ret=%d\n", ret);
return;
@@ -584,15 +584,8 @@ static int cifs_nt_open(const char *full_path, struct inode *inode, struct cifs_
*********************************************************************/
disposition = cifs_get_disposition(f_flags);
-
/* BB pass O_SYNC flag through on file attributes .. BB */
-
- /* O_SYNC also has bit for O_DSYNC so following check picks up either */
- if (f_flags & O_SYNC)
- create_options |= CREATE_WRITE_THROUGH;
-
- if (f_flags & O_DIRECT)
- create_options |= CREATE_NO_BUFFER;
+ create_options |= cifs_open_create_options(f_flags, create_options);
retry_open:
oparms = (struct cifs_open_parms) {
@@ -963,7 +956,7 @@ int cifs_file_flush(const unsigned int xid, struct inode *inode,
return tcon->ses->server->ops->flush(xid, tcon,
&cfile->fid);
}
- rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
+ rc = cifs_get_writable_file(CIFS_I(inode), FIND_ANY, &cfile);
if (!rc) {
tcon = tlink_tcon(cfile->tlink);
rc = tcon->ses->server->ops->flush(xid, tcon, &cfile->fid);
@@ -988,7 +981,7 @@ static int cifs_do_truncate(const unsigned int xid, struct dentry *dentry)
return -ERESTARTSYS;
mapping_set_error(inode->i_mapping, rc);
- cfile = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
+ cfile = find_writable_file(cinode, FIND_FSUID_ONLY);
rc = cifs_file_flush(xid, inode, cfile);
if (!rc) {
if (cfile) {
@@ -1068,32 +1061,29 @@ int cifs_open(struct inode *inode, struct file *file)
/* Get the cached handle as SMB2 close is deferred */
if (OPEN_FMODE(file->f_flags) & FMODE_WRITE) {
- rc = cifs_get_writable_path(tcon, full_path,
- FIND_WR_FSUID_ONLY |
- FIND_WR_NO_PENDING_DELETE,
- &cfile);
+ rc = __cifs_get_writable_file(CIFS_I(inode),
+ FIND_FSUID_ONLY |
+ FIND_NO_PENDING_DELETE |
+ FIND_OPEN_FLAGS,
+ file->f_flags, &cfile);
} else {
- rc = cifs_get_readable_path(tcon, full_path, &cfile);
+ cfile = __find_readable_file(CIFS_I(inode),
+ FIND_NO_PENDING_DELETE |
+ FIND_OPEN_FLAGS,
+ file->f_flags);
+ rc = cfile ? 0 : -ENOENT;
}
if (rc == 0) {
- unsigned int oflags = file->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
- unsigned int cflags = cfile->f_flags & ~(O_CREAT|O_EXCL|O_TRUNC);
-
- if (cifs_convert_flags(oflags, 0) == cifs_convert_flags(cflags, 0) &&
- (oflags & (O_SYNC|O_DIRECT)) == (cflags & (O_SYNC|O_DIRECT))) {
- file->private_data = cfile;
- spin_lock(&CIFS_I(inode)->deferred_lock);
- cifs_del_deferred_close(cfile);
- spin_unlock(&CIFS_I(inode)->deferred_lock);
- goto use_cache;
- }
- _cifsFileInfo_put(cfile, true, false);
- } else {
- /* hard link on the defeered close file */
- rc = cifs_get_hardlink_path(tcon, inode, file);
- if (rc)
- cifs_close_deferred_file(CIFS_I(inode));
- }
+ file->private_data = cfile;
+ spin_lock(&CIFS_I(inode)->deferred_lock);
+ cifs_del_deferred_close(cfile);
+ spin_unlock(&CIFS_I(inode)->deferred_lock);
+ goto use_cache;
+ }
+ /* hard link on the deferred close file */
+ rc = cifs_get_hardlink_path(tcon, inode, file);
+ if (rc)
+ cifs_close_deferred_file(CIFS_I(inode));
if (server->oplocks)
oplock = REQ_OPLOCK;
@@ -1314,13 +1304,8 @@ cifs_reopen_file(struct cifsFileInfo *cfile, bool can_flush)
rdwr_for_fscache = 1;
desired_access = cifs_convert_flags(cfile->f_flags, rdwr_for_fscache);
-
- /* O_SYNC also has bit for O_DSYNC so following check picks up either */
- if (cfile->f_flags & O_SYNC)
- create_options |= CREATE_WRITE_THROUGH;
-
- if (cfile->f_flags & O_DIRECT)
- create_options |= CREATE_NO_BUFFER;
+ create_options |= cifs_open_create_options(cfile->f_flags,
+ create_options);
if (server->ops->get_lease_key)
server->ops->get_lease_key(inode, &cfile->fid);
@@ -2524,10 +2509,33 @@ void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t
netfs_write_subrequest_terminated(&wdata->subreq, result);
}
-struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
- bool fsuid_only)
+static bool open_flags_match(struct cifsInodeInfo *cinode,
+ unsigned int oflags, unsigned int cflags)
+{
+ struct inode *inode = &cinode->netfs.inode;
+ int crw = 0, orw = 0;
+
+ oflags &= ~(O_CREAT | O_EXCL | O_TRUNC);
+ cflags &= ~(O_CREAT | O_EXCL | O_TRUNC);
+
+ if (cifs_fscache_enabled(inode)) {
+ if (OPEN_FMODE(cflags) & FMODE_WRITE)
+ crw = 1;
+ if (OPEN_FMODE(oflags) & FMODE_WRITE)
+ orw = 1;
+ }
+ if (cifs_convert_flags(oflags, orw) != cifs_convert_flags(cflags, crw))
+ return false;
+
+ return (oflags & (O_SYNC | O_DIRECT)) == (cflags & (O_SYNC | O_DIRECT));
+}
+
+struct cifsFileInfo *__find_readable_file(struct cifsInodeInfo *cifs_inode,
+ unsigned int find_flags,
+ unsigned int open_flags)
{
struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode);
+ bool fsuid_only = find_flags & FIND_FSUID_ONLY;
struct cifsFileInfo *open_file = NULL;
/* only filter by fsuid on multiuser mounts */
@@ -2541,6 +2549,13 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
continue;
+ if ((find_flags & FIND_NO_PENDING_DELETE) &&
+ open_file->status_file_deleted)
+ continue;
+ if ((find_flags & FIND_OPEN_FLAGS) &&
+ !open_flags_match(cifs_inode, open_flags,
+ open_file->f_flags))
+ continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_READ) {
if ((!open_file->invalidHandle)) {
/* found a good file */
@@ -2559,17 +2574,17 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
}
/* Return -EBADF if no handle is found and general rc otherwise */
-int
-cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
- struct cifsFileInfo **ret_file)
+int __cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
+ unsigned int find_flags, unsigned int open_flags,
+ struct cifsFileInfo **ret_file)
{
struct cifsFileInfo *open_file, *inv_file = NULL;
struct cifs_sb_info *cifs_sb;
bool any_available = false;
int rc = -EBADF;
unsigned int refind = 0;
- bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
- bool with_delete = flags & FIND_WR_WITH_DELETE;
+ bool fsuid_only = find_flags & FIND_FSUID_ONLY;
+ bool with_delete = find_flags & FIND_WITH_DELETE;
*ret_file = NULL;
/*
@@ -2603,9 +2618,13 @@ refind_writable:
continue;
if (with_delete && !(open_file->fid.access & DELETE))
continue;
- if ((flags & FIND_WR_NO_PENDING_DELETE) &&
+ if ((find_flags & FIND_NO_PENDING_DELETE) &&
open_file->status_file_deleted)
continue;
+ if ((find_flags & FIND_OPEN_FLAGS) &&
+ !open_flags_match(cifs_inode, open_flags,
+ open_file->f_flags))
+ continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
if (!open_file->invalidHandle) {
/* found a good writable file */
@@ -2722,17 +2741,7 @@ cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
cinode = CIFS_I(d_inode(cfile->dentry));
spin_unlock(&tcon->open_file_lock);
free_dentry_path(page);
- *ret_file = find_readable_file(cinode, 0);
- if (*ret_file) {
- spin_lock(&cinode->open_file_lock);
- if ((*ret_file)->status_file_deleted) {
- spin_unlock(&cinode->open_file_lock);
- cifsFileInfo_put(*ret_file);
- *ret_file = NULL;
- } else {
- spin_unlock(&cinode->open_file_lock);
- }
- }
+ *ret_file = find_readable_file(cinode, FIND_ANY);
return *ret_file ? 0 : -ENOENT;
}
@@ -2804,7 +2813,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
}
if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
- smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
+ smbfile = find_writable_file(CIFS_I(inode), FIND_ANY);
if (smbfile) {
rc = server->ops->flush(xid, tcon, &smbfile->fid);
cifsFileInfo_put(smbfile);
diff --git a/fs/smb/client/fs_context.c b/fs/smb/client/fs_context.c
index 54090739535f..a4a7c7eee038 100644
--- a/fs/smb/client/fs_context.c
+++ b/fs/smb/client/fs_context.c
@@ -1997,7 +1997,7 @@ int smb3_init_fs_context(struct fs_context *fc)
ctx->backupuid_specified = false; /* no backup intent for a user */
ctx->backupgid_specified = false; /* no backup intent for a group */
- ctx->retrans = 1;
+ ctx->retrans = 0;
ctx->reparse_type = CIFS_REPARSE_TYPE_DEFAULT;
ctx->symlink_type = CIFS_SYMLINK_TYPE_DEFAULT;
ctx->nonativesocket = 0;
diff --git a/fs/smb/client/inode.c b/fs/smb/client/inode.c
index 3e844c55ab8a..143fa2e665ed 100644
--- a/fs/smb/client/inode.c
+++ b/fs/smb/client/inode.c
@@ -2997,7 +2997,7 @@ int cifs_fiemap(struct inode *inode, struct fiemap_extent_info *fei, u64 start,
}
}
- cfile = find_readable_file(cifs_i, false);
+ cfile = find_readable_file(cifs_i, FIND_ANY);
if (cfile == NULL)
return -EINVAL;
@@ -3050,7 +3050,7 @@ int cifs_file_set_size(const unsigned int xid, struct dentry *dentry,
size, false);
cifs_dbg(FYI, "%s: set_file_size: rc = %d\n", __func__, rc);
} else {
- open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
+ open_file = find_writable_file(cifsInode, FIND_FSUID_ONLY);
if (open_file) {
tcon = tlink_tcon(open_file->tlink);
server = tcon->ses->server;
@@ -3219,7 +3219,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
open_file->fid.netfid,
open_file->pid);
} else {
- open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
+ open_file = find_writable_file(cifsInode, FIND_FSUID_ONLY);
if (open_file) {
pTcon = tlink_tcon(open_file->tlink);
rc = CIFSSMBUnixSetFileInfo(xid, pTcon, args,
diff --git a/fs/smb/client/smb1ops.c b/fs/smb/client/smb1ops.c
index 9643eca0cb70..9694117050a6 100644
--- a/fs/smb/client/smb1ops.c
+++ b/fs/smb/client/smb1ops.c
@@ -960,7 +960,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
struct cifs_tcon *tcon;
/* if the file is already open for write, just use that fileid */
- open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
+ open_file = find_writable_file(cinode, FIND_FSUID_ONLY);
if (open_file) {
fid.netfid = open_file->fid.netfid;
diff --git a/fs/smb/client/smb2inode.c b/fs/smb/client/smb2inode.c
index 5280c5c869ad..364bdcff9c9d 100644
--- a/fs/smb/client/smb2inode.c
+++ b/fs/smb/client/smb2inode.c
@@ -1156,7 +1156,7 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
cifs_i = CIFS_I(inode);
dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
data.Attributes = cpu_to_le32(dosattrs);
- cifs_get_writable_path(tcon, name, FIND_WR_ANY, &cfile);
+ cifs_get_writable_path(tcon, name, FIND_ANY, &cfile);
oparms = CIFS_OPARMS(cifs_sb, tcon, name, FILE_WRITE_ATTRIBUTES,
FILE_CREATE, CREATE_NOT_FILE, ACL_NO_MODE);
tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
@@ -1336,14 +1336,13 @@ int smb2_rename_path(const unsigned int xid,
__u32 co = file_create_options(source_dentry);
drop_cached_dir_by_name(xid, tcon, from_name, cifs_sb);
- cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile);
+ cifs_get_writable_path(tcon, from_name, FIND_WITH_DELETE, &cfile);
int rc = smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
co, DELETE, SMB2_OP_RENAME, cfile, source_dentry);
if (rc == -EINVAL) {
cifs_dbg(FYI, "invalid lease key, resending request without lease");
- cifs_get_writable_path(tcon, from_name,
- FIND_WR_WITH_DELETE, &cfile);
+ cifs_get_writable_path(tcon, from_name, FIND_WITH_DELETE, &cfile);
rc = smb2_set_path_attr(xid, tcon, from_name, to_name, cifs_sb,
co, DELETE, SMB2_OP_RENAME, cfile, NULL);
}
@@ -1377,7 +1376,7 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
in_iov.iov_base = &eof;
in_iov.iov_len = sizeof(eof);
- cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+ cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile);
oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_WRITE_DATA,
FILE_OPEN, 0, ACL_NO_MODE);
@@ -1387,7 +1386,7 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
cfile, NULL, NULL, dentry);
if (rc == -EINVAL) {
cifs_dbg(FYI, "invalid lease key, resending request without lease");
- cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+ cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb,
full_path, &oparms, &in_iov,
&(int){SMB2_OP_SET_EOF}, 1,
@@ -1417,7 +1416,7 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
(buf->LastWriteTime == 0) && (buf->ChangeTime == 0)) {
if (buf->Attributes == 0)
goto out; /* would be a no op, no sense sending this */
- cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+ cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile);
}
oparms = CIFS_OPARMS(cifs_sb, tcon, full_path, FILE_WRITE_ATTRIBUTES,
@@ -1476,7 +1475,7 @@ struct inode *smb2_create_reparse_inode(struct cifs_open_info_data *data,
if (tcon->posix_extensions) {
cmds[1] = SMB2_OP_POSIX_QUERY_INFO;
- cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+ cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms,
in_iov, cmds, 2, cfile, out_iov, out_buftype, NULL);
if (!rc) {
@@ -1485,7 +1484,7 @@ struct inode *smb2_create_reparse_inode(struct cifs_open_info_data *data,
}
} else {
cmds[1] = SMB2_OP_QUERY_INFO;
- cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+ cifs_get_writable_path(tcon, full_path, FIND_ANY, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms,
in_iov, cmds, 2, cfile, out_iov, out_buftype, NULL);
if (!rc) {
@@ -1636,13 +1635,12 @@ int smb2_rename_pending_delete(const char *full_path,
iov[1].iov_base = utf16_path;
iov[1].iov_len = sizeof(*utf16_path) * UniStrlen((wchar_t *)utf16_path);
- cifs_get_writable_path(tcon, full_path, FIND_WR_WITH_DELETE, &cfile);
+ cifs_get_writable_path(tcon, full_path, FIND_WITH_DELETE, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, iov,
cmds, num_cmds, cfile, NULL, NULL, dentry);
if (rc == -EINVAL) {
cifs_dbg(FYI, "invalid lease key, resending request without lease\n");
- cifs_get_writable_path(tcon, full_path,
- FIND_WR_WITH_DELETE, &cfile);
+ cifs_get_writable_path(tcon, full_path, FIND_WITH_DELETE, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path, &oparms, iov,
cmds, num_cmds, cfile, NULL, NULL, NULL);
}
diff --git a/fs/smb/client/smb2maperror.c b/fs/smb/client/smb2maperror.c
index f4cff44e2796..2b8782c4f684 100644
--- a/fs/smb/client/smb2maperror.c
+++ b/fs/smb/client/smb2maperror.c
@@ -109,6 +109,9 @@ int __init smb2_init_maperror(void)
}
#if IS_ENABLED(CONFIG_SMB_KUNIT_TESTS)
+#define EXPORT_SYMBOL_FOR_SMB_TEST(sym) \
+ EXPORT_SYMBOL_FOR_MODULES(sym, "smb2maperror_test")
+
/* Previous prototype for eliminating the build warning. */
const struct status_to_posix_error *smb2_get_err_map_test(__u32 smb2_status);
@@ -116,11 +119,11 @@ const struct status_to_posix_error *smb2_get_err_map_test(__u32 smb2_status)
{
return smb2_get_err_map(smb2_status);
}
-EXPORT_SYMBOL_GPL(smb2_get_err_map_test);
+EXPORT_SYMBOL_FOR_SMB_TEST(smb2_get_err_map_test);
const struct status_to_posix_error *smb2_error_map_table_test = smb2_error_map_table;
-EXPORT_SYMBOL_GPL(smb2_error_map_table_test);
+EXPORT_SYMBOL_FOR_SMB_TEST(smb2_error_map_table_test);
unsigned int smb2_error_map_num = ARRAY_SIZE(smb2_error_map_table);
-EXPORT_SYMBOL_GPL(smb2_error_map_num);
+EXPORT_SYMBOL_FOR_SMB_TEST(smb2_error_map_num);
#endif
diff --git a/fs/smb/client/smb2ops.c b/fs/smb/client/smb2ops.c
index 7f2d3459cbf9..98ac4e86bf99 100644
--- a/fs/smb/client/smb2ops.c
+++ b/fs/smb/client/smb2ops.c
@@ -628,6 +628,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
struct smb_sockaddr_in6 *p6;
struct cifs_server_iface *info = NULL, *iface = NULL, *niface = NULL;
struct cifs_server_iface tmp_iface;
+ __be16 port;
ssize_t bytes_left;
size_t next = 0;
int nb_iface = 0;
@@ -662,6 +663,15 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
goto out;
}
+ spin_lock(&ses->server->srv_lock);
+ if (ses->server->dstaddr.ss_family == AF_INET)
+ port = ((struct sockaddr_in *)&ses->server->dstaddr)->sin_port;
+ else if (ses->server->dstaddr.ss_family == AF_INET6)
+ port = ((struct sockaddr_in6 *)&ses->server->dstaddr)->sin6_port;
+ else
+ port = cpu_to_be16(CIFS_PORT);
+ spin_unlock(&ses->server->srv_lock);
+
while (bytes_left >= (ssize_t)sizeof(*p)) {
memset(&tmp_iface, 0, sizeof(tmp_iface));
/* default to 1Gbps when link speed is unset */
@@ -682,7 +692,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
/* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
- addr4->sin_port = cpu_to_be16(CIFS_PORT);
+ addr4->sin_port = port;
cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
&addr4->sin_addr);
@@ -696,7 +706,7 @@ parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
/* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
addr6->sin6_flowinfo = 0;
addr6->sin6_scope_id = 0;
- addr6->sin6_port = cpu_to_be16(CIFS_PORT);
+ addr6->sin6_port = port;
cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
&addr6->sin6_addr);
@@ -3352,7 +3362,7 @@ get_smb2_acl(struct cifs_sb_info *cifs_sb,
struct cifsFileInfo *open_file = NULL;
if (inode && !(info & SACL_SECINFO))
- open_file = find_readable_file(CIFS_I(inode), true);
+ open_file = find_readable_file(CIFS_I(inode), FIND_FSUID_ONLY);
if (!open_file || (info & SACL_SECINFO))
return get_smb2_acl_by_path(cifs_sb, path, pacllen, info);
@@ -3898,7 +3908,7 @@ static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs
* some servers (Windows2016) will not reflect recent writes in
* QUERY_ALLOCATED_RANGES until SMB2_flush is called.
*/
- wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
+ wrcfile = find_writable_file(cifsi, FIND_ANY);
if (wrcfile) {
filemap_write_and_wait(inode->i_mapping);
smb2_flush_file(xid, tcon, &wrcfile->fid);
diff --git a/fs/smb/client/smb2pdu.c b/fs/smb/client/smb2pdu.c
index c43ca74e8704..5188218c25be 100644
--- a/fs/smb/client/smb2pdu.c
+++ b/fs/smb/client/smb2pdu.c
@@ -5307,7 +5307,10 @@ replay_again:
memset(&rqst, 0, sizeof(struct smb_rqst));
rqst.rq_iov = iov;
- rqst.rq_nvec = n_vec + 1;
+ /* iov[0] is the SMB header; move payload to rq_iter for encryption safety */
+ rqst.rq_nvec = 1;
+ iov_iter_kvec(&rqst.rq_iter, ITER_SOURCE, &iov[1], n_vec,
+ io_parms->length);
if (retries) {
/* Back-off before retry */
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index 766631f0562e..09d4c17b3e7b 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -2716,12 +2716,8 @@ xfs_dabuf_map(
* larger one that needs to be free by the caller.
*/
if (nirecs > 1) {
- map = kzalloc(nirecs * sizeof(struct xfs_buf_map),
- GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
- if (!map) {
- error = -ENOMEM;
- goto out_free_irecs;
- }
+ map = kcalloc(nirecs, sizeof(struct xfs_buf_map),
+ GFP_KERNEL | __GFP_NOLOCKDEP | __GFP_NOFAIL);
*mapp = map;
}
diff --git a/fs/xfs/libxfs/xfs_defer.c b/fs/xfs/libxfs/xfs_defer.c
index 472c261163ed..c6909716b041 100644
--- a/fs/xfs/libxfs/xfs_defer.c
+++ b/fs/xfs/libxfs/xfs_defer.c
@@ -809,7 +809,7 @@ xfs_defer_can_append(
/* Paused items cannot absorb more work */
if (dfp->dfp_flags & XFS_DEFER_PAUSED)
- return NULL;
+ return false;
/* Already full? */
if (ops->max_items && dfp->dfp_count >= ops->max_items)
diff --git a/fs/xfs/xfs_bmap_item.c b/fs/xfs/xfs_bmap_item.c
index e8775f254c89..b237a25d6045 100644
--- a/fs/xfs/xfs_bmap_item.c
+++ b/fs/xfs/xfs_bmap_item.c
@@ -245,7 +245,7 @@ xfs_bmap_update_diff_items(
struct xfs_bmap_intent *ba = bi_entry(a);
struct xfs_bmap_intent *bb = bi_entry(b);
- return ba->bi_owner->i_ino - bb->bi_owner->i_ino;
+ return cmp_int(ba->bi_owner->i_ino, bb->bi_owner->i_ino);
}
/* Log bmap updates in the intent item. */
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 2b208e2c5264..69e9bc588c8b 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -1439,9 +1439,15 @@ xfs_qm_dqflush(
return 0;
out_abort:
+ /*
+ * Shut down the log before removing the dquot item from the AIL.
+ * Otherwise, the log tail may advance past this item's LSN while
+ * log writes are still in progress, making these unflushed changes
+ * unrecoverable on the next mount.
+ */
+ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
dqp->q_flags &= ~XFS_DQFLAG_DIRTY;
xfs_trans_ail_delete(lip, 0);
- xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
xfs_dqfunlock(dqp);
return error;
}
diff --git a/fs/xfs/xfs_healthmon.c b/fs/xfs/xfs_healthmon.c
index 4a06d6632f65..26c325d34bd1 100644
--- a/fs/xfs/xfs_healthmon.c
+++ b/fs/xfs/xfs_healthmon.c
@@ -141,6 +141,16 @@ xfs_healthmon_detach(
hm->mount_cookie = DETACHED_MOUNT_COOKIE;
spin_unlock(&xfs_healthmon_lock);
+ /*
+ * Wake up any readers that might remain. This can happen if unmount
+ * races with the healthmon fd owner entering ->read_iter, having
+ * already emptied the event queue.
+ *
+ * In the ->release case there shouldn't be any readers because the
+ * only users of the waiter are read and poll.
+ */
+ wake_up_all(&hm->wait);
+
trace_xfs_healthmon_detach(hm);
xfs_healthmon_put(hm);
}
@@ -1027,13 +1037,6 @@ xfs_healthmon_release(
* process can create another health monitor file.
*/
xfs_healthmon_detach(hm);
-
- /*
- * Wake up any readers that might be left. There shouldn't be any
- * because the only users of the waiter are read and poll.
- */
- wake_up_all(&hm->wait);
-
xfs_healthmon_put(hm);
return 0;
}
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index a7a09e7eec81..2040a9292ee6 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -159,7 +159,6 @@ xfs_inode_free_callback(
ASSERT(!test_bit(XFS_LI_IN_AIL,
&ip->i_itemp->ili_item.li_flags));
xfs_inode_item_destroy(ip);
- ip->i_itemp = NULL;
}
kmem_cache_free(xfs_inode_cache, ip);
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index b96f262ba139..f807f8f4f705 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -1357,6 +1357,8 @@ xlog_alloc_log(
if (xfs_has_logv2(mp) && mp->m_sb.sb_logsunit > 1)
log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
+ else if (mp->m_sb.sb_logsectsize > 0)
+ log->l_iclog_roundoff = mp->m_sb.sb_logsectsize;
else
log->l_iclog_roundoff = BBSIZE;
diff --git a/fs/xfs/xfs_zone_gc.c b/fs/xfs/xfs_zone_gc.c
index 7efeecd2d85f..309f70098524 100644
--- a/fs/xfs/xfs_zone_gc.c
+++ b/fs/xfs/xfs_zone_gc.c
@@ -96,7 +96,6 @@ struct xfs_gc_bio {
*/
xfs_fsblock_t old_startblock;
xfs_daddr_t new_daddr;
- struct xfs_zone_scratch *scratch;
/* Are we writing to a sequential write required zone? */
bool is_seq;
@@ -779,7 +778,6 @@ xfs_zone_gc_split_write(
ihold(VFS_I(chunk->ip));
split_chunk->ip = chunk->ip;
split_chunk->is_seq = chunk->is_seq;
- split_chunk->scratch = chunk->scratch;
split_chunk->offset = chunk->offset;
split_chunk->len = split_len;
split_chunk->old_startblock = chunk->old_startblock;
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
index 2cfbb4c65c78..d3dc5dc5f916 100644
--- a/include/linux/build_bug.h
+++ b/include/linux/build_bug.h
@@ -32,7 +32,8 @@
/**
* BUILD_BUG_ON_MSG - break compile if a condition is true & emit supplied
* error message.
- * @condition: the condition which the compiler should know is false.
+ * @cond: the condition which the compiler should know is false.
+ * @msg: build-time error message
*
* See BUILD_BUG_ON for description.
*/
@@ -60,6 +61,7 @@
/**
* static_assert - check integer constant expression at build time
+ * @expr: expression to be checked
*
* static_assert() is a wrapper for the C11 _Static_assert, with a
* little macro magic to make the message optional (defaulting to the
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index d290060f4c73..91013161e9db 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -68,12 +68,12 @@
* timeout value used in Stratix10 FPGA manager driver.
* timeout value used in RSU driver
*/
-#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 300
-#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 720
-#define SVC_RSU_REQUEST_TIMEOUT_MS 300
+#define SVC_RECONFIG_REQUEST_TIMEOUT_MS 5000
+#define SVC_RECONFIG_BUFFER_TIMEOUT_MS 5000
+#define SVC_RSU_REQUEST_TIMEOUT_MS 2000
#define SVC_FCS_REQUEST_TIMEOUT_MS 2000
#define SVC_COMPLETED_TIMEOUT_MS 30000
-#define SVC_HWMON_REQUEST_TIMEOUT_MS 300
+#define SVC_HWMON_REQUEST_TIMEOUT_MS 2000
struct stratix10_svc_chan;
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 2990b9f94cb5..31324609af4d 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -682,6 +682,7 @@ struct hid_device {
__s32 battery_charge_status;
enum hid_battery_status battery_status;
bool battery_avoid_query;
+ bool battery_present;
ktime_t battery_ratelimit_time;
#endif
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 3e4a82a6f817..dd1420bfcb73 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -388,6 +388,7 @@ struct io_ring_ctx {
* regularly bounce b/w CPUs.
*/
struct {
+ struct io_rings __rcu *rings_rcu;
struct llist_head work_llist;
struct llist_head retry_llist;
unsigned long check_cq;
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 34759a262b28..6b76e7a6f4c2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1940,56 +1940,43 @@ enum kvm_stat_kind {
struct kvm_stat_data {
struct kvm *kvm;
- const struct _kvm_stats_desc *desc;
+ const struct kvm_stats_desc *desc;
enum kvm_stat_kind kind;
};
-struct _kvm_stats_desc {
- struct kvm_stats_desc desc;
- char name[KVM_STATS_NAME_SIZE];
-};
-
-#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \
- .flags = type | unit | base | \
- BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
- BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
- BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
- .exponent = exp, \
- .size = sz, \
+#define STATS_DESC_COMMON(type, unit, base, exp, sz, bsz) \
+ .flags = type | unit | base | \
+ BUILD_BUG_ON_ZERO(type & ~KVM_STATS_TYPE_MASK) | \
+ BUILD_BUG_ON_ZERO(unit & ~KVM_STATS_UNIT_MASK) | \
+ BUILD_BUG_ON_ZERO(base & ~KVM_STATS_BASE_MASK), \
+ .exponent = exp, \
+ .size = sz, \
.bucket_size = bsz
-#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
- { \
- { \
- STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
- .offset = offsetof(struct kvm_vm_stat, generic.stat) \
- }, \
- .name = #stat, \
- }
-#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
- { \
- { \
- STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
- .offset = offsetof(struct kvm_vcpu_stat, generic.stat) \
- }, \
- .name = #stat, \
- }
-#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
- { \
- { \
- STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
- .offset = offsetof(struct kvm_vm_stat, stat) \
- }, \
- .name = #stat, \
- }
-#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
- { \
- { \
- STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
- .offset = offsetof(struct kvm_vcpu_stat, stat) \
- }, \
- .name = #stat, \
- }
+#define VM_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+{ \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vm_stat, generic.stat), \
+ .name = #stat, \
+}
+#define VCPU_GENERIC_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+{ \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vcpu_stat, generic.stat), \
+ .name = #stat, \
+}
+#define VM_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+{ \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vm_stat, stat), \
+ .name = #stat, \
+}
+#define VCPU_STATS_DESC(stat, type, unit, base, exp, sz, bsz) \
+{ \
+ STATS_DESC_COMMON(type, unit, base, exp, sz, bsz), \
+ .offset = offsetof(struct kvm_vcpu_stat, stat), \
+ .name = #stat, \
+}
/* SCOPE: VM, VM_GENERIC, VCPU, VCPU_GENERIC */
#define STATS_DESC(SCOPE, stat, type, unit, base, exp, sz, bsz) \
SCOPE##_STATS_DESC(stat, type, unit, base, exp, sz, bsz)
@@ -2066,7 +2053,7 @@ struct _kvm_stats_desc {
STATS_DESC_IBOOLEAN(VCPU_GENERIC, blocking)
ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
- const struct _kvm_stats_desc *desc,
+ const struct kvm_stats_desc *desc,
void *stats, size_t size_stats,
char __user *user_buffer, size_t size, loff_t *offset);
@@ -2111,9 +2098,9 @@ static inline void kvm_stats_log_hist_update(u64 *data, size_t size, u64 value)
extern const struct kvm_stats_header kvm_vm_stats_header;
-extern const struct _kvm_stats_desc kvm_vm_stats_desc[];
+extern const struct kvm_stats_desc kvm_vm_stats_desc[];
extern const struct kvm_stats_header kvm_vcpu_stats_header;
-extern const struct _kvm_stats_desc kvm_vcpu_stats_desc[];
+extern const struct kvm_stats_desc kvm_vcpu_stats_desc[];
static inline int mmu_invalidate_retry(struct kvm *kvm, unsigned long mmu_seq)
{
diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h
index 60e069a6757f..e75c29c51464 100644
--- a/include/linux/nvme-auth.h
+++ b/include/linux/nvme-auth.h
@@ -11,7 +11,7 @@
struct nvme_dhchap_key {
size_t len;
u8 hash;
- u8 key[];
+ u8 key[] __counted_by(len);
};
u32 nvme_auth_get_seqnum(void);
diff --git a/include/linux/rseq_types.h b/include/linux/rseq_types.h
index da5fa6f40294..0b42045988db 100644
--- a/include/linux/rseq_types.h
+++ b/include/linux/rseq_types.h
@@ -133,10 +133,12 @@ struct rseq_data { };
* @active: MM CID is active for the task
* @cid: The CID associated to the task either permanently or
* borrowed from the CPU
+ * @node: Queued in the per MM MMCID list
*/
struct sched_mm_cid {
unsigned int active;
unsigned int cid;
+ struct hlist_node node;
};
/**
@@ -157,6 +159,7 @@ struct mm_cid_pcpu {
* @work: Regular work to handle the affinity mode change case
* @lock: Spinlock to protect against affinity setting which can't take @mutex
* @mutex: Mutex to serialize forks and exits related to this mm
+ * @user_list: List of the MM CID users of a MM
* @nr_cpus_allowed: The number of CPUs in the per MM allowed CPUs map. The map
* is growth only.
* @users: The number of tasks sharing this MM. Separate from mm::mm_users
@@ -177,13 +180,14 @@ struct mm_mm_cid {
raw_spinlock_t lock;
struct mutex mutex;
+ struct hlist_head user_list;
/* Low frequency modified */
unsigned int nr_cpus_allowed;
unsigned int users;
unsigned int pcpu_thrs;
unsigned int update_deferred;
-}____cacheline_aligned_in_smp;
+} ____cacheline_aligned;
#else /* CONFIG_SCHED_MM_CID */
struct mm_mm_cid { };
struct sched_mm_cid { };
diff --git a/include/linux/sched.h b/include/linux/sched.h
index a7b4a980eb2f..5a5d3dbc9cdf 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2354,7 +2354,6 @@ static __always_inline void alloc_tag_restore(struct alloc_tag *tag, struct allo
#ifdef CONFIG_SCHED_MM_CID
void sched_mm_cid_before_execve(struct task_struct *t);
void sched_mm_cid_after_execve(struct task_struct *t);
-void sched_mm_cid_fork(struct task_struct *t);
void sched_mm_cid_exit(struct task_struct *t);
static __always_inline int task_mm_cid(struct task_struct *t)
{
@@ -2363,7 +2362,6 @@ static __always_inline int task_mm_cid(struct task_struct *t)
#else
static inline void sched_mm_cid_before_execve(struct task_struct *t) { }
static inline void sched_mm_cid_after_execve(struct task_struct *t) { }
-static inline void sched_mm_cid_fork(struct task_struct *t) { }
static inline void sched_mm_cid_exit(struct task_struct *t) { }
static __always_inline int task_mm_cid(struct task_struct *t)
{
diff --git a/include/linux/usb.h b/include/linux/usb.h
index fbfcc70b07fb..04277af4bb9d 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1862,14 +1862,18 @@ void usb_free_noncoherent(struct usb_device *dev, size_t size,
* SYNCHRONOUS CALL SUPPORT *
*-------------------------------------------------------------------*/
+/* Maximum value allowed for timeout in synchronous routines below */
+#define USB_MAX_SYNCHRONOUS_TIMEOUT 60000 /* ms */
+
extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
__u8 request, __u8 requesttype, __u16 value, __u16 index,
void *data, __u16 size, int timeout);
extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
void *data, int len, int *actual_length, int timeout);
extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
- void *data, int len, int *actual_length,
- int timeout);
+ void *data, int len, int *actual_length, int timeout);
+extern int usb_bulk_msg_killable(struct usb_device *usb_dev, unsigned int pipe,
+ void *data, int len, int *actual_length, int timeout);
/* wrappers around usb_control_msg() for the most common standard requests */
int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request,
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index 2f7bd2fdc616..b3cc7beab4a3 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -78,4 +78,7 @@
/* skip BOS descriptor request */
#define USB_QUIRK_NO_BOS BIT(17)
+/* Device claims zero configurations, forcing to 1 */
+#define USB_QUIRK_FORCE_ONE_CONFIG BIT(18)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 65500f5db379..80364d4dbebb 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -14,6 +14,10 @@
#include <linux/ioctl.h>
#include <asm/kvm.h>
+#ifdef __KERNEL__
+#include <linux/kvm_types.h>
+#endif
+
#define KVM_API_VERSION 12
/*
@@ -1601,7 +1605,11 @@ struct kvm_stats_desc {
__u16 size;
__u32 offset;
__u32 bucket_size;
+#ifdef __KERNEL__
+ char name[KVM_STATS_NAME_SIZE];
+#else
char name[];
+#endif
};
#define KVM_GET_STATS_FD _IO(KVMIO, 0xce)
diff --git a/io_uring/bpf_filter.c b/io_uring/bpf_filter.c
index 6a98750e38b0..c0037632b7af 100644
--- a/io_uring/bpf_filter.c
+++ b/io_uring/bpf_filter.c
@@ -85,7 +85,7 @@ int __io_uring_run_bpf_filters(struct io_bpf_filter __rcu **filters,
do {
if (filter == &dummy_filter)
return -EACCES;
- ret = bpf_prog_run(filter->prog, &bpf_ctx);
+ ret = bpf_prog_run_pin_on_cpu(filter->prog, &bpf_ctx);
if (!ret)
return -EACCES;
filter = filter->next;
diff --git a/io_uring/eventfd.c b/io_uring/eventfd.c
index cbea1c289485..7482a7dc6b38 100644
--- a/io_uring/eventfd.c
+++ b/io_uring/eventfd.c
@@ -76,11 +76,15 @@ void io_eventfd_signal(struct io_ring_ctx *ctx, bool cqe_event)
{
bool skip = false;
struct io_ev_fd *ev_fd;
-
- if (READ_ONCE(ctx->rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
- return;
+ struct io_rings *rings;
guard(rcu)();
+
+ rings = rcu_dereference(ctx->rings_rcu);
+ if (!rings)
+ return;
+ if (READ_ONCE(rings->cq_flags) & IORING_CQ_EVENTFD_DISABLED)
+ return;
ev_fd = rcu_dereference(ctx->io_ev_fd);
/*
* Check again if ev_fd exists in case an io_eventfd_unregister call
diff --git a/io_uring/io_uring.c b/io_uring/io_uring.c
index aa95703165f1..9a37035e76c0 100644
--- a/io_uring/io_uring.c
+++ b/io_uring/io_uring.c
@@ -1745,7 +1745,7 @@ static int io_init_req(struct io_ring_ctx *ctx, struct io_kiocb *req,
* well as 2 contiguous entries.
*/
if (!(ctx->flags & IORING_SETUP_SQE_MIXED) || *left < 2 ||
- !(ctx->cached_sq_head & (ctx->sq_entries - 1)))
+ (unsigned)(sqe - ctx->sq_sqes) >= ctx->sq_entries - 1)
return io_init_fail_req(req, -EINVAL);
/*
* A 128b operation on a mixed SQ uses two entries, so we have
@@ -2066,6 +2066,7 @@ static void io_rings_free(struct io_ring_ctx *ctx)
io_free_region(ctx->user, &ctx->sq_region);
io_free_region(ctx->user, &ctx->ring_region);
ctx->rings = NULL;
+ RCU_INIT_POINTER(ctx->rings_rcu, NULL);
ctx->sq_sqes = NULL;
}
@@ -2703,6 +2704,7 @@ static __cold int io_allocate_scq_urings(struct io_ring_ctx *ctx,
if (ret)
return ret;
ctx->rings = rings = io_region_get_ptr(&ctx->ring_region);
+ rcu_assign_pointer(ctx->rings_rcu, rings);
if (!(ctx->flags & IORING_SETUP_NO_SQARRAY))
ctx->sq_array = (u32 *)((char *)rings + rl->sq_array_offset);
diff --git a/io_uring/kbuf.c b/io_uring/kbuf.c
index 2ffa95b1c601..26813b0f1dfd 100644
--- a/io_uring/kbuf.c
+++ b/io_uring/kbuf.c
@@ -111,9 +111,18 @@ bool io_kbuf_recycle_legacy(struct io_kiocb *req, unsigned issue_flags)
buf = req->kbuf;
bl = io_buffer_get_list(ctx, buf->bgid);
- list_add(&buf->list, &bl->buf_list);
- bl->nbufs++;
+ /*
+ * If the buffer list was upgraded to a ring-based one, or removed,
+ * while the request was in-flight in io-wq, drop it.
+ */
+ if (bl && !(bl->flags & IOBL_BUF_RING)) {
+ list_add(&buf->list, &bl->buf_list);
+ bl->nbufs++;
+ } else {
+ kfree(buf);
+ }
req->flags &= ~REQ_F_BUFFER_SELECTED;
+ req->kbuf = NULL;
io_ring_submit_unlock(ctx, issue_flags);
return true;
diff --git a/io_uring/register.c b/io_uring/register.c
index 6015a3e9ce69..0148735f7711 100644
--- a/io_uring/register.c
+++ b/io_uring/register.c
@@ -202,7 +202,7 @@ static int io_register_restrictions_task(void __user *arg, unsigned int nr_args)
return -EPERM;
/*
* Similar to seccomp, disallow setting a filter if task_no_new_privs
- * is true and we're not CAP_SYS_ADMIN.
+ * is false and we're not CAP_SYS_ADMIN.
*/
if (!task_no_new_privs(current) &&
!ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN))
@@ -238,7 +238,7 @@ static int io_register_bpf_filter_task(void __user *arg, unsigned int nr_args)
/*
* Similar to seccomp, disallow setting a filter if task_no_new_privs
- * is true and we're not CAP_SYS_ADMIN.
+ * is false and we're not CAP_SYS_ADMIN.
*/
if (!task_no_new_privs(current) &&
!ns_capable_noaudit(current_user_ns(), CAP_SYS_ADMIN))
@@ -633,7 +633,15 @@ overflow:
ctx->sq_entries = p->sq_entries;
ctx->cq_entries = p->cq_entries;
+ /*
+ * Just mark any flag we may have missed and that the application
+ * should act on unconditionally. Worst case it'll be an extra
+ * syscall.
+ */
+ atomic_or(IORING_SQ_TASKRUN | IORING_SQ_NEED_WAKEUP, &n.rings->sq_flags);
ctx->rings = n.rings;
+ rcu_assign_pointer(ctx->rings_rcu, n.rings);
+
ctx->sq_sqes = n.sq_sqes;
swap_old(ctx, o, n, ring_region);
swap_old(ctx, o, n, sq_region);
@@ -642,6 +650,9 @@ overflow:
out:
spin_unlock(&ctx->completion_lock);
mutex_unlock(&ctx->mmap_lock);
+ /* Wait for concurrent io_ctx_mark_taskrun() */
+ if (to_free == &o)
+ synchronize_rcu_expedited();
io_register_free_rings(ctx, to_free);
if (ctx->sq_data)
diff --git a/io_uring/tw.c b/io_uring/tw.c
index 1ee2b8ab07c8..2f2b4ac4b126 100644
--- a/io_uring/tw.c
+++ b/io_uring/tw.c
@@ -152,6 +152,21 @@ void tctx_task_work(struct callback_head *cb)
WARN_ON_ONCE(ret);
}
+/*
+ * Sets IORING_SQ_TASKRUN in the sq_flags shared with userspace, using the
+ * RCU protected rings pointer to be safe against concurrent ring resizing.
+ */
+static void io_ctx_mark_taskrun(struct io_ring_ctx *ctx)
+{
+ lockdep_assert_in_rcu_read_lock();
+
+ if (ctx->flags & IORING_SETUP_TASKRUN_FLAG) {
+ struct io_rings *rings = rcu_dereference(ctx->rings_rcu);
+
+ atomic_or(IORING_SQ_TASKRUN, &rings->sq_flags);
+ }
+}
+
void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
{
struct io_ring_ctx *ctx = req->ctx;
@@ -206,8 +221,7 @@ void io_req_local_work_add(struct io_kiocb *req, unsigned flags)
*/
if (!head) {
- if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
- atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
+ io_ctx_mark_taskrun(ctx);
if (ctx->has_evfd)
io_eventfd_signal(ctx, false);
}
@@ -231,6 +245,10 @@ void io_req_normal_work_add(struct io_kiocb *req)
if (!llist_add(&req->io_task_work.node, &tctx->task_list))
return;
+ /*
+ * Doesn't need to use ->rings_rcu, as resizing isn't supported for
+ * !DEFER_TASKRUN.
+ */
if (ctx->flags & IORING_SETUP_TASKRUN_FLAG)
atomic_or(IORING_SQ_TASKRUN, &ctx->rings->sq_flags);
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index be1d71dda317..01fc2a93f3ef 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -5109,6 +5109,12 @@ repeat:
return;
task = list_entry(it->task_pos, struct task_struct, cg_list);
+ /*
+ * Hide tasks that are exiting but not yet removed. Keep zombie
+ * leaders with live threads visible.
+ */
+ if ((task->flags & PF_EXITING) && !atomic_read(&task->signal->live))
+ goto repeat;
if (it->flags & CSS_TASK_ITER_PROCS) {
/* if PROCS, skip over tasks which aren't group leaders */
diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index e200de7c60b6..d21868455341 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -879,7 +879,7 @@ generate_doms:
/*
* Cgroup v2 doesn't support domain attributes, just set all of them
* to SD_ATTR_INIT. Also non-isolating partition root CPUs are a
- * subset of HK_TYPE_DOMAIN housekeeping CPUs.
+ * subset of HK_TYPE_DOMAIN_BOOT housekeeping CPUs.
*/
for (i = 0; i < ndoms; i++) {
/*
@@ -888,7 +888,7 @@ generate_doms:
*/
if (!csa || csa[i] == &top_cpuset)
cpumask_and(doms[i], top_cpuset.effective_cpus,
- housekeeping_cpumask(HK_TYPE_DOMAIN));
+ housekeeping_cpumask(HK_TYPE_DOMAIN_BOOT));
else
cpumask_copy(doms[i], csa[i]->effective_cpus);
if (dattr)
@@ -1329,17 +1329,22 @@ static bool prstate_housekeeping_conflict(int prstate, struct cpumask *new_cpus)
}
/*
- * update_hk_sched_domains - Update HK cpumasks & rebuild sched domains
+ * cpuset_update_sd_hk_unlock - Rebuild sched domains, update HK & unlock
*
- * Update housekeeping cpumasks and rebuild sched domains if necessary.
- * This should be called at the end of cpuset or hotplug actions.
+ * Update housekeeping cpumasks and rebuild sched domains if necessary and
+ * then do a cpuset_full_unlock().
+ * This should be called at the end of cpuset operation.
*/
-static void update_hk_sched_domains(void)
+static void cpuset_update_sd_hk_unlock(void)
+ __releases(&cpuset_mutex)
+ __releases(&cpuset_top_mutex)
{
+ /* force_sd_rebuild will be cleared in rebuild_sched_domains_locked() */
+ if (force_sd_rebuild)
+ rebuild_sched_domains_locked();
+
if (update_housekeeping) {
- /* Updating HK cpumasks implies rebuild sched domains */
update_housekeeping = false;
- force_sd_rebuild = true;
cpumask_copy(isolated_hk_cpus, isolated_cpus);
/*
@@ -1350,22 +1355,19 @@ static void update_hk_sched_domains(void)
mutex_unlock(&cpuset_mutex);
cpus_read_unlock();
WARN_ON_ONCE(housekeeping_update(isolated_hk_cpus));
- cpus_read_lock();
- mutex_lock(&cpuset_mutex);
+ mutex_unlock(&cpuset_top_mutex);
+ } else {
+ cpuset_full_unlock();
}
- /* force_sd_rebuild will be cleared in rebuild_sched_domains_locked() */
- if (force_sd_rebuild)
- rebuild_sched_domains_locked();
}
/*
- * Work function to invoke update_hk_sched_domains()
+ * Work function to invoke cpuset_update_sd_hk_unlock()
*/
static void hk_sd_workfn(struct work_struct *work)
{
cpuset_full_lock();
- update_hk_sched_domains();
- cpuset_full_unlock();
+ cpuset_update_sd_hk_unlock();
}
/**
@@ -3230,8 +3232,7 @@ ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
free_cpuset(trialcs);
out_unlock:
- update_hk_sched_domains();
- cpuset_full_unlock();
+ cpuset_update_sd_hk_unlock();
if (of_cft(of)->private == FILE_MEMLIST)
schedule_flush_migrate_mm();
return retval ?: nbytes;
@@ -3338,8 +3339,7 @@ static ssize_t cpuset_partition_write(struct kernfs_open_file *of, char *buf,
cpuset_full_lock();
if (is_cpuset_online(cs))
retval = update_prstate(cs, val);
- update_hk_sched_domains();
- cpuset_full_unlock();
+ cpuset_update_sd_hk_unlock();
return retval ?: nbytes;
}
@@ -3513,8 +3513,7 @@ static void cpuset_css_killed(struct cgroup_subsys_state *css)
/* Reset valid partition back to member */
if (is_partition_valid(cs))
update_prstate(cs, PRS_MEMBER);
- update_hk_sched_domains();
- cpuset_full_unlock();
+ cpuset_update_sd_hk_unlock();
}
static void cpuset_css_free(struct cgroup_subsys_state *css)
@@ -3923,11 +3922,13 @@ static void cpuset_handle_hotplug(void)
rcu_read_unlock();
}
-
/*
- * Queue a work to call housekeeping_update() & rebuild_sched_domains()
- * There will be a slight delay before the HK_TYPE_DOMAIN housekeeping
- * cpumask can correctly reflect what is in isolated_cpus.
+ * rebuild_sched_domains() will always be called directly if needed
+ * to make sure that newly added or removed CPU will be reflected in
+ * the sched domains. However, if isolated partition invalidation
+ * or recreation is being done (update_housekeeping set), a work item
+ * will be queued to call housekeeping_update() to update the
+ * corresponding housekeeping cpumasks after some slight delay.
*
* We rely on WORK_STRUCT_PENDING_BIT to not requeue a work item that
* is still pending. Before the pending bit is cleared, the work data
@@ -3936,8 +3937,10 @@ static void cpuset_handle_hotplug(void)
* previously queued work. Since hk_sd_workfn() doesn't use the work
* item at all, this is not a problem.
*/
- if (update_housekeeping || force_sd_rebuild)
- queue_work(system_unbound_wq, &hk_sd_work);
+ if (force_sd_rebuild)
+ rebuild_sched_domains_cpuslocked();
+ if (update_housekeeping)
+ queue_work(system_dfl_wq, &hk_sd_work);
free_tmpmasks(ptmp);
}
diff --git a/kernel/crash_dump_dm_crypt.c b/kernel/crash_dump_dm_crypt.c
index 1f4067fbdb94..a20d4097744a 100644
--- a/kernel/crash_dump_dm_crypt.c
+++ b/kernel/crash_dump_dm_crypt.c
@@ -168,8 +168,8 @@ static int read_key_from_user_keying(struct dm_crypt_key *dm_key)
memcpy(dm_key->data, ukp->data, ukp->datalen);
dm_key->key_size = ukp->datalen;
- kexec_dprintk("Get dm crypt key (size=%u) %s: %8ph\n", dm_key->key_size,
- dm_key->key_desc, dm_key->data);
+ kexec_dprintk("Get dm crypt key (size=%u) %s\n", dm_key->key_size,
+ dm_key->key_desc);
out:
up_read(&key->sem);
diff --git a/kernel/fork.c b/kernel/fork.c
index 65113a304518..bc2bf58b93b6 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1000,6 +1000,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig, int node)
#ifdef CONFIG_SCHED_MM_CID
tsk->mm_cid.cid = MM_CID_UNSET;
tsk->mm_cid.active = 0;
+ INIT_HLIST_NODE(&tsk->mm_cid.node);
#endif
return tsk;
@@ -1586,7 +1587,6 @@ static int copy_mm(u64 clone_flags, struct task_struct *tsk)
tsk->mm = mm;
tsk->active_mm = mm;
- sched_mm_cid_fork(tsk);
return 0;
}
@@ -2498,7 +2498,6 @@ bad_fork_cleanup_namespaces:
exit_nsproxy_namespaces(p);
bad_fork_cleanup_mm:
if (p->mm) {
- sched_mm_cid_exit(p);
mm_clear_owner(p->mm, p);
mmput(p->mm);
}
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index ab25b4aa9095..bfc89083daa9 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1144,12 +1144,12 @@ static int __arm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
lockdep_assert_held(&kprobe_mutex);
ret = ftrace_set_filter_ip(ops, (unsigned long)p->addr, 0, 0);
- if (WARN_ONCE(ret < 0, "Failed to arm kprobe-ftrace at %pS (error %d)\n", p->addr, ret))
+ if (ret < 0)
return ret;
if (*cnt == 0) {
ret = register_ftrace_function(ops);
- if (WARN(ret < 0, "Failed to register kprobe-ftrace (error %d)\n", ret)) {
+ if (ret < 0) {
/*
 * At this point, since ops is not registered, we should be safe from
* registering empty filter.
@@ -1178,6 +1178,10 @@ static int __disarm_kprobe_ftrace(struct kprobe *p, struct ftrace_ops *ops,
int ret;
lockdep_assert_held(&kprobe_mutex);
+ if (unlikely(kprobe_ftrace_disabled)) {
+ /* Now ftrace is disabled forever, disarm is already done. */
+ return 0;
+ }
if (*cnt == 1) {
ret = unregister_ftrace_function(ops);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b7f77c165a6e..496dff740dca 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4729,8 +4729,11 @@ void sched_cancel_fork(struct task_struct *p)
scx_cancel_fork(p);
}
+static void sched_mm_cid_fork(struct task_struct *t);
+
void sched_post_fork(struct task_struct *p)
{
+ sched_mm_cid_fork(p);
uclamp_post_fork(p);
scx_post_fork(p);
}
@@ -10617,13 +10620,10 @@ static inline void mm_cid_transit_to_cpu(struct task_struct *t, struct mm_cid_pc
}
}
-static bool mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm)
+static void mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm)
{
/* Remote access to mm::mm_cid::pcpu requires rq_lock */
guard(task_rq_lock)(t);
- /* If the task is not active it is not in the users count */
- if (!t->mm_cid.active)
- return false;
if (cid_on_task(t->mm_cid.cid)) {
/* If running on the CPU, put the CID in transit mode, otherwise drop it */
if (task_rq(t)->curr == t)
@@ -10631,69 +10631,43 @@ static bool mm_cid_fixup_task_to_cpu(struct task_struct *t, struct mm_struct *mm
else
mm_unset_cid_on_task(t);
}
- return true;
}
-static void mm_cid_do_fixup_tasks_to_cpus(struct mm_struct *mm)
+static void mm_cid_fixup_tasks_to_cpus(void)
{
- struct task_struct *p, *t;
- unsigned int users;
+ struct mm_struct *mm = current->mm;
+ struct task_struct *t;
- /*
- * This can obviously race with a concurrent affinity change, which
- * increases the number of allowed CPUs for this mm, but that does
- * not affect the mode and only changes the CID constraints. A
- * possible switch back to per task mode happens either in the
- * deferred handler function or in the next fork()/exit().
- *
- * The caller has already transferred. The newly incoming task is
- * already accounted for, but not yet visible.
- */
- users = mm->mm_cid.users - 2;
- if (!users)
- return;
+ lockdep_assert_held(&mm->mm_cid.mutex);
- guard(rcu)();
- for_other_threads(current, t) {
- if (mm_cid_fixup_task_to_cpu(t, mm))
- users--;
+ hlist_for_each_entry(t, &mm->mm_cid.user_list, mm_cid.node) {
+ /* Current has already transferred before invoking the fixup. */
+ if (t != current)
+ mm_cid_fixup_task_to_cpu(t, mm);
}
- if (!users)
- return;
-
- /* Happens only for VM_CLONE processes. */
- for_each_process_thread(p, t) {
- if (t == current || t->mm != mm)
- continue;
- if (mm_cid_fixup_task_to_cpu(t, mm)) {
- if (--users == 0)
- return;
- }
- }
-}
-
-static void mm_cid_fixup_tasks_to_cpus(void)
-{
- struct mm_struct *mm = current->mm;
-
- mm_cid_do_fixup_tasks_to_cpus(mm);
mm_cid_complete_transit(mm, MM_CID_ONCPU);
}
static bool sched_mm_cid_add_user(struct task_struct *t, struct mm_struct *mm)
{
+ lockdep_assert_held(&mm->mm_cid.lock);
+
t->mm_cid.active = 1;
+ hlist_add_head(&t->mm_cid.node, &mm->mm_cid.user_list);
mm->mm_cid.users++;
return mm_update_max_cids(mm);
}
-void sched_mm_cid_fork(struct task_struct *t)
+static void sched_mm_cid_fork(struct task_struct *t)
{
struct mm_struct *mm = t->mm;
bool percpu;
- WARN_ON_ONCE(!mm || t->mm_cid.cid != MM_CID_UNSET);
+ if (!mm)
+ return;
+
+ WARN_ON_ONCE(t->mm_cid.cid != MM_CID_UNSET);
guard(mutex)(&mm->mm_cid.mutex);
scoped_guard(raw_spinlock_irq, &mm->mm_cid.lock) {
@@ -10732,12 +10706,13 @@ void sched_mm_cid_fork(struct task_struct *t)
static bool sched_mm_cid_remove_user(struct task_struct *t)
{
+ lockdep_assert_held(&t->mm->mm_cid.lock);
+
t->mm_cid.active = 0;
- scoped_guard(preempt) {
- /* Clear the transition bit */
- t->mm_cid.cid = cid_from_transit_cid(t->mm_cid.cid);
- mm_unset_cid_on_task(t);
- }
+ /* Clear the transition bit */
+ t->mm_cid.cid = cid_from_transit_cid(t->mm_cid.cid);
+ mm_unset_cid_on_task(t);
+ hlist_del_init(&t->mm_cid.node);
t->mm->mm_cid.users--;
return mm_update_max_cids(t->mm);
}
@@ -10880,11 +10855,13 @@ void mm_init_cid(struct mm_struct *mm, struct task_struct *p)
mutex_init(&mm->mm_cid.mutex);
mm->mm_cid.irq_work = IRQ_WORK_INIT_HARD(mm_cid_irq_work);
INIT_WORK(&mm->mm_cid.work, mm_cid_work_fn);
+ INIT_HLIST_HEAD(&mm->mm_cid.user_list);
cpumask_copy(mm_cpus_allowed(mm), &p->cpus_mask);
bitmap_zero(mm_cidmask(mm), num_possible_cpus());
}
#else /* CONFIG_SCHED_MM_CID */
static inline void mm_update_cpus_allowed(struct mm_struct *mm, const struct cpumask *affmsk) { }
+static inline void sched_mm_cid_fork(struct task_struct *t) { }
#endif /* !CONFIG_SCHED_MM_CID */
static DEFINE_PER_CPU(struct sched_change_ctx, sched_change_ctx);
diff --git a/kernel/sched/ext.c b/kernel/sched/ext.c
index 1594987d637b..26a6ac2f8826 100644
--- a/kernel/sched/ext.c
+++ b/kernel/sched/ext.c
@@ -1103,7 +1103,7 @@ static void dispatch_enqueue(struct scx_sched *sch, struct scx_dispatch_q *dsq,
}
/* seq records the order tasks are queued, used by BPF DSQ iterator */
- dsq->seq++;
+ WRITE_ONCE(dsq->seq, dsq->seq + 1);
p->scx.dsq_seq = dsq->seq;
dsq_mod_nr(dsq, 1);
@@ -1470,16 +1470,15 @@ static void clr_task_runnable(struct task_struct *p, bool reset_runnable_at)
p->scx.flags |= SCX_TASK_RESET_RUNNABLE_AT;
}
-static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int enq_flags)
+static void enqueue_task_scx(struct rq *rq, struct task_struct *p, int core_enq_flags)
{
struct scx_sched *sch = scx_root;
int sticky_cpu = p->scx.sticky_cpu;
+ u64 enq_flags = core_enq_flags | rq->scx.extra_enq_flags;
if (enq_flags & ENQUEUE_WAKEUP)
rq->scx.flags |= SCX_RQ_IN_WAKEUP;
- enq_flags |= rq->scx.extra_enq_flags;
-
if (sticky_cpu >= 0)
p->scx.sticky_cpu = -1;
@@ -3908,8 +3907,8 @@ static u32 bypass_lb_cpu(struct scx_sched *sch, struct rq *rq,
* consider offloading iff the total queued duration is over the
* threshold.
*/
- min_delta_us = scx_bypass_lb_intv_us / SCX_BYPASS_LB_MIN_DELTA_DIV;
- if (delta < DIV_ROUND_UP(min_delta_us, scx_slice_bypass_us))
+ min_delta_us = READ_ONCE(scx_bypass_lb_intv_us) / SCX_BYPASS_LB_MIN_DELTA_DIV;
+ if (delta < DIV_ROUND_UP(min_delta_us, READ_ONCE(scx_slice_bypass_us)))
return 0;
raw_spin_rq_lock_irq(rq);
@@ -4137,7 +4136,7 @@ static void scx_bypass(bool bypass)
WARN_ON_ONCE(scx_bypass_depth <= 0);
if (scx_bypass_depth != 1)
goto unlock;
- WRITE_ONCE(scx_slice_dfl, scx_slice_bypass_us * NSEC_PER_USEC);
+ WRITE_ONCE(scx_slice_dfl, READ_ONCE(scx_slice_bypass_us) * NSEC_PER_USEC);
bypass_timestamp = ktime_get_ns();
if (sch)
scx_add_event(sch, SCX_EV_BYPASS_ACTIVATE, 1);
@@ -5259,13 +5258,14 @@ static int scx_enable(struct sched_ext_ops *ops, struct bpf_link *link)
if (!READ_ONCE(helper)) {
mutex_lock(&helper_mutex);
if (!helper) {
- helper = kthread_run_worker(0, "scx_enable_helper");
- if (IS_ERR_OR_NULL(helper)) {
- helper = NULL;
+ struct kthread_worker *w =
+ kthread_run_worker(0, "scx_enable_helper");
+ if (IS_ERR_OR_NULL(w)) {
mutex_unlock(&helper_mutex);
return -ENOMEM;
}
- sched_set_fifo(helper->task);
+ sched_set_fifo(w->task);
+ WRITE_ONCE(helper, w);
}
mutex_unlock(&helper_mutex);
}
diff --git a/kernel/sched/ext_internal.h b/kernel/sched/ext_internal.h
index 11ebb744d893..00b450597f3e 100644
--- a/kernel/sched/ext_internal.h
+++ b/kernel/sched/ext_internal.h
@@ -1035,26 +1035,108 @@ static const char *scx_enable_state_str[] = {
};
/*
- * sched_ext_entity->ops_state
+ * Task Ownership State Machine (sched_ext_entity->ops_state)
*
- * Used to track the task ownership between the SCX core and the BPF scheduler.
- * State transitions look as follows:
+ * The sched_ext core uses this state machine to track task ownership
+ * between the SCX core and the BPF scheduler. This allows the BPF
+ * scheduler to dispatch tasks without strict ordering requirements, while
+ * the SCX core safely rejects invalid dispatches.
*
- * NONE -> QUEUEING -> QUEUED -> DISPATCHING
- * ^ | |
- * | v v
- * \-------------------------------/
+ * State Transitions
*
- * QUEUEING and DISPATCHING states can be waited upon. See wait_ops_state() call
- * sites for explanations on the conditions being waited upon and why they are
- * safe. Transitions out of them into NONE or QUEUED must store_release and the
- * waiters should load_acquire.
+ * .------------> NONE (owned by SCX core)
+ * | | ^
+ * | enqueue | | direct dispatch
+ * | v |
+ * | QUEUEING -------'
+ * | |
+ * | enqueue |
+ * | completes |
+ * | v
+ * | QUEUED (owned by BPF scheduler)
+ * | |
+ * | dispatch |
+ * | |
+ * | v
+ * | DISPATCHING
+ * | |
+ * | dispatch |
+ * | completes |
+ * `---------------'
*
- * Tracking scx_ops_state enables sched_ext core to reliably determine whether
- * any given task can be dispatched by the BPF scheduler at all times and thus
- * relaxes the requirements on the BPF scheduler. This allows the BPF scheduler
- * to try to dispatch any task anytime regardless of its state as the SCX core
- * can safely reject invalid dispatches.
+ * State Descriptions
+ *
+ * - %SCX_OPSS_NONE:
+ * Task is owned by the SCX core. It's either on a run queue, running,
+ * or being manipulated by the core scheduler. The BPF scheduler has no
+ * claim on this task.
+ *
+ * - %SCX_OPSS_QUEUEING:
+ * Transitional state while transferring a task from the SCX core to
+ * the BPF scheduler. The task's rq lock is held during this state.
+ * Since QUEUEING is both entered and exited under the rq lock, dequeue
+ * can never observe this state (it would be a BUG). When finishing a
+ * dispatch, if the task is still in %SCX_OPSS_QUEUEING the completion
+ * path busy-waits for it to leave this state (via wait_ops_state())
+ * before retrying.
+ *
+ * - %SCX_OPSS_QUEUED:
+ * Task is owned by the BPF scheduler. It's on a DSQ (dispatch queue)
+ * and the BPF scheduler is responsible for dispatching it. A QSEQ
+ * (queue sequence number) is embedded in this state to detect
+ * dispatch/dequeue races: if a task is dequeued and re-enqueued, the
+ * QSEQ changes and any in-flight dispatch operations targeting the old
+ * QSEQ are safely ignored.
+ *
+ * - %SCX_OPSS_DISPATCHING:
+ * Transitional state while transferring a task from the BPF scheduler
+ * back to the SCX core. This state indicates the BPF scheduler has
+ * selected the task for execution. When dequeue needs to take the task
+ * off a DSQ and it is still in %SCX_OPSS_DISPATCHING, the dequeue path
+ * busy-waits for it to leave this state (via wait_ops_state()) before
+ * proceeding. Exits to %SCX_OPSS_NONE when dispatch completes.
+ *
+ * Memory Ordering
+ *
+ * Transitions out of %SCX_OPSS_QUEUEING and %SCX_OPSS_DISPATCHING into
+ * %SCX_OPSS_NONE or %SCX_OPSS_QUEUED must use atomic_long_set_release()
+ * and waiters must use atomic_long_read_acquire(). This ensures proper
+ * synchronization between concurrent operations.
+ *
+ * Cross-CPU Task Migration
+ *
+ * When moving a task in the %SCX_OPSS_DISPATCHING state, we can't simply
+ * grab the target CPU's rq lock because a concurrent dequeue might be
+ * waiting on %SCX_OPSS_DISPATCHING while holding the source rq lock
+ * (deadlock).
+ *
+ * The sched_ext core uses a "lock dancing" protocol coordinated by
+ * p->scx.holding_cpu. When moving a task to a different rq:
+ *
+ * 1. Verify task can be moved (CPU affinity, migration_disabled, etc.)
+ * 2. Set p->scx.holding_cpu to the current CPU
+ * 3. Set task state to %SCX_OPSS_NONE; dequeue waits while DISPATCHING
+ * is set, so clearing DISPATCHING first prevents the circular wait
+ * (safe to lock the rq we need)
+ * 4. Unlock the current CPU's rq
+ * 5. Lock src_rq (where the task currently lives)
+ * 6. Verify p->scx.holding_cpu == current CPU, if not, dequeue won the
+ * race (dequeue clears holding_cpu to -1 when it takes the task), in
+ * this case migration is aborted
+ * 7. If src_rq == dst_rq: clear holding_cpu and enqueue directly
+ * into dst_rq's local DSQ (no lock swap needed)
+ * 8. Otherwise: call move_remote_task_to_local_dsq(), which releases
+ * src_rq, locks dst_rq, and performs the deactivate/activate
+ * migration cycle (dst_rq is held on return)
+ * 9. Unlock dst_rq and re-lock the current CPU's rq to restore
+ * the lock state expected by the caller
+ *
+ * If any verification fails, abort the migration.
+ *
+ * This state tracking allows the BPF scheduler to try to dispatch any task
+ * at any time regardless of its state. The SCX core can safely
+ * reject/ignore invalid dispatches, simplifying the BPF scheduler
+ * implementation.
*/
enum scx_ops_state {
SCX_OPSS_NONE, /* owned by the SCX core */
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 3681b6ad9276..a83be0c834dd 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -161,6 +161,14 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
return cpuidle_enter(drv, dev, next_state);
}
+static void idle_call_stop_or_retain_tick(bool stop_tick)
+{
+ if (stop_tick || tick_nohz_tick_stopped())
+ tick_nohz_idle_stop_tick();
+ else
+ tick_nohz_idle_retain_tick();
+}
+
/**
* cpuidle_idle_call - the main idle function
*
@@ -170,7 +178,7 @@ static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
* set, and it returns with polling set. If it ever stops polling, it
* must clear the polling bit.
*/
-static void cpuidle_idle_call(void)
+static void cpuidle_idle_call(bool stop_tick)
{
struct cpuidle_device *dev = cpuidle_get_device();
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
@@ -186,7 +194,7 @@ static void cpuidle_idle_call(void)
}
if (cpuidle_not_available(drv, dev)) {
- tick_nohz_idle_stop_tick();
+ idle_call_stop_or_retain_tick(stop_tick);
default_idle_call();
goto exit_idle;
@@ -221,24 +229,35 @@ static void cpuidle_idle_call(void)
next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
call_cpuidle(drv, dev, next_state);
- } else {
- bool stop_tick = true;
+ } else if (drv->state_count > 1) {
+ /*
+ * stop_tick is expected to be true by default by cpuidle
+ * governors, which allows them to select idle states with
+ * target residency above the tick period length.
+ */
+ stop_tick = true;
/*
* Ask the cpuidle framework to choose a convenient idle state.
*/
next_state = cpuidle_select(drv, dev, &stop_tick);
- if (stop_tick || tick_nohz_tick_stopped())
- tick_nohz_idle_stop_tick();
- else
- tick_nohz_idle_retain_tick();
+ idle_call_stop_or_retain_tick(stop_tick);
entered_state = call_cpuidle(drv, dev, next_state);
/*
* Give the governor an opportunity to reflect on the outcome
*/
cpuidle_reflect(dev, entered_state);
+ } else {
+ idle_call_stop_or_retain_tick(stop_tick);
+
+ /*
+ * If there is only a single idle state (or none), there is
+ * nothing meaningful for the governor to choose. Skip the
+ * governor and always use state 0.
+ */
+ call_cpuidle(drv, dev, 0);
}
exit_idle:
@@ -259,6 +278,7 @@ exit_idle:
static void do_idle(void)
{
int cpu = smp_processor_id();
+ bool got_tick = false;
/*
* Check if we need to update blocked load
@@ -329,8 +349,9 @@ static void do_idle(void)
tick_nohz_idle_restart_tick();
cpu_idle_poll();
} else {
- cpuidle_idle_call();
+ cpuidle_idle_call(got_tick);
}
+ got_tick = tick_nohz_idle_got_tick();
arch_cpu_idle_exit();
}
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 36fd2313ae7e..0d832317d576 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -697,7 +697,7 @@ EXPORT_SYMBOL(clock_t_to_jiffies);
*
* Return: jiffies_64 value converted to 64-bit "clock_t" (CLOCKS_PER_SEC)
*/
-u64 jiffies_64_to_clock_t(u64 x)
+notrace u64 jiffies_64_to_clock_t(u64 x)
{
#if (TICK_NSEC % (NSEC_PER_SEC / USER_HZ)) == 0
# if HZ < USER_HZ
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index aeaec79bc09c..b77119d71641 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -190,7 +190,7 @@ struct worker_pool {
int id; /* I: pool ID */
unsigned int flags; /* L: flags */
- unsigned long watchdog_ts; /* L: watchdog timestamp */
+ unsigned long last_progress_ts; /* L: last forward progress timestamp */
bool cpu_stall; /* WD: stalled cpu bound pool */
/*
@@ -1697,7 +1697,7 @@ static void __pwq_activate_work(struct pool_workqueue *pwq,
WARN_ON_ONCE(!(*wdb & WORK_STRUCT_INACTIVE));
trace_workqueue_activate_work(work);
if (list_empty(&pwq->pool->worklist))
- pwq->pool->watchdog_ts = jiffies;
+ pwq->pool->last_progress_ts = jiffies;
move_linked_works(work, &pwq->pool->worklist, NULL);
__clear_bit(WORK_STRUCT_INACTIVE_BIT, wdb);
}
@@ -2348,7 +2348,7 @@ retry:
*/
if (list_empty(&pwq->inactive_works) && pwq_tryinc_nr_active(pwq, false)) {
if (list_empty(&pool->worklist))
- pool->watchdog_ts = jiffies;
+ pool->last_progress_ts = jiffies;
trace_workqueue_activate_work(work);
insert_work(pwq, work, &pool->worklist, work_flags);
@@ -3204,6 +3204,7 @@ __acquires(&pool->lock)
worker->current_pwq = pwq;
if (worker->task)
worker->current_at = worker->task->se.sum_exec_runtime;
+ worker->current_start = jiffies;
work_data = *work_data_bits(work);
worker->current_color = get_work_color(work_data);
@@ -3352,7 +3353,7 @@ static void process_scheduled_works(struct worker *worker)
while ((work = list_first_entry_or_null(&worker->scheduled,
struct work_struct, entry))) {
if (first) {
- worker->pool->watchdog_ts = jiffies;
+ worker->pool->last_progress_ts = jiffies;
first = false;
}
process_one_work(worker, work);
@@ -4850,7 +4851,7 @@ static int init_worker_pool(struct worker_pool *pool)
pool->cpu = -1;
pool->node = NUMA_NO_NODE;
pool->flags |= POOL_DISASSOCIATED;
- pool->watchdog_ts = jiffies;
+ pool->last_progress_ts = jiffies;
INIT_LIST_HEAD(&pool->worklist);
INIT_LIST_HEAD(&pool->idle_list);
hash_init(pool->busy_hash);
@@ -6274,7 +6275,7 @@ static void pr_cont_worker_id(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
- if (pool->flags & WQ_BH)
+ if (pool->flags & POOL_BH)
pr_cont("bh%s",
pool->attrs->nice == HIGHPRI_NICE_LEVEL ? "-hi" : "");
else
@@ -6359,6 +6360,8 @@ static void show_pwq(struct pool_workqueue *pwq)
pr_cont(" %s", comma ? "," : "");
pr_cont_worker_id(worker);
pr_cont(":%ps", worker->current_func);
+ pr_cont(" for %us",
+ jiffies_to_msecs(jiffies - worker->current_start) / 1000);
list_for_each_entry(work, &worker->scheduled, entry)
pr_cont_work(false, work, &pcws);
pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
@@ -6462,7 +6465,7 @@ static void show_one_worker_pool(struct worker_pool *pool)
/* How long the first pending work is waiting for a worker. */
if (!list_empty(&pool->worklist))
- hung = jiffies_to_msecs(jiffies - pool->watchdog_ts) / 1000;
+ hung = jiffies_to_msecs(jiffies - pool->last_progress_ts) / 1000;
/*
* Defer printing to avoid deadlocks in console drivers that
@@ -7580,11 +7583,11 @@ MODULE_PARM_DESC(panic_on_stall_time, "Panic if stall exceeds this many seconds
/*
* Show workers that might prevent the processing of pending work items.
- * The only candidates are CPU-bound workers in the running state.
- * Pending work items should be handled by another idle worker
- * in all other situations.
+ * A busy worker that is not running on the CPU (e.g. sleeping in
+ * wait_event_idle() with PF_WQ_WORKER cleared) can stall the pool just as
+ * effectively as a CPU-bound one, so dump every in-flight worker.
*/
-static void show_cpu_pool_hog(struct worker_pool *pool)
+static void show_cpu_pool_busy_workers(struct worker_pool *pool)
{
struct worker *worker;
unsigned long irq_flags;
@@ -7593,36 +7596,34 @@ static void show_cpu_pool_hog(struct worker_pool *pool)
raw_spin_lock_irqsave(&pool->lock, irq_flags);
hash_for_each(pool->busy_hash, bkt, worker, hentry) {
- if (task_is_running(worker->task)) {
- /*
- * Defer printing to avoid deadlocks in console
- * drivers that queue work while holding locks
- * also taken in their write paths.
- */
- printk_deferred_enter();
+ /*
+ * Defer printing to avoid deadlocks in console
+ * drivers that queue work while holding locks
+ * also taken in their write paths.
+ */
+ printk_deferred_enter();
- pr_info("pool %d:\n", pool->id);
- sched_show_task(worker->task);
+ pr_info("pool %d:\n", pool->id);
+ sched_show_task(worker->task);
- printk_deferred_exit();
- }
+ printk_deferred_exit();
}
raw_spin_unlock_irqrestore(&pool->lock, irq_flags);
}
-static void show_cpu_pools_hogs(void)
+static void show_cpu_pools_busy_workers(void)
{
struct worker_pool *pool;
int pi;
- pr_info("Showing backtraces of running workers in stalled CPU-bound worker pools:\n");
+ pr_info("Showing backtraces of busy workers in stalled worker pools:\n");
rcu_read_lock();
for_each_pool(pool, pi) {
if (pool->cpu_stall)
- show_cpu_pool_hog(pool);
+ show_cpu_pool_busy_workers(pool);
}
@@ -7691,7 +7692,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
touched = READ_ONCE(per_cpu(wq_watchdog_touched_cpu, pool->cpu));
else
touched = READ_ONCE(wq_watchdog_touched);
- pool_ts = READ_ONCE(pool->watchdog_ts);
+ pool_ts = READ_ONCE(pool->last_progress_ts);
if (time_after(pool_ts, touched))
ts = pool_ts;
@@ -7719,7 +7720,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
show_all_workqueues();
if (cpu_pool_stall)
- show_cpu_pools_hogs();
+ show_cpu_pools_busy_workers();
if (lockup_detected)
panic_on_wq_watchdog(max_stall_time);
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index f6275944ada7..8def1ddc5a1b 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -32,6 +32,7 @@ struct worker {
work_func_t current_func; /* K: function */
struct pool_workqueue *current_pwq; /* K: pwq */
u64 current_at; /* K: runtime at start or last wakeup */
+ unsigned long current_start; /* K: start time of current work item */
unsigned int current_color; /* K: color */
int sleeping; /* S: is worker sleeping? */
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index 449369a60846..2da049216fe0 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -316,7 +316,7 @@ int __init xbc_node_compose_key_after(struct xbc_node *root,
depth ? "." : "");
if (ret < 0)
return ret;
- if (ret > size) {
+ if (ret >= size) {
size = 0;
} else {
size -= ret;
@@ -532,9 +532,9 @@ static char *skip_spaces_until_newline(char *p)
static int __init __xbc_open_brace(char *p)
{
/* Push the last key as open brace */
- open_brace[brace_index++] = xbc_node_index(last_parent);
if (brace_index >= XBC_DEPTH_MAX)
return xbc_parse_error("Exceed max depth of braces", p);
+ open_brace[brace_index++] = xbc_node_index(last_parent);
return 0;
}
@@ -802,7 +802,7 @@ static int __init xbc_verify_tree(void)
/* Brace closing */
if (brace_index) {
- n = &xbc_nodes[open_brace[brace_index]];
+ n = &xbc_nodes[open_brace[brace_index - 1]];
return xbc_parse_error("Brace is not closed",
xbc_node_get_data(n));
}
diff --git a/lib/crypto/Makefile b/lib/crypto/Makefile
index 725eef05b758..dc7a56f7287d 100644
--- a/lib/crypto/Makefile
+++ b/lib/crypto/Makefile
@@ -55,6 +55,9 @@ libaes-$(CONFIG_SPARC) += sparc/aes_asm.o
libaes-$(CONFIG_X86) += x86/aes-aesni.o
endif # CONFIG_CRYPTO_LIB_AES_ARCH
+# clean-files must be defined unconditionally
+clean-files += powerpc/aesp8-ppc.S
+
################################################################################
obj-$(CONFIG_CRYPTO_LIB_AESCFB) += libaescfb.o
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 912c248a3f7e..b298cba853ab 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2797,7 +2797,8 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
_dst_pmd = pmd_mkwrite(pmd_mkdirty(_dst_pmd), dst_vma);
} else {
src_pmdval = pmdp_huge_clear_flush(src_vma, src_addr, src_pmd);
- _dst_pmd = folio_mk_pmd(src_folio, dst_vma->vm_page_prot);
+ _dst_pmd = move_soft_dirty_pmd(src_pmdval);
+ _dst_pmd = clear_uffd_wp_pmd(_dst_pmd);
}
set_pmd_at(mm, dst_addr, dst_pmd, _dst_pmd);
diff --git a/mm/rmap.c b/mm/rmap.c
index 0f00570d1b9e..391337282e3f 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1955,7 +1955,14 @@ static inline unsigned int folio_unmap_pte_batch(struct folio *folio,
if (userfaultfd_wp(vma))
return 1;
- return folio_pte_batch(folio, pvmw->pte, pte, max_nr);
+ /*
+ * If unmap fails, we need to restore the ptes. To avoid accidentally
+ * upgrading write permissions for ptes that were not originally
+ * writable, and to avoid losing the soft-dirty bit, use the
+ * appropriate FPB flags.
+ */
+ return folio_pte_batch_flags(folio, vma, pvmw->pte, &pte, max_nr,
+ FPB_RESPECT_WRITE | FPB_RESPECT_SOFT_DIRTY);
}
/*
@@ -2443,11 +2450,17 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
__maybe_unused pmd_t pmdval;
if (flags & TTU_SPLIT_HUGE_PMD) {
+ /*
+ * split_huge_pmd_locked() might leave the
+ * folio mapped through PTEs. Retry the walk
+ * so we can detect this scenario and properly
+ * abort the walk.
+ */
split_huge_pmd_locked(vma, pvmw.address,
pvmw.pmd, true);
- ret = false;
- page_vma_mapped_walk_done(&pvmw);
- break;
+ flags &= ~TTU_SPLIT_HUGE_PMD;
+ page_vma_mapped_walk_restart(&pvmw);
+ continue;
}
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
pmdval = pmdp_get(pvmw.pmd);
diff --git a/mm/slub.c b/mm/slub.c
index 20cb4f3b636d..2b2d33cc735c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2119,13 +2119,6 @@ static inline size_t obj_exts_alloc_size(struct kmem_cache *s,
size_t sz = sizeof(struct slabobj_ext) * slab->objects;
struct kmem_cache *obj_exts_cache;
- /*
- * slabobj_ext array for KMALLOC_CGROUP allocations
- * are served from KMALLOC_NORMAL caches.
- */
- if (!mem_alloc_profiling_enabled())
- return sz;
-
if (sz > KMALLOC_MAX_CACHE_SIZE)
return sz;
@@ -2797,6 +2790,7 @@ static void free_empty_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf)
if (s->flags & SLAB_KMALLOC)
mark_obj_codetag_empty(sheaf);
+ VM_WARN_ON_ONCE(sheaf->size > 0);
kfree(sheaf);
stat(s, SHEAF_FREE);
@@ -2828,6 +2822,7 @@ static int refill_sheaf(struct kmem_cache *s, struct slab_sheaf *sheaf,
return 0;
}
+static void sheaf_flush_unused(struct kmem_cache *s, struct slab_sheaf *sheaf);
static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp)
{
@@ -2837,6 +2832,7 @@ static struct slab_sheaf *alloc_full_sheaf(struct kmem_cache *s, gfp_t gfp)
return NULL;
if (refill_sheaf(s, sheaf, gfp | __GFP_NOMEMALLOC | __GFP_NOWARN)) {
+ sheaf_flush_unused(s, sheaf);
free_empty_sheaf(s, sheaf);
return NULL;
}
@@ -4623,6 +4619,7 @@ __pcs_replace_empty_main(struct kmem_cache *s, struct slub_percpu_sheaves *pcs,
* we must be very low on memory so don't bother
* with the barn
*/
+ sheaf_flush_unused(s, empty);
free_empty_sheaf(s, empty);
}
} else {
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index 343c841784ce..901b93530b21 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -205,9 +205,9 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac,
s32 result;
u64 global_id;
void *payload, *payload_end;
- int payload_len;
+ u32 payload_len;
char *result_msg;
- int result_msg_len;
+ u32 result_msg_len;
int ret = -EINVAL;
mutex_lock(&ac->mutex);
@@ -217,10 +217,12 @@ int ceph_handle_auth_reply(struct ceph_auth_client *ac,
result = ceph_decode_32(&p);
global_id = ceph_decode_64(&p);
payload_len = ceph_decode_32(&p);
+ ceph_decode_need(&p, end, payload_len, bad);
payload = p;
p += payload_len;
ceph_decode_need(&p, end, sizeof(u32), bad);
result_msg_len = ceph_decode_32(&p);
+ ceph_decode_need(&p, end, result_msg_len, bad);
result_msg = p;
p += result_msg_len;
if (p != end)
diff --git a/net/ceph/messenger_v2.c b/net/ceph/messenger_v2.c
index 5ec3272cd2dd..50f65820f623 100644
--- a/net/ceph/messenger_v2.c
+++ b/net/ceph/messenger_v2.c
@@ -392,7 +392,7 @@ static int head_onwire_len(int ctrl_len, bool secure)
int head_len;
int rem_len;
- BUG_ON(ctrl_len < 0 || ctrl_len > CEPH_MSG_MAX_CONTROL_LEN);
+ BUG_ON(ctrl_len < 1 || ctrl_len > CEPH_MSG_MAX_CONTROL_LEN);
if (secure) {
head_len = CEPH_PREAMBLE_SECURE_LEN;
@@ -401,9 +401,7 @@ static int head_onwire_len(int ctrl_len, bool secure)
head_len += padded_len(rem_len) + CEPH_GCM_TAG_LEN;
}
} else {
- head_len = CEPH_PREAMBLE_PLAIN_LEN;
- if (ctrl_len)
- head_len += ctrl_len + CEPH_CRC_LEN;
+ head_len = CEPH_PREAMBLE_PLAIN_LEN + ctrl_len + CEPH_CRC_LEN;
}
return head_len;
}
@@ -528,11 +526,16 @@ static int decode_preamble(void *p, struct ceph_frame_desc *desc)
desc->fd_aligns[i] = ceph_decode_16(&p);
}
- if (desc->fd_lens[0] < 0 ||
+ /*
+ * This would fire for FRAME_TAG_WAIT (it has one empty
+ * segment), but we should never get it as client.
+ */
+ if (desc->fd_lens[0] < 1 ||
desc->fd_lens[0] > CEPH_MSG_MAX_CONTROL_LEN) {
pr_err("bad control segment length %d\n", desc->fd_lens[0]);
return -EINVAL;
}
+
if (desc->fd_lens[1] < 0 ||
desc->fd_lens[1] > CEPH_MSG_MAX_FRONT_LEN) {
pr_err("bad front segment length %d\n", desc->fd_lens[1]);
@@ -549,10 +552,6 @@ static int decode_preamble(void *p, struct ceph_frame_desc *desc)
return -EINVAL;
}
- /*
- * This would fire for FRAME_TAG_WAIT (it has one empty
- * segment), but we should never get it as client.
- */
if (!desc->fd_lens[desc->fd_seg_cnt - 1]) {
pr_err("last segment empty, segment count %d\n",
desc->fd_seg_cnt);
@@ -2833,12 +2832,15 @@ static int process_message_header(struct ceph_connection *con,
void *p, void *end)
{
struct ceph_frame_desc *desc = &con->v2.in_desc;
- struct ceph_msg_header2 *hdr2 = p;
+ struct ceph_msg_header2 *hdr2;
struct ceph_msg_header hdr;
int skip;
int ret;
u64 seq;
+ ceph_decode_need(&p, end, sizeof(*hdr2), bad);
+ hdr2 = p;
+
/* verify seq# */
seq = le64_to_cpu(hdr2->seq);
if ((s64)seq - (s64)con->in_seq < 1) {
@@ -2869,6 +2871,10 @@ static int process_message_header(struct ceph_connection *con,
WARN_ON(!con->in_msg);
WARN_ON(con->in_msg->con != con);
return 1;
+
+bad:
+ pr_err("failed to decode message header\n");
+ return -EINVAL;
}
static int process_message(struct ceph_connection *con)
@@ -2898,6 +2904,11 @@ static int __handle_control(struct ceph_connection *con, void *p)
if (con->v2.in_desc.fd_tag != FRAME_TAG_MESSAGE)
return process_control(con, p, end);
+ if (con->state != CEPH_CON_S_OPEN) {
+ con->error_msg = "protocol error, unexpected message";
+ return -EINVAL;
+ }
+
ret = process_message_header(con, p, end);
if (ret < 0)
return ret;
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 5136b3766c44..d5080530ce0c 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -72,8 +72,8 @@ static struct ceph_monmap *ceph_monmap_decode(void **p, void *end, bool msgr2)
struct ceph_monmap *monmap = NULL;
struct ceph_fsid fsid;
u32 struct_len;
- int blob_len;
- int num_mon;
+ u32 blob_len;
+ u32 num_mon;
u8 struct_v;
u32 epoch;
int ret;
@@ -112,7 +112,7 @@ static struct ceph_monmap *ceph_monmap_decode(void **p, void *end, bool msgr2)
}
ceph_decode_32_safe(p, end, num_mon, e_inval);
- dout("%s fsid %pU epoch %u num_mon %d\n", __func__, &fsid, epoch,
+ dout("%s fsid %pU epoch %u num_mon %u\n", __func__, &fsid, epoch,
num_mon);
if (num_mon > CEPH_MAX_MON)
goto e_inval;
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 237f67a5d004..ef8b7e8b1e9c 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1062,14 +1062,25 @@ static int cache_release(struct inode *inode, struct file *filp,
struct cache_reader *rp = filp->private_data;
if (rp) {
+ struct cache_request *rq = NULL;
+
spin_lock(&queue_lock);
if (rp->offset) {
struct cache_queue *cq;
- for (cq= &rp->q; &cq->list != &cd->queue;
- cq = list_entry(cq->list.next, struct cache_queue, list))
+ for (cq = &rp->q; &cq->list != &cd->queue;
+ cq = list_entry(cq->list.next,
+ struct cache_queue, list))
if (!cq->reader) {
- container_of(cq, struct cache_request, q)
- ->readers--;
+ struct cache_request *cr =
+ container_of(cq,
+ struct cache_request, q);
+ cr->readers--;
+ if (cr->readers == 0 &&
+ !test_bit(CACHE_PENDING,
+ &cr->item->flags)) {
+ list_del(&cr->q.list);
+ rq = cr;
+ }
break;
}
rp->offset = 0;
@@ -1077,9 +1088,14 @@ static int cache_release(struct inode *inode, struct file *filp,
list_del(&rp->q.list);
spin_unlock(&queue_lock);
+ if (rq) {
+ cache_put(rq->item, cd);
+ kfree(rq->buf);
+ kfree(rq);
+ }
+
filp->private_data = NULL;
kfree(rp);
-
}
if (filp->f_mode & FMODE_WRITE) {
atomic_dec(&cd->writers);
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c
index 15bbf953dfad..b51a162885bb 100644
--- a/net/sunrpc/xprtrdma/verbs.c
+++ b/net/sunrpc/xprtrdma/verbs.c
@@ -1362,7 +1362,7 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed)
needed += RPCRDMA_MAX_RECV_BATCH;
if (atomic_inc_return(&ep->re_receiving) > 1)
- goto out;
+ goto out_dec;
/* fast path: all needed reps can be found on the free list */
wr = NULL;
@@ -1385,7 +1385,7 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed)
++count;
}
if (!wr)
- goto out;
+ goto out_dec;
rc = ib_post_recv(ep->re_id->qp, wr,
(const struct ib_recv_wr **)&bad_wr);
@@ -1400,9 +1400,10 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, int needed)
--count;
}
}
+
+out_dec:
if (atomic_dec_return(&ep->re_receiving) > 0)
complete(&ep->re_done);
-
out:
trace_xprtrdma_post_recvs(r_xprt, count);
ep->re_receive_count += count;
diff --git a/rust/Makefile b/rust/Makefile
index 629b3bdd2b20..9801af2e1e02 100644
--- a/rust/Makefile
+++ b/rust/Makefile
@@ -148,7 +148,8 @@ doctests_modifiers_workaround := $(rustdoc_modifiers_workaround)$(if $(call rust
quiet_cmd_rustdoc = RUSTDOC $(if $(rustdoc_host),H, ) $<
cmd_rustdoc = \
OBJTREE=$(abspath $(objtree)) \
- $(RUSTDOC) $(filter-out $(skip_flags) --remap-path-prefix=%,$(if $(rustdoc_host),$(rust_common_flags),$(rust_flags))) \
+ $(RUSTDOC) $(filter-out $(skip_flags) --remap-path-prefix=% --remap-path-scope=%, \
+ $(if $(rustdoc_host),$(rust_common_flags),$(rust_flags))) \
$(rustc_target_flags) -L$(objtree)/$(obj) \
-Zunstable-options --generate-link-to-definition \
--output $(rustdoc_output) \
@@ -334,7 +335,7 @@ quiet_cmd_rustdoc_test_kernel = RUSTDOC TK $<
rm -rf $(objtree)/$(obj)/test/doctests/kernel; \
mkdir -p $(objtree)/$(obj)/test/doctests/kernel; \
OBJTREE=$(abspath $(objtree)) \
- $(RUSTDOC) --test $(filter-out --remap-path-prefix=%,$(rust_flags)) \
+ $(RUSTDOC) --test $(filter-out --remap-path-prefix=% --remap-path-scope=%,$(rust_flags)) \
-L$(objtree)/$(obj) --extern ffi --extern pin_init \
--extern kernel --extern build_error --extern macros \
--extern bindings --extern uapi \
@@ -526,11 +527,9 @@ quiet_cmd_rustc_procmacrolibrary = $(RUSTC_OR_CLIPPY_QUIET) PL $@
cmd_rustc_procmacrolibrary = \
$(if $(skip_clippy),$(RUSTC),$(RUSTC_OR_CLIPPY)) \
$(filter-out $(skip_flags),$(rust_common_flags) $(rustc_target_flags)) \
- --emit=dep-info,link --crate-type rlib -O \
+ --emit=dep-info=$(depfile) --emit=link=$@ --crate-type rlib -O \
--out-dir $(objtree)/$(obj) -L$(objtree)/$(obj) \
- --crate-name $(patsubst lib%.rlib,%,$(notdir $@)) $<; \
- mv $(objtree)/$(obj)/$(patsubst lib%.rlib,%,$(notdir $@)).d $(depfile); \
- sed -i '/^\#/d' $(depfile)
+ --crate-name $(patsubst lib%.rlib,%,$(notdir $@)) $<
$(obj)/libproc_macro2.rlib: private skip_clippy = 1
$(obj)/libproc_macro2.rlib: private rustc_target_flags = $(proc_macro2-flags)
diff --git a/rust/kernel/cpufreq.rs b/rust/kernel/cpufreq.rs
index 76faa1ac8501..f5adee48d40c 100644
--- a/rust/kernel/cpufreq.rs
+++ b/rust/kernel/cpufreq.rs
@@ -401,6 +401,7 @@ impl TableBuilder {
/// ```
/// use kernel::cpufreq::{DEFAULT_TRANSITION_LATENCY_NS, Policy};
///
+/// #[allow(clippy::double_parens, reason = "False positive before 1.92.0")]
/// fn update_policy(policy: &mut Policy) {
/// policy
/// .set_dvfs_possible_from_any_cpu(true)
diff --git a/rust/kernel/dma.rs b/rust/kernel/dma.rs
index 909d56fd5118..a396f8435739 100644
--- a/rust/kernel/dma.rs
+++ b/rust/kernel/dma.rs
@@ -461,6 +461,19 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
self.count * core::mem::size_of::<T>()
}
+ /// Returns the raw pointer to the allocated region in the CPU's virtual address space.
+ #[inline]
+ pub fn as_ptr(&self) -> *const [T] {
+ core::ptr::slice_from_raw_parts(self.cpu_addr.as_ptr(), self.count)
+ }
+
+ /// Returns the raw pointer to the allocated region in the CPU's virtual address space as
+ /// a mutable pointer.
+ #[inline]
+ pub fn as_mut_ptr(&self) -> *mut [T] {
+ core::ptr::slice_from_raw_parts_mut(self.cpu_addr.as_ptr(), self.count)
+ }
+
/// Returns the base address to the allocated region in the CPU's virtual address space.
pub fn start_ptr(&self) -> *const T {
self.cpu_addr.as_ptr()
@@ -581,23 +594,6 @@ impl<T: AsBytes + FromBytes> CoherentAllocation<T> {
Ok(())
}
- /// Returns a pointer to an element from the region with bounds checking. `offset` is in
- /// units of `T`, not the number of bytes.
- ///
- /// Public but hidden since it should only be used from [`dma_read`] and [`dma_write`] macros.
- #[doc(hidden)]
- pub fn item_from_index(&self, offset: usize) -> Result<*mut T> {
- if offset >= self.count {
- return Err(EINVAL);
- }
- // SAFETY:
- // - The pointer is valid due to type invariant on `CoherentAllocation`
- // and we've just checked that the range and index is within bounds.
- // - `offset` can't overflow since it is smaller than `self.count` and we've checked
- // that `self.count` won't overflow early in the constructor.
- Ok(unsafe { self.cpu_addr.as_ptr().add(offset) })
- }
-
/// Reads the value of `field` and ensures that its type is [`FromBytes`].
///
/// # Safety
@@ -670,6 +666,9 @@ unsafe impl<T: AsBytes + FromBytes + Send> Send for CoherentAllocation<T> {}
/// Reads a field of an item from an allocated region of structs.
///
+/// The syntax is of the form `kernel::dma_read!(dma, proj)` where `dma` is an expression evaluating
+/// to a [`CoherentAllocation`] and `proj` is a [projection specification](kernel::ptr::project!).
+///
/// # Examples
///
/// ```
@@ -684,36 +683,29 @@ unsafe impl<T: AsBytes + FromBytes + Send> Send for CoherentAllocation<T> {}
/// unsafe impl kernel::transmute::AsBytes for MyStruct{};
///
/// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
-/// let whole = kernel::dma_read!(alloc[2]);
-/// let field = kernel::dma_read!(alloc[1].field);
+/// let whole = kernel::dma_read!(alloc, [2]?);
+/// let field = kernel::dma_read!(alloc, [1]?.field);
/// # Ok::<(), Error>(()) }
/// ```
#[macro_export]
macro_rules! dma_read {
- ($dma:expr, $idx: expr, $($field:tt)*) => {{
- (|| -> ::core::result::Result<_, $crate::error::Error> {
- let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
- // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
- // dereferenced. The compiler also further validates the expression on whether `field`
- // is a member of `item` when expanded by the macro.
- unsafe {
- let ptr_field = ::core::ptr::addr_of!((*item) $($field)*);
- ::core::result::Result::Ok(
- $crate::dma::CoherentAllocation::field_read(&$dma, ptr_field)
- )
- }
- })()
+ ($dma:expr, $($proj:tt)*) => {{
+ let dma = &$dma;
+ let ptr = $crate::ptr::project!(
+ $crate::dma::CoherentAllocation::as_ptr(dma), $($proj)*
+ );
+ // SAFETY: The pointer created by the projection is within the DMA region.
+ unsafe { $crate::dma::CoherentAllocation::field_read(dma, ptr) }
}};
- ($dma:ident [ $idx:expr ] $($field:tt)* ) => {
- $crate::dma_read!($dma, $idx, $($field)*)
- };
- ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {
- $crate::dma_read!($($dma).*, $idx, $($field)*)
- };
}
/// Writes to a field of an item from an allocated region of structs.
///
+/// The syntax is of the form `kernel::dma_write!(dma, proj, val)` where `dma` is an expression
+/// evaluating to a [`CoherentAllocation`], `proj` is a
+/// [projection specification](kernel::ptr::project!), and `val` is the value to be written to the
+/// projected location.
+///
/// # Examples
///
/// ```
@@ -728,37 +720,31 @@ macro_rules! dma_read {
/// unsafe impl kernel::transmute::AsBytes for MyStruct{};
///
/// # fn test(alloc: &kernel::dma::CoherentAllocation<MyStruct>) -> Result {
-/// kernel::dma_write!(alloc[2].member = 0xf);
-/// kernel::dma_write!(alloc[1] = MyStruct { member: 0xf });
+/// kernel::dma_write!(alloc, [2]?.member, 0xf);
+/// kernel::dma_write!(alloc, [1]?, MyStruct { member: 0xf });
/// # Ok::<(), Error>(()) }
/// ```
#[macro_export]
macro_rules! dma_write {
- ($dma:ident [ $idx:expr ] $($field:tt)*) => {{
- $crate::dma_write!($dma, $idx, $($field)*)
- }};
- ($($dma:ident).* [ $idx:expr ] $($field:tt)* ) => {{
- $crate::dma_write!($($dma).*, $idx, $($field)*)
+ (@parse [$dma:expr] [$($proj:tt)*] [, $val:expr]) => {{
+ let dma = &$dma;
+ let ptr = $crate::ptr::project!(
+ mut $crate::dma::CoherentAllocation::as_mut_ptr(dma), $($proj)*
+ );
+ let val = $val;
+ // SAFETY: The pointer created by the projection is within the DMA region.
+ unsafe { $crate::dma::CoherentAllocation::field_write(dma, ptr, val) }
}};
- ($dma:expr, $idx: expr, = $val:expr) => {
- (|| -> ::core::result::Result<_, $crate::error::Error> {
- let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
- // SAFETY: `item_from_index` ensures that `item` is always a valid item.
- unsafe { $crate::dma::CoherentAllocation::field_write(&$dma, item, $val) }
- ::core::result::Result::Ok(())
- })()
+ (@parse [$dma:expr] [$($proj:tt)*] [.$field:tt $($rest:tt)*]) => {
+ $crate::dma_write!(@parse [$dma] [$($proj)* .$field] [$($rest)*])
+ };
+ (@parse [$dma:expr] [$($proj:tt)*] [[$index:expr]? $($rest:tt)*]) => {
+ $crate::dma_write!(@parse [$dma] [$($proj)* [$index]?] [$($rest)*])
+ };
+ (@parse [$dma:expr] [$($proj:tt)*] [[$index:expr] $($rest:tt)*]) => {
+ $crate::dma_write!(@parse [$dma] [$($proj)* [$index]] [$($rest)*])
};
- ($dma:expr, $idx: expr, $(.$field:ident)* = $val:expr) => {
- (|| -> ::core::result::Result<_, $crate::error::Error> {
- let item = $crate::dma::CoherentAllocation::item_from_index(&$dma, $idx)?;
- // SAFETY: `item_from_index` ensures that `item` is always a valid pointer and can be
- // dereferenced. The compiler also further validates the expression on whether `field`
- // is a member of `item` when expanded by the macro.
- unsafe {
- let ptr_field = ::core::ptr::addr_of_mut!((*item) $(.$field)*);
- $crate::dma::CoherentAllocation::field_write(&$dma, ptr_field, $val)
- }
- ::core::result::Result::Ok(())
- })()
+ ($dma:expr, $($rest:tt)*) => {
+ $crate::dma_write!(@parse [$dma] [] [$($rest)*])
};
}
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index 3da92f18f4ee..d93292d47420 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -20,6 +20,7 @@
#![feature(generic_nonzero)]
#![feature(inline_const)]
#![feature(pointer_is_aligned)]
+#![feature(slice_ptr_len)]
//
// Stable since Rust 1.80.0.
#![feature(slice_flatten)]
@@ -37,6 +38,9 @@
#![feature(const_ptr_write)]
#![feature(const_refs_to_cell)]
//
+// Stable since Rust 1.84.0.
+#![feature(strict_provenance)]
+//
// Expected to become stable.
#![feature(arbitrary_self_types)]
//
diff --git a/rust/kernel/ptr.rs b/rust/kernel/ptr.rs
index 5b6a382637fe..bdc2d79ff669 100644
--- a/rust/kernel/ptr.rs
+++ b/rust/kernel/ptr.rs
@@ -2,7 +2,13 @@
//! Types and functions to work with pointers and addresses.
-use core::mem::align_of;
+pub mod projection;
+pub use crate::project_pointer as project;
+
+use core::mem::{
+ align_of,
+ size_of, //
+};
use core::num::NonZero;
/// Type representing an alignment, which is always a power of two.
@@ -225,3 +231,25 @@ macro_rules! impl_alignable_uint {
}
impl_alignable_uint!(u8, u16, u32, u64, usize);
+
+/// Trait to represent compile-time known size information.
+///
+/// This is a generalization of [`size_of`] that works for dynamically sized types.
+pub trait KnownSize {
+ /// Get the size of an object of this type in bytes, with the metadata of the given pointer.
+ fn size(p: *const Self) -> usize;
+}
+
+impl<T> KnownSize for T {
+ #[inline(always)]
+ fn size(_: *const Self) -> usize {
+ size_of::<T>()
+ }
+}
+
+impl<T> KnownSize for [T] {
+ #[inline(always)]
+ fn size(p: *const Self) -> usize {
+ p.len() * size_of::<T>()
+ }
+}
diff --git a/rust/kernel/ptr/projection.rs b/rust/kernel/ptr/projection.rs
new file mode 100644
index 000000000000..140ea8e21617
--- /dev/null
+++ b/rust/kernel/ptr/projection.rs
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0
+
+//! Infrastructure for handling projections.
+
+use core::{
+ mem::MaybeUninit,
+ ops::Deref, //
+};
+
+use crate::prelude::*;
+
+/// Error raised when a projection is attempted on an array or slice out of bounds.
+pub struct OutOfBound;
+
+impl From<OutOfBound> for Error {
+ #[inline(always)]
+ fn from(_: OutOfBound) -> Self {
+ ERANGE
+ }
+}
+
+/// A helper trait to perform index projection.
+///
+/// This is similar to [`core::slice::SliceIndex`], but operates on raw pointers safely and
+/// fallibly.
+///
+/// # Safety
+///
+/// The implementation of `index` and `get` (if [`Some`] is returned) must ensure that, if provided
+/// input pointer `slice` and returned pointer `output`, then:
+/// - `output` has the same provenance as `slice`;
+/// - `output.byte_offset_from(slice)` is between 0 and
+/// `KnownSize::size(slice) - KnownSize::size(output)`.
+///
+/// This means that if the input pointer is valid, then pointer returned by `get` or `index` is
+/// also valid.
+#[diagnostic::on_unimplemented(message = "`{Self}` cannot be used to index `{T}`")]
+#[doc(hidden)]
+pub unsafe trait ProjectIndex<T: ?Sized>: Sized {
+ type Output: ?Sized;
+
+ /// Returns an index-projected pointer, if in bounds.
+ fn get(self, slice: *mut T) -> Option<*mut Self::Output>;
+
+ /// Returns an index-projected pointer; fail the build if it cannot be proved to be in bounds.
+ #[inline(always)]
+ fn index(self, slice: *mut T) -> *mut Self::Output {
+ Self::get(self, slice).unwrap_or_else(|| build_error!())
+ }
+}
+
+// Forward array impl to slice impl.
+//
+// SAFETY: Safety requirement guaranteed by the forwarded impl.
+unsafe impl<T, I, const N: usize> ProjectIndex<[T; N]> for I
+where
+ I: ProjectIndex<[T]>,
+{
+ type Output = <I as ProjectIndex<[T]>>::Output;
+
+ #[inline(always)]
+ fn get(self, slice: *mut [T; N]) -> Option<*mut Self::Output> {
+ <I as ProjectIndex<[T]>>::get(self, slice)
+ }
+
+ #[inline(always)]
+ fn index(self, slice: *mut [T; N]) -> *mut Self::Output {
+ <I as ProjectIndex<[T]>>::index(self, slice)
+ }
+}
+
+// SAFETY: `get`-returned pointer has the same provenance as `slice` and the offset is checked to
+// not exceed the required bound.
+unsafe impl<T> ProjectIndex<[T]> for usize {
+ type Output = T;
+
+ #[inline(always)]
+ fn get(self, slice: *mut [T]) -> Option<*mut T> {
+ if self >= slice.len() {
+ None
+ } else {
+ Some(slice.cast::<T>().wrapping_add(self))
+ }
+ }
+}
+
+// SAFETY: `get`-returned pointer has the same provenance as `slice` and the offset is checked to
+// not exceed the required bound.
+unsafe impl<T> ProjectIndex<[T]> for core::ops::Range<usize> {
+ type Output = [T];
+
+ #[inline(always)]
+ fn get(self, slice: *mut [T]) -> Option<*mut [T]> {
+ let new_len = self.end.checked_sub(self.start)?;
+ if self.end > slice.len() {
+ return None;
+ }
+ Some(core::ptr::slice_from_raw_parts_mut(
+ slice.cast::<T>().wrapping_add(self.start),
+ new_len,
+ ))
+ }
+}
+
+// SAFETY: Safety requirement guaranteed by the forwarded impl.
+unsafe impl<T> ProjectIndex<[T]> for core::ops::RangeTo<usize> {
+ type Output = [T];
+
+ #[inline(always)]
+ fn get(self, slice: *mut [T]) -> Option<*mut [T]> {
+ (0..self.end).get(slice)
+ }
+}
+
+// SAFETY: Safety requirement guaranteed by the forwarded impl.
+unsafe impl<T> ProjectIndex<[T]> for core::ops::RangeFrom<usize> {
+ type Output = [T];
+
+ #[inline(always)]
+ fn get(self, slice: *mut [T]) -> Option<*mut [T]> {
+ (self.start..slice.len()).get(slice)
+ }
+}
+
+// SAFETY: `get` returned the pointer as is, so it always has the same provenance and offset of 0.
+unsafe impl<T> ProjectIndex<[T]> for core::ops::RangeFull {
+ type Output = [T];
+
+ #[inline(always)]
+ fn get(self, slice: *mut [T]) -> Option<*mut [T]> {
+ Some(slice)
+ }
+}
+
+/// A helper trait to perform field projection.
+///
+/// This trait has a `DEREF` generic parameter so it can be implemented twice for types that
+/// implement [`Deref`]. This will cause an ambiguity error and thus block [`Deref`] types being
+/// used as base of projection, as they can inject unsoundness. Users therefore must not specify
+/// `DEREF` and should always leave it to be inferred.
+///
+/// # Safety
+///
+/// `proj` may only invoke `f` with a valid allocation, as the documentation of [`Self::proj`]
+/// describes.
+#[doc(hidden)]
+pub unsafe trait ProjectField<const DEREF: bool> {
+ /// Project a pointer to a type to a pointer of a field.
+ ///
+ /// `f` may only be invoked with a valid allocation so it can safely obtain raw pointers to
+ /// fields using `&raw mut`.
+ ///
+ /// This is needed because `base` might not point to a valid allocation, while `&raw mut`
+ /// requires pointers to be in bounds of a valid allocation.
+ ///
+ /// # Safety
+ ///
+ /// `f` must return a pointer in bounds of the provided pointer.
+ unsafe fn proj<F>(base: *mut Self, f: impl FnOnce(*mut Self) -> *mut F) -> *mut F;
+}
+
+// NOTE: in theory, this API should work for `T: ?Sized` and `F: ?Sized`, too. However, we cannot
+// currently support that as we need to obtain a valid allocation that `&raw const` can operate on.
+//
+// SAFETY: `proj` invokes `f` with valid allocation.
+unsafe impl<T> ProjectField<false> for T {
+ #[inline(always)]
+ unsafe fn proj<F>(base: *mut Self, f: impl FnOnce(*mut Self) -> *mut F) -> *mut F {
+ // Create a valid allocation to start projection, as `base` is not necessarily so. The
+ // memory is never actually used and will be optimized out, so it should work even for
+ // very large `T` (`memoffset` crate also relies on this). To be extra certain, we also
+ // annotate `f` closure with `#[inline(always)]` in the macro.
+ let mut place = MaybeUninit::uninit();
+ let place_base = place.as_mut_ptr();
+ let field = f(place_base);
+ // SAFETY: `field` is in bounds from `base` per safety requirement.
+ let offset = unsafe { field.byte_offset_from(place_base) };
+ // Use `wrapping_byte_offset` as `base` does not need to be of valid allocation.
+ base.wrapping_byte_offset(offset).cast()
+ }
+}
+
+// SAFETY: Vacuously satisfied.
+unsafe impl<T: Deref> ProjectField<true> for T {
+ #[inline(always)]
+ unsafe fn proj<F>(_: *mut Self, _: impl FnOnce(*mut Self) -> *mut F) -> *mut F {
+ build_error!("this function is a guard against `Deref` impl and is never invoked");
+ }
+}
+
+/// Create a projection from a raw pointer.
+///
+/// The projected pointer is within the memory region marked by the input pointer. There is no
+/// requirement that the input raw pointer be valid, so this macro may be used for
+/// projecting pointers outside normal address space, e.g. I/O pointers. However, if the input
+/// pointer is valid, the projected pointer is also valid.
+///
+/// Supported projections include field projections and index projections.
+/// It is not allowed to project into types that implement custom [`Deref`] or
+/// [`Index`](core::ops::Index).
+///
+/// The macro has basic syntax of `kernel::ptr::project!(ptr, projection)`, where `ptr` is an
+/// expression that evaluates to a raw pointer which serves as the base of projection. `projection`
+/// can be a projection expression of form `.field` (normally identifier, or numeral in case of
+/// tuple structs) or of form `[index]`.
+///
+/// If a mutable pointer is needed, the macro input can be prefixed with the `mut` keyword, i.e.
+/// `kernel::ptr::project!(mut ptr, projection)`. By default, a const pointer is created.
+///
+/// `ptr::project!` macro can perform both fallible indexing and build-time checked indexing.
+/// `[index]` form performs build-time bounds checking; if compiler fails to prove `[index]` is in
+/// bounds, compilation will fail. `[index]?` can be used to perform runtime bounds checking;
+/// `OutOfBound` error is raised via `?` if the index is out of bounds.
+///
+/// # Examples
+///
+/// Field projections are performed with `.field_name`:
+///
+/// ```
+/// struct MyStruct { field: u32, }
+/// let ptr: *const MyStruct = core::ptr::dangling();
+/// let field_ptr: *const u32 = kernel::ptr::project!(ptr, .field);
+///
+/// struct MyTupleStruct(u32, u32);
+///
+/// fn proj(ptr: *const MyTupleStruct) {
+/// let field_ptr: *const u32 = kernel::ptr::project!(ptr, .1);
+/// }
+/// ```
+///
+/// Index projections are performed with `[index]`:
+///
+/// ```
+/// fn proj(ptr: *const [u8; 32]) -> Result {
+/// let field_ptr: *const u8 = kernel::ptr::project!(ptr, [1]);
+/// // The following invocation, if uncommented, would fail the build.
+/// //
+/// // kernel::ptr::project!(ptr, [128]);
+///
+/// // This will raise an `OutOfBound` error (which is convertible to `ERANGE`).
+/// kernel::ptr::project!(ptr, [128]?);
+/// Ok(())
+/// }
+/// ```
+///
+/// If you need to match on the error instead of propagate, put the invocation inside a closure:
+///
+/// ```
+/// let ptr: *const [u8; 32] = core::ptr::dangling();
+/// let field_ptr: Result<*const u8> = (|| -> Result<_> {
+/// Ok(kernel::ptr::project!(ptr, [128]?))
+/// })();
+/// assert!(field_ptr.is_err());
+/// ```
+///
+/// For mutable pointers, put `mut` as the first token in macro invocation.
+///
+/// ```
+/// let ptr: *mut [(u8, u16); 32] = core::ptr::dangling_mut();
+/// let field_ptr: *mut u16 = kernel::ptr::project!(mut ptr, [1].1);
+/// ```
+#[macro_export]
+macro_rules! project_pointer {
+ (@gen $ptr:ident, ) => {};
+ // Field projection. `$field` needs to be `tt` to support tuple index like `.0`.
+ (@gen $ptr:ident, .$field:tt $($rest:tt)*) => {
+ // SAFETY: The provided closure always returns an in-bounds pointer.
+ let $ptr = unsafe {
+ $crate::ptr::projection::ProjectField::proj($ptr, #[inline(always)] |ptr| {
+ // Check unaligned field. Not all users (e.g. DMA) can handle unaligned
+ // projections.
+ if false {
+ let _ = &(*ptr).$field;
+ }
+ // SAFETY: `$field` is in bounds, and no implicit `Deref` is possible (if the
+ // type implements `Deref`, Rust cannot infer the generic parameter `DEREF`).
+ &raw mut (*ptr).$field
+ })
+ };
+ $crate::ptr::project!(@gen $ptr, $($rest)*)
+ };
+ // Fallible index projection.
+ (@gen $ptr:ident, [$index:expr]? $($rest:tt)*) => {
+ let $ptr = $crate::ptr::projection::ProjectIndex::get($index, $ptr)
+ .ok_or($crate::ptr::projection::OutOfBound)?;
+ $crate::ptr::project!(@gen $ptr, $($rest)*)
+ };
+ // Build-time checked index projection.
+ (@gen $ptr:ident, [$index:expr] $($rest:tt)*) => {
+ let $ptr = $crate::ptr::projection::ProjectIndex::index($index, $ptr);
+ $crate::ptr::project!(@gen $ptr, $($rest)*)
+ };
+ (mut $ptr:expr, $($proj:tt)*) => {{
+ let ptr: *mut _ = $ptr;
+ $crate::ptr::project!(@gen ptr, $($proj)*);
+ ptr
+ }};
+ ($ptr:expr, $($proj:tt)*) => {{
+ let ptr = <*const _>::cast_mut($ptr);
+ // We currently always project using mutable pointer, as it is not decided whether `&raw
+ // const` allows the resulting pointer to be mutated (see documentation of `addr_of!`).
+ $crate::ptr::project!(@gen ptr, $($proj)*);
+ ptr.cast_const()
+ }};
+}
diff --git a/rust/kernel/str.rs b/rust/kernel/str.rs
index fa87779d2253..3f8918764640 100644
--- a/rust/kernel/str.rs
+++ b/rust/kernel/str.rs
@@ -664,13 +664,13 @@ impl fmt::Write for Formatter<'_> {
///
/// * The first byte of `buffer` is always zero.
/// * The length of `buffer` is at least 1.
-pub(crate) struct NullTerminatedFormatter<'a> {
+pub struct NullTerminatedFormatter<'a> {
buffer: &'a mut [u8],
}
impl<'a> NullTerminatedFormatter<'a> {
/// Create a new [`Self`] instance.
- pub(crate) fn new(buffer: &'a mut [u8]) -> Option<NullTerminatedFormatter<'a>> {
+ pub fn new(buffer: &'a mut [u8]) -> Option<NullTerminatedFormatter<'a>> {
*(buffer.first_mut()?) = 0;
// INVARIANT:
diff --git a/rust/pin-init/internal/src/init.rs b/rust/pin-init/internal/src/init.rs
index 42936f915a07..2fe918f4d82a 100644
--- a/rust/pin-init/internal/src/init.rs
+++ b/rust/pin-init/internal/src/init.rs
@@ -62,7 +62,6 @@ impl InitializerKind {
enum InitializerAttribute {
DefaultError(DefaultErrorAttribute),
- DisableInitializedFieldAccess,
}
struct DefaultErrorAttribute {
@@ -86,6 +85,7 @@ pub(crate) fn expand(
let error = error.map_or_else(
|| {
if let Some(default_error) = attrs.iter().fold(None, |acc, attr| {
+ #[expect(irrefutable_let_patterns)]
if let InitializerAttribute::DefaultError(DefaultErrorAttribute { ty }) = attr {
Some(ty.clone())
} else {
@@ -145,22 +145,9 @@ pub(crate) fn expand(
};
// `mixed_site` ensures that the data is not accessible to the user-controlled code.
let data = Ident::new("__data", Span::mixed_site());
- let init_fields = init_fields(
- &fields,
- pinned,
- !attrs
- .iter()
- .any(|attr| matches!(attr, InitializerAttribute::DisableInitializedFieldAccess)),
- &data,
- &slot,
- );
+ let init_fields = init_fields(&fields, pinned, &data, &slot);
let field_check = make_field_check(&fields, init_kind, &path);
Ok(quote! {{
- // We do not want to allow arbitrary returns, so we declare this type as the `Ok` return
- // type and shadow it later when we insert the arbitrary user code. That way there will be
- // no possibility of returning without `unsafe`.
- struct __InitOk;
-
// Get the data about fields from the supplied type.
// SAFETY: TODO
let #data = unsafe {
@@ -170,18 +157,15 @@ pub(crate) fn expand(
#path::#get_data()
};
// Ensure that `#data` really is of type `#data` and help with type inference:
- let init = ::pin_init::__internal::#data_trait::make_closure::<_, __InitOk, #error>(
+ let init = ::pin_init::__internal::#data_trait::make_closure::<_, #error>(
#data,
move |slot| {
- {
- // Shadow the structure so it cannot be used to return early.
- struct __InitOk;
- #zeroable_check
- #this
- #init_fields
- #field_check
- }
- Ok(__InitOk)
+ #zeroable_check
+ #this
+ #init_fields
+ #field_check
+ // SAFETY: we are the `init!` macro that is allowed to call this.
+ Ok(unsafe { ::pin_init::__internal::InitOk::new() })
}
);
let init = move |slot| -> ::core::result::Result<(), #error> {
@@ -236,7 +220,6 @@ fn get_init_kind(rest: Option<(Token![..], Expr)>, dcx: &mut DiagCtxt) -> InitKi
fn init_fields(
fields: &Punctuated<InitializerField, Token![,]>,
pinned: bool,
- generate_initialized_accessors: bool,
data: &Ident,
slot: &Ident,
) -> TokenStream {
@@ -260,6 +243,10 @@ fn init_fields(
});
// Again span for better diagnostics
let write = quote_spanned!(ident.span()=> ::core::ptr::write);
+ // NOTE: the field accessor ensures that the initialized field is properly aligned.
+ // Unaligned fields will cause the compiler to emit E0793. We do not support
+ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
+ // `ptr::write` below has the same requirement.
let accessor = if pinned {
let project_ident = format_ident!("__project_{ident}");
quote! {
@@ -272,13 +259,6 @@ fn init_fields(
unsafe { &mut (*#slot).#ident }
}
};
- let accessor = generate_initialized_accessors.then(|| {
- quote! {
- #(#cfgs)*
- #[allow(unused_variables)]
- let #ident = #accessor;
- }
- });
quote! {
#(#attrs)*
{
@@ -286,12 +266,18 @@ fn init_fields(
// SAFETY: TODO
unsafe { #write(::core::ptr::addr_of_mut!((*#slot).#ident), #value_ident) };
}
- #accessor
+ #(#cfgs)*
+ #[allow(unused_variables)]
+ let #ident = #accessor;
}
}
InitializerKind::Init { ident, value, .. } => {
// Again span for better diagnostics
let init = format_ident!("init", span = value.span());
+ // NOTE: the field accessor ensures that the initialized field is properly aligned.
+ // Unaligned fields will cause the compiler to emit E0793. We do not support
+ // unaligned fields since `Init::__init` requires an aligned pointer; the call to
+ // `ptr::write` below has the same requirement.
let (value_init, accessor) = if pinned {
let project_ident = format_ident!("__project_{ident}");
(
@@ -326,20 +312,15 @@ fn init_fields(
},
)
};
- let accessor = generate_initialized_accessors.then(|| {
- quote! {
- #(#cfgs)*
- #[allow(unused_variables)]
- let #ident = #accessor;
- }
- });
quote! {
#(#attrs)*
{
let #init = #value;
#value_init
}
- #accessor
+ #(#cfgs)*
+ #[allow(unused_variables)]
+ let #ident = #accessor;
}
}
InitializerKind::Code { block: value, .. } => quote! {
@@ -466,10 +447,6 @@ impl Parse for Initializer {
if a.path().is_ident("default_error") {
a.parse_args::<DefaultErrorAttribute>()
.map(InitializerAttribute::DefaultError)
- } else if a.path().is_ident("disable_initialized_field_access") {
- a.meta
- .require_path_only()
- .map(|_| InitializerAttribute::DisableInitializedFieldAccess)
} else {
Err(syn::Error::new_spanned(a, "unknown initializer attribute"))
}
diff --git a/rust/pin-init/src/__internal.rs b/rust/pin-init/src/__internal.rs
index 90f18e9a2912..90adbdc1893b 100644
--- a/rust/pin-init/src/__internal.rs
+++ b/rust/pin-init/src/__internal.rs
@@ -46,6 +46,24 @@ where
}
}
+/// Token type to signify successful initialization.
+///
+/// Can only be constructed via the unsafe [`Self::new`] function. The initializer macros use this
+/// token type to prevent returning `Ok` from an initializer without initializing all fields.
+pub struct InitOk(());
+
+impl InitOk {
+ /// Creates a new token.
+ ///
+ /// # Safety
+ ///
+ /// This function may only be called from the `init!` macro in `../internal/src/init.rs`.
+ #[inline(always)]
+ pub unsafe fn new() -> Self {
+ Self(())
+ }
+}
+
/// This trait is only implemented via the `#[pin_data]` proc-macro. It is used to facilitate
/// the pin projections within the initializers.
///
@@ -68,9 +86,10 @@ pub unsafe trait PinData: Copy {
type Datee: ?Sized + HasPinData;
/// Type inference helper function.
- fn make_closure<F, O, E>(self, f: F) -> F
+ #[inline(always)]
+ fn make_closure<F, E>(self, f: F) -> F
where
- F: FnOnce(*mut Self::Datee) -> Result<O, E>,
+ F: FnOnce(*mut Self::Datee) -> Result<InitOk, E>,
{
f
}
@@ -98,9 +117,10 @@ pub unsafe trait InitData: Copy {
type Datee: ?Sized + HasInitData;
/// Type inference helper function.
- fn make_closure<F, O, E>(self, f: F) -> F
+ #[inline(always)]
+ fn make_closure<F, E>(self, f: F) -> F
where
- F: FnOnce(*mut Self::Datee) -> Result<O, E>,
+ F: FnOnce(*mut Self::Datee) -> Result<InitOk, E>,
{
f
}
diff --git a/samples/rust/rust_dma.rs b/samples/rust/rust_dma.rs
index 9c45851c876e..ce39b5545097 100644
--- a/samples/rust/rust_dma.rs
+++ b/samples/rust/rust_dma.rs
@@ -68,7 +68,7 @@ impl pci::Driver for DmaSampleDriver {
CoherentAllocation::alloc_coherent(pdev.as_ref(), TEST_VALUES.len(), GFP_KERNEL)?;
for (i, value) in TEST_VALUES.into_iter().enumerate() {
- kernel::dma_write!(ca[i] = MyStruct::new(value.0, value.1))?;
+ kernel::dma_write!(ca, [i]?, MyStruct::new(value.0, value.1));
}
let size = 4 * page::PAGE_SIZE;
@@ -85,24 +85,26 @@ impl pci::Driver for DmaSampleDriver {
}
}
+impl DmaSampleDriver {
+ fn check_dma(&self) -> Result {
+ for (i, value) in TEST_VALUES.into_iter().enumerate() {
+ let val0 = kernel::dma_read!(self.ca, [i]?.h);
+ let val1 = kernel::dma_read!(self.ca, [i]?.b);
+
+ assert_eq!(val0, value.0);
+ assert_eq!(val1, value.1);
+ }
+
+ Ok(())
+ }
+}
+
#[pinned_drop]
impl PinnedDrop for DmaSampleDriver {
fn drop(self: Pin<&mut Self>) {
dev_info!(self.pdev, "Unload DMA test driver.\n");
- for (i, value) in TEST_VALUES.into_iter().enumerate() {
- let val0 = kernel::dma_read!(self.ca[i].h);
- let val1 = kernel::dma_read!(self.ca[i].b);
- assert!(val0.is_ok());
- assert!(val1.is_ok());
-
- if let Ok(val0) = val0 {
- assert_eq!(val0, value.0);
- }
- if let Ok(val1) = val1 {
- assert_eq!(val1, value.1);
- }
- }
+ assert!(self.check_dma().is_ok());
for (i, entry) in self.sgt.iter().enumerate() {
dev_info!(
diff --git a/samples/workqueue/stall_detector/Makefile b/samples/workqueue/stall_detector/Makefile
new file mode 100644
index 000000000000..8849e85e95bb
--- /dev/null
+++ b/samples/workqueue/stall_detector/Makefile
@@ -0,0 +1 @@
+obj-m += wq_stall.o
diff --git a/samples/workqueue/stall_detector/wq_stall.c b/samples/workqueue/stall_detector/wq_stall.c
new file mode 100644
index 000000000000..6f4a497b1881
--- /dev/null
+++ b/samples/workqueue/stall_detector/wq_stall.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * wq_stall - Test module for the workqueue stall detector.
+ *
+ * Deliberately creates a workqueue stall so the watchdog fires and
+ * prints diagnostic output. Useful for verifying that the stall
+ * detector correctly identifies stuck workers and produces useful
+ * backtraces.
+ *
+ * The stall is triggered by clearing PF_WQ_WORKER before sleeping,
+ * which hides the worker from the concurrency manager. A second
+ * work item queued on the same pool then sits in the worklist with
+ * no worker available to process it.
+ *
+ * After ~30s the workqueue watchdog fires:
+ * BUG: workqueue lockup - pool cpus=N ...
+ *
+ * Build:
+ * make -C <kernel tree> M=samples/workqueue/stall_detector modules
+ *
+ * Copyright (c) 2026 Meta Platforms, Inc. and affiliates.
+ * Copyright (c) 2026 Breno Leitao <leitao@debian.org>
+ */
+
+#include <linux/module.h>
+#include <linux/workqueue.h>
+#include <linux/wait.h>
+#include <linux/atomic.h>
+#include <linux/sched.h>
+
+static DECLARE_WAIT_QUEUE_HEAD(stall_wq_head);
+static atomic_t wake_condition = ATOMIC_INIT(0);
+static struct work_struct stall_work1;
+static struct work_struct stall_work2;
+
+static void stall_work2_fn(struct work_struct *work)
+{
+ pr_info("wq_stall: second work item finally ran\n");
+}
+
+static void stall_work1_fn(struct work_struct *work)
+{
+ pr_info("wq_stall: first work item running on cpu %d\n",
+ raw_smp_processor_id());
+
+ /*
+ * Queue second item while we're still counted as running
+ * (pool->nr_running > 0). Since schedule_work() on a per-CPU
+ * workqueue targets raw_smp_processor_id(), item 2 lands on the
+ * same pool. __queue_work -> kick_pool -> need_more_worker()
+ * sees nr_running > 0 and does NOT wake a new worker.
+ */
+ schedule_work(&stall_work2);
+
+ /*
+ * Hide from the workqueue concurrency manager. Without
+ * PF_WQ_WORKER, schedule() won't call wq_worker_sleeping(),
+ * so nr_running is never decremented and no replacement
+ * worker is created. Item 2 stays stuck in pool->worklist.
+ */
+ current->flags &= ~PF_WQ_WORKER;
+
+ pr_info("wq_stall: entering wait_event_idle (PF_WQ_WORKER cleared)\n");
+ pr_info("wq_stall: expect 'BUG: workqueue lockup' in ~30-60s\n");
+ wait_event_idle(stall_wq_head, atomic_read(&wake_condition) != 0);
+
+ /* Restore so process_one_work() cleanup works correctly */
+ current->flags |= PF_WQ_WORKER;
+ pr_info("wq_stall: woke up, PF_WQ_WORKER restored\n");
+}
+
+static int __init wq_stall_init(void)
+{
+ pr_info("wq_stall: loading\n");
+
+ INIT_WORK(&stall_work1, stall_work1_fn);
+ INIT_WORK(&stall_work2, stall_work2_fn);
+ schedule_work(&stall_work1);
+
+ return 0;
+}
+
+static void __exit wq_stall_exit(void)
+{
+ pr_info("wq_stall: unloading\n");
+ atomic_set(&wake_condition, 1);
+ wake_up(&stall_wq_head);
+ flush_work(&stall_work1);
+ flush_work(&stall_work2);
+ pr_info("wq_stall: all work flushed, module unloaded\n");
+}
+
+module_init(wq_stall_init);
+module_exit(wq_stall_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Reproduce workqueue stall caused by PF_WQ_WORKER misuse");
+MODULE_AUTHOR("Breno Leitao <leitao@debian.org>");
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 32e209bc7985..3652b85be545 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -310,16 +310,18 @@ $(obj)/%.lst: $(obj)/%.c FORCE
# The features in this list are the ones allowed for non-`rust/` code.
#
+# - Stable since Rust 1.79.0: `feature(slice_ptr_len)`.
# - Stable since Rust 1.81.0: `feature(lint_reasons)`.
# - Stable since Rust 1.82.0: `feature(asm_const)`,
# `feature(offset_of_nested)`, `feature(raw_ref_op)`.
+# - Stable since Rust 1.84.0: `feature(strict_provenance)`.
# - Stable since Rust 1.87.0: `feature(asm_goto)`.
# - Expected to become stable: `feature(arbitrary_self_types)`.
# - To be determined: `feature(used_with_arg)`.
#
# Please see https://github.com/Rust-for-Linux/linux/issues/2 for details on
# the unstable features in use.
-rust_allowed_features := asm_const,asm_goto,arbitrary_self_types,lint_reasons,offset_of_nested,raw_ref_op,used_with_arg
+rust_allowed_features := asm_const,asm_goto,arbitrary_self_types,lint_reasons,offset_of_nested,raw_ref_op,slice_ptr_len,strict_provenance,used_with_arg
# `--out-dir` is required to avoid temporaries being created by `rustc` in the
# current working directory, which may be not accessible in the out-of-tree
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 67cf6a0e17ba..5a64453da728 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2144,6 +2144,10 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
for (;;) {
long tout;
struct snd_pcm_runtime *to_check;
+ unsigned int drain_rate;
+ snd_pcm_uframes_t drain_bufsz;
+ bool drain_no_period_wakeup;
+
if (signal_pending(current)) {
result = -ERESTARTSYS;
break;
@@ -2163,16 +2167,25 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream,
snd_pcm_group_unref(group, substream);
if (!to_check)
break; /* all drained */
+ /*
+ * Cache the runtime fields needed after unlock.
+ * A concurrent close() on the linked stream may free
+ * its runtime via snd_pcm_detach_substream() once we
+ * release the stream lock below.
+ */
+ drain_no_period_wakeup = to_check->no_period_wakeup;
+ drain_rate = to_check->rate;
+ drain_bufsz = to_check->buffer_size;
init_waitqueue_entry(&wait, current);
set_current_state(TASK_INTERRUPTIBLE);
add_wait_queue(&to_check->sleep, &wait);
snd_pcm_stream_unlock_irq(substream);
- if (runtime->no_period_wakeup)
+ if (drain_no_period_wakeup)
tout = MAX_SCHEDULE_TIMEOUT;
else {
tout = 100;
- if (runtime->rate) {
- long t = runtime->buffer_size * 1100 / runtime->rate;
+ if (drain_rate) {
+ long t = drain_bufsz * 1100 / drain_rate;
tout = max(t, tout);
}
tout = msecs_to_jiffies(tout);
diff --git a/sound/hda/codecs/realtek/alc269.c b/sound/hda/codecs/realtek/alc269.c
index 4c49f1195e1b..ab4b22fcb72e 100644
--- a/sound/hda/codecs/realtek/alc269.c
+++ b/sound/hda/codecs/realtek/alc269.c
@@ -6940,6 +6940,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x89da, "HP Spectre x360 14t-ea100", ALC245_FIXUP_HP_SPECTRE_X360_EU0XXX),
SND_PCI_QUIRK(0x103c, 0x89e7, "HP Elite x2 G9", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED),
SND_PCI_QUIRK(0x103c, 0x8a0f, "HP Pavilion 14-ec1xxx", ALC287_FIXUP_HP_GPIO_LED),
+ SND_PCI_QUIRK(0x103c, 0x8a1f, "HP Laptop 14s-dr5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x8a20, "HP Laptop 15s-fq5xxx", ALC236_FIXUP_HP_MUTE_LED_COEFBIT2),
SND_PCI_QUIRK(0x103c, 0x8a25, "HP Victus 16-d1xxx (MB 8A25)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
SND_PCI_QUIRK(0x103c, 0x8a26, "HP Victus 16-d1xxx (MB 8A26)", ALC245_FIXUP_HP_MUTE_LED_COEFBIT),
@@ -7273,6 +7274,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x1e93, "ASUS ExpertBook B9403CVAR", ALC294_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x1eb3, "ASUS Ally RCLA72", ALC287_FIXUP_TAS2781_I2C),
SND_PCI_QUIRK(0x1043, 0x1ed3, "ASUS HN7306W", ALC287_FIXUP_CS35L41_I2C_2),
+ HDA_CODEC_QUIRK(0x1043, 0x1ee2, "ASUS UM6702RA/RC", ALC285_FIXUP_ASUS_I2C_SPEAKER2_TO_DAC1),
SND_PCI_QUIRK(0x1043, 0x1ee2, "ASUS UM6702RA/RC", ALC287_FIXUP_CS35L41_I2C_2),
SND_PCI_QUIRK(0x1043, 0x1c52, "ASUS Zephyrus G15 2022", ALC289_FIXUP_ASUS_GA401),
SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401),
@@ -7493,6 +7495,7 @@ static const struct hda_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x2288, "Thinkpad X390", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK),
SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
SND_PCI_QUIRK(0x17aa, 0x22be, "Thinkpad X1 Carbon 8th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
SND_PCI_QUIRK(0x17aa, 0x22c1, "Thinkpad P1 Gen 3", ALC285_FIXUP_THINKPAD_NO_BASS_SPK_HEADSET_JACK),
diff --git a/sound/hda/codecs/realtek/alc662.c b/sound/hda/codecs/realtek/alc662.c
index 5073165d1f3c..3a943adf9087 100644
--- a/sound/hda/codecs/realtek/alc662.c
+++ b/sound/hda/codecs/realtek/alc662.c
@@ -313,6 +313,7 @@ enum {
ALC897_FIXUP_HEADSET_MIC_PIN2,
ALC897_FIXUP_UNIS_H3C_X500S,
ALC897_FIXUP_HEADSET_MIC_PIN3,
+ ALC897_FIXUP_H610M_HP_PIN,
};
static const struct hda_fixup alc662_fixups[] = {
@@ -766,6 +767,13 @@ static const struct hda_fixup alc662_fixups[] = {
{ }
},
},
+ [ALC897_FIXUP_H610M_HP_PIN] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x0321403f }, /* HP out */
+ { }
+ },
+ },
};
static const struct hda_quirk alc662_fixup_tbl[] = {
@@ -815,6 +823,7 @@ static const struct hda_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x8469, "ASUS mobo", ALC662_FIXUP_NO_JACK_DETECT),
SND_PCI_QUIRK(0x105b, 0x0cd6, "Foxconn", ALC662_FIXUP_ASUS_MODE2),
SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
+ SND_PCI_QUIRK(0x1458, 0xa194, "H610M H V2 DDR4", ALC897_FIXUP_H610M_HP_PIN),
SND_PCI_QUIRK(0x14cd, 0x5003, "USI", ALC662_FIXUP_USI_HEADSET_MODE),
SND_PCI_QUIRK(0x17aa, 0x1036, "Lenovo P520", ALC662_FIXUP_LENOVO_MULTI_CODECS),
SND_PCI_QUIRK(0x17aa, 0x1057, "Lenovo P360", ALC897_FIXUP_HEADSET_MIC_PIN),
diff --git a/sound/soc/amd/acp/acp-mach-common.c b/sound/soc/amd/acp/acp-mach-common.c
index 4d99472c75ba..09f6c9a2c041 100644
--- a/sound/soc/amd/acp/acp-mach-common.c
+++ b/sound/soc/amd/acp/acp-mach-common.c
@@ -127,8 +127,13 @@ static int acp_card_rt5682_init(struct snd_soc_pcm_runtime *rtd)
if (drvdata->hs_codec_id != RT5682)
return -EINVAL;
- drvdata->wclk = clk_get(component->dev, "rt5682-dai-wclk");
- drvdata->bclk = clk_get(component->dev, "rt5682-dai-bclk");
+ drvdata->wclk = devm_clk_get(component->dev, "rt5682-dai-wclk");
+ if (IS_ERR(drvdata->wclk))
+ return PTR_ERR(drvdata->wclk);
+
+ drvdata->bclk = devm_clk_get(component->dev, "rt5682-dai-bclk");
+ if (IS_ERR(drvdata->bclk))
+ return PTR_ERR(drvdata->bclk);
ret = snd_soc_dapm_new_controls(dapm, rt5682_widgets,
ARRAY_SIZE(rt5682_widgets));
@@ -370,8 +375,13 @@ static int acp_card_rt5682s_init(struct snd_soc_pcm_runtime *rtd)
return -EINVAL;
if (!drvdata->soc_mclk) {
- drvdata->wclk = clk_get(component->dev, "rt5682-dai-wclk");
- drvdata->bclk = clk_get(component->dev, "rt5682-dai-bclk");
+ drvdata->wclk = devm_clk_get(component->dev, "rt5682-dai-wclk");
+ if (IS_ERR(drvdata->wclk))
+ return PTR_ERR(drvdata->wclk);
+
+ drvdata->bclk = devm_clk_get(component->dev, "rt5682-dai-bclk");
+ if (IS_ERR(drvdata->bclk))
+ return PTR_ERR(drvdata->bclk);
}
ret = snd_soc_dapm_new_controls(dapm, rt5682s_widgets,
diff --git a/sound/soc/amd/acp3x-rt5682-max9836.c b/sound/soc/amd/acp3x-rt5682-max9836.c
index 4ca1978020a9..d1eb6f12a183 100644
--- a/sound/soc/amd/acp3x-rt5682-max9836.c
+++ b/sound/soc/amd/acp3x-rt5682-max9836.c
@@ -94,8 +94,13 @@ static int acp3x_5682_init(struct snd_soc_pcm_runtime *rtd)
return ret;
}
- rt5682_dai_wclk = clk_get(component->dev, "rt5682-dai-wclk");
- rt5682_dai_bclk = clk_get(component->dev, "rt5682-dai-bclk");
+ rt5682_dai_wclk = devm_clk_get(component->dev, "rt5682-dai-wclk");
+ if (IS_ERR(rt5682_dai_wclk))
+ return PTR_ERR(rt5682_dai_wclk);
+
+ rt5682_dai_bclk = devm_clk_get(component->dev, "rt5682-dai-bclk");
+ if (IS_ERR(rt5682_dai_bclk))
+ return PTR_ERR(rt5682_dai_bclk);
ret = snd_soc_card_jack_new_pins(card, "Headset Jack",
SND_JACK_HEADSET |
diff --git a/sound/soc/codecs/rt1011.c b/sound/soc/codecs/rt1011.c
index 9f34a6a35487..03f31d9d916e 100644
--- a/sound/soc/codecs/rt1011.c
+++ b/sound/soc/codecs/rt1011.c
@@ -1047,7 +1047,7 @@ static int rt1011_recv_spk_mode_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
- struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_to_dapm(kcontrol);
+ struct snd_soc_dapm_context *dapm = snd_soc_component_to_dapm(component);
struct rt1011_priv *rt1011 =
snd_soc_component_get_drvdata(component);
diff --git a/sound/soc/generic/simple-card-utils.c b/sound/soc/generic/simple-card-utils.c
index bdc02e85b089..9e5be0eaa77f 100644
--- a/sound/soc/generic/simple-card-utils.c
+++ b/sound/soc/generic/simple-card-utils.c
@@ -1038,11 +1038,15 @@ int graph_util_is_ports0(struct device_node *np)
else
port = np;
- struct device_node *ports __free(device_node) = of_get_parent(port);
- struct device_node *top __free(device_node) = of_get_parent(ports);
- struct device_node *ports0 __free(device_node) = of_get_child_by_name(top, "ports");
+ struct device_node *ports __free(device_node) = of_get_parent(port);
+ const char *at = strchr(kbasename(ports->full_name), '@');
- return ports0 == ports;
+ /*
+ * Since child iteration order may differ
+ * between a base DT and DT overlays,
+ * string match "ports" or "ports@0" in the node name instead.
+ */
+ return !at || !strcmp(at, "@0");
}
EXPORT_SYMBOL_GPL(graph_util_is_ports0);
diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c
index de3bdac3e791..168c166c960d 100644
--- a/sound/soc/qcom/qdsp6/q6apm-dai.c
+++ b/sound/soc/qcom/qdsp6/q6apm-dai.c
@@ -838,6 +838,7 @@ static const struct snd_soc_component_driver q6apm_fe_dai_component = {
.ack = q6apm_dai_ack,
.compress_ops = &q6apm_dai_compress_ops,
.use_dai_pcm_id = true,
+ .remove_order = SND_SOC_COMP_ORDER_EARLY,
};
static int q6apm_dai_probe(struct platform_device *pdev)
diff --git a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
index 528756f1332b..5be37eeea329 100644
--- a/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
+++ b/sound/soc/qcom/qdsp6/q6apm-lpass-dais.c
@@ -278,6 +278,7 @@ static const struct snd_soc_component_driver q6apm_lpass_dai_component = {
.of_xlate_dai_name = q6dsp_audio_ports_of_xlate_dai_name,
.be_pcm_base = AUDIOREACH_BE_PCM_BASE,
.use_dai_pcm_id = true,
+ .remove_order = SND_SOC_COMP_ORDER_FIRST,
};
static int q6apm_lpass_dai_dev_probe(struct platform_device *pdev)
diff --git a/sound/soc/qcom/qdsp6/q6apm.c b/sound/soc/qcom/qdsp6/q6apm.c
index 44841fde3856..970b08c89bb3 100644
--- a/sound/soc/qcom/qdsp6/q6apm.c
+++ b/sound/soc/qcom/qdsp6/q6apm.c
@@ -715,6 +715,7 @@ static const struct snd_soc_component_driver q6apm_audio_component = {
.name = APM_AUDIO_DRV_NAME,
.probe = q6apm_audio_probe,
.remove = q6apm_audio_remove,
+ .remove_order = SND_SOC_COMP_ORDER_LAST,
};
static int apm_probe(gpr_device_t *gdev)
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index d0fffef65daf..573693e21780 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -462,8 +462,7 @@ static void soc_free_pcm_runtime(struct snd_soc_pcm_runtime *rtd)
list_del(&rtd->list);
- if (delayed_work_pending(&rtd->delayed_work))
- flush_delayed_work(&rtd->delayed_work);
+ flush_delayed_work(&rtd->delayed_work);
snd_soc_pcm_component_free(rtd);
/*
@@ -1864,12 +1863,15 @@ static void cleanup_dmi_name(char *name)
/*
* Check if a DMI field is valid, i.e. not containing any string
- * in the black list.
+ * in the black list and not the empty string.
*/
static int is_dmi_valid(const char *field)
{
int i = 0;
+ if (!field[0])
+ return 0;
+
while (dmi_blacklist[i]) {
if (strstr(field, dmi_blacklist[i]))
return 0;
@@ -2122,6 +2124,9 @@ static void soc_cleanup_card_resources(struct snd_soc_card *card)
for_each_card_rtds(card, rtd)
if (rtd->initialized)
snd_soc_link_exit(rtd);
+ /* flush delayed work before removing DAIs and DAPM widgets */
+ snd_soc_flush_all_delayed_work(card);
+
/* remove and free each DAI */
soc_remove_link_dais(card);
soc_remove_link_components(card);
diff --git a/sound/soc/tegra/tegra_audio_graph_card.c b/sound/soc/tegra/tegra_audio_graph_card.c
index 94b5ab77649b..ea10e6e8a9fe 100644
--- a/sound/soc/tegra/tegra_audio_graph_card.c
+++ b/sound/soc/tegra/tegra_audio_graph_card.c
@@ -231,6 +231,15 @@ static const struct tegra_audio_cdata tegra186_data = {
.plla_out0_rates[x11_RATE] = 45158400,
};
+static const struct tegra_audio_cdata tegra238_data = {
+ /* PLLA */
+ .plla_rates[x8_RATE] = 1277952000,
+ .plla_rates[x11_RATE] = 1264435200,
+ /* PLLA_OUT0 */
+ .plla_out0_rates[x8_RATE] = 49152000,
+ .plla_out0_rates[x11_RATE] = 45158400,
+};
+
static const struct tegra_audio_cdata tegra264_data = {
/* PLLA1 */
.plla_rates[x8_RATE] = 983040000,
@@ -245,6 +254,8 @@ static const struct of_device_id graph_of_tegra_match[] = {
.data = &tegra210_data },
{ .compatible = "nvidia,tegra186-audio-graph-card",
.data = &tegra186_data },
+ { .compatible = "nvidia,tegra238-audio-graph-card",
+ .data = &tegra238_data },
{ .compatible = "nvidia,tegra264-audio-graph-card",
.data = &tegra264_data },
{},
diff --git a/sound/usb/mixer_scarlett2.c b/sound/usb/mixer_scarlett2.c
index ef3150581eab..fd1fb668929a 100644
--- a/sound/usb/mixer_scarlett2.c
+++ b/sound/usb/mixer_scarlett2.c
@@ -8251,6 +8251,8 @@ static int scarlett2_find_fc_interface(struct usb_device *dev,
if (desc->bInterfaceClass != 255)
continue;
+ if (desc->bNumEndpoints < 1)
+ continue;
epd = get_endpoint(intf->altsetting, 0);
private->bInterfaceNumber = desc->bInterfaceNumber;
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index d54a1a44a69b..049a94079f9e 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -2243,6 +2243,8 @@ static const struct usb_audio_quirk_flags_table quirk_flags_table[] = {
QUIRK_FLAG_IFACE_DELAY | QUIRK_FLAG_FORCE_IFACE_RESET),
DEVICE_FLG(0x0661, 0x0883, /* iBasso DC04 Ultra */
QUIRK_FLAG_DSD_RAW),
+ DEVICE_FLG(0x0666, 0x0880, /* SPACETOUCH USB Audio */
+ QUIRK_FLAG_FORCE_IFACE_RESET | QUIRK_FLAG_IFACE_DELAY),
DEVICE_FLG(0x06f8, 0xb000, /* Hercules DJ Console (Windows Edition) */
QUIRK_FLAG_IGNORE_CTL_ERROR),
DEVICE_FLG(0x06f8, 0xd002, /* Hercules DJ Console (Macintosh Edition) */
diff --git a/tools/arch/x86/include/asm/amd/ibs.h b/tools/arch/x86/include/asm/amd/ibs.h
index cbce54fec7b9..41e8abd72c8b 100644
--- a/tools/arch/x86/include/asm/amd/ibs.h
+++ b/tools/arch/x86/include/asm/amd/ibs.h
@@ -110,7 +110,7 @@ union ibs_op_data3 {
__u64 ld_op:1, /* 0: load op */
st_op:1, /* 1: store op */
dc_l1tlb_miss:1, /* 2: data cache L1TLB miss */
- dc_l2tlb_miss:1, /* 3: data cache L2TLB hit in 2M page */
+ dc_l2tlb_miss:1, /* 3: data cache L2TLB miss in 2M page */
dc_l1tlb_hit_2m:1, /* 4: data cache L1TLB hit in 2M page */
dc_l1tlb_hit_1g:1, /* 5: data cache L1TLB hit in 1G page */
dc_l2tlb_hit_2m:1, /* 6: data cache L2TLB hit in 2M page */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index c3b53beb1300..dbe104df339b 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -84,7 +84,7 @@
#define X86_FEATURE_PEBS ( 3*32+12) /* "pebs" Precise-Event Based Sampling */
#define X86_FEATURE_BTS ( 3*32+13) /* "bts" Branch Trace Store */
#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* syscall in IA32 userspace */
-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* sysenter in IA32 userspace */
+#define X86_FEATURE_SYSFAST32 ( 3*32+15) /* sysenter/syscall in IA32 userspace */
#define X86_FEATURE_REP_GOOD ( 3*32+16) /* "rep_good" REP microcode works well */
#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* "amd_lbr_v2" AMD Last Branch Record Extension Version 2 */
#define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* Clear CPU buffers using VERW */
@@ -326,6 +326,7 @@
#define X86_FEATURE_AMX_FP16 (12*32+21) /* AMX fp16 Support */
#define X86_FEATURE_AVX_IFMA (12*32+23) /* Support for VPMADD52[H,L]UQ */
#define X86_FEATURE_LAM (12*32+26) /* "lam" Linear Address Masking */
+#define X86_FEATURE_MOVRS (12*32+31) /* MOVRS instructions */
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
#define X86_FEATURE_CLZERO (13*32+ 0) /* "clzero" CLZERO instruction */
@@ -472,6 +473,7 @@
#define X86_FEATURE_GP_ON_USER_CPUID (20*32+17) /* User CPUID faulting */
#define X86_FEATURE_PREFETCHI (20*32+20) /* Prefetch Data/Instruction to Cache Level */
+#define X86_FEATURE_ERAPS (20*32+24) /* Enhanced Return Address Predictor Security */
#define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */
#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
#define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index 43adc38d31d5..da5275d8eda6 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -263,6 +263,11 @@
#define MSR_SNOOP_RSP_0 0x00001328
#define MSR_SNOOP_RSP_1 0x00001329
+#define MSR_OMR_0 0x000003e0
+#define MSR_OMR_1 0x000003e1
+#define MSR_OMR_2 0x000003e2
+#define MSR_OMR_3 0x000003e3
+
#define MSR_LBR_SELECT 0x000001c8
#define MSR_LBR_TOS 0x000001c9
@@ -1219,6 +1224,7 @@
#define MSR_CORE_PERF_GLOBAL_STATUS 0x0000038e
#define MSR_CORE_PERF_GLOBAL_CTRL 0x0000038f
#define MSR_CORE_PERF_GLOBAL_OVF_CTRL 0x00000390
+#define MSR_CORE_PERF_GLOBAL_STATUS_SET 0x00000391
#define MSR_PERF_METRICS 0x00000329
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 7ceff6583652..846a63215ce1 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -503,6 +503,7 @@ struct kvm_sync_regs {
#define KVM_X86_GRP_SEV 1
# define KVM_X86_SEV_VMSA_FEATURES 0
# define KVM_X86_SNP_POLICY_BITS 1
+# define KVM_X86_SEV_SNP_REQ_CERTS 2
struct kvm_vmx_nested_state_data {
__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
@@ -743,6 +744,7 @@ enum sev_cmd_id {
KVM_SEV_SNP_LAUNCH_START = 100,
KVM_SEV_SNP_LAUNCH_UPDATE,
KVM_SEV_SNP_LAUNCH_FINISH,
+ KVM_SEV_SNP_ENABLE_REQ_CERTS,
KVM_SEV_NR_MAX,
};
@@ -914,8 +916,10 @@ struct kvm_sev_snp_launch_finish {
__u64 pad1[4];
};
-#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
-#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
+#define KVM_X2APIC_API_USE_32BIT_IDS _BITULL(0)
+#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK _BITULL(1)
+#define KVM_X2APIC_ENABLE_SUPPRESS_EOI_BROADCAST _BITULL(2)
+#define KVM_X2APIC_DISABLE_SUPPRESS_EOI_BROADCAST _BITULL(3)
struct kvm_hyperv_eventfd {
__u32 conn_id;
diff --git a/tools/bootconfig/samples/bad-non-closed-brace.bconf b/tools/bootconfig/samples/bad-non-closed-brace.bconf
new file mode 100644
index 000000000000..6ed9f3363dde
--- /dev/null
+++ b/tools/bootconfig/samples/bad-non-closed-brace.bconf
@@ -0,0 +1,4 @@
+foo {
+ bar {
+ buz
+ }
diff --git a/tools/bootconfig/samples/bad-over-max-brace.bconf b/tools/bootconfig/samples/bad-over-max-brace.bconf
new file mode 100644
index 000000000000..74b5dc9e21dc
--- /dev/null
+++ b/tools/bootconfig/samples/bad-over-max-brace.bconf
@@ -0,0 +1,19 @@
+key1 {
+key2 {
+key3 {
+key4 {
+key5 {
+key6 {
+key7 {
+key8 {
+key9 {
+key10 {
+key11 {
+key12 {
+key13 {
+key14 {
+key15 {
+key16 {
+key17 {
+}}}}}}}}}}}}}}}}}
+
diff --git a/tools/bootconfig/samples/exp-good-nested-brace.bconf b/tools/bootconfig/samples/exp-good-nested-brace.bconf
new file mode 100644
index 000000000000..19e0f51b4553
--- /dev/null
+++ b/tools/bootconfig/samples/exp-good-nested-brace.bconf
@@ -0,0 +1 @@
+key1.key2.key3.key4.key5.key6.key7.key8.key9.key10.key11.key12.key13.key14.key15.key16;
diff --git a/tools/bootconfig/samples/good-nested-brace.bconf b/tools/bootconfig/samples/good-nested-brace.bconf
new file mode 100644
index 000000000000..980d094f296e
--- /dev/null
+++ b/tools/bootconfig/samples/good-nested-brace.bconf
@@ -0,0 +1,18 @@
+key1 {
+key2 {
+key3 {
+key4 {
+key5 {
+key6 {
+key7 {
+key8 {
+key9 {
+key10 {
+key11 {
+key12 {
+key13 {
+key14 {
+key15 {
+key16 {
+}}}}}}}}}}}}}}}}
+
diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh
index be9bd18b1d56..fc69f815ce4a 100755
--- a/tools/bootconfig/test-bootconfig.sh
+++ b/tools/bootconfig/test-bootconfig.sh
@@ -171,6 +171,15 @@ $BOOTCONF $INITRD > $OUTFILE
xfail grep -q 'val[[:space:]]' $OUTFILE
xpass grep -q 'val2[[:space:]]' $OUTFILE
+echo "Showing correct line:column of no closing brace"
+cat > $TEMPCONF << EOF
+foo {
+bar {
+}
+EOF
+$BOOTCONF -a $TEMPCONF $INITRD 2> $OUTFILE
+xpass grep -q "1:1" $OUTFILE
+
echo "=== expected failure cases ==="
for i in samples/bad-* ; do
xfail $BOOTCONF -a $i $INITRD
diff --git a/tools/build/Build.include b/tools/build/Build.include
index e45b2eb0d24a..cd0baa7a168d 100644
--- a/tools/build/Build.include
+++ b/tools/build/Build.include
@@ -99,6 +99,15 @@ c_flags = $(filter-out $(CFLAGS_REMOVE_$(obj)), $(c_flags_2))
cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXXFLAGS_$(basetarget).o) $(CXXFLAGS_$(obj))
###
+# Rust flags to be used on rule definition, includes:
+# - global $(RUST_FLAGS)
+# - per target Rust flags
+# - per object Rust flags
+rust_flags_1 = $(RUST_FLAGS) $(RUST_FLAGS_$(basetarget).o) $(RUST_FLAGS_$(obj))
+rust_flags_2 = $(filter-out $(RUST_FLAGS_REMOVE_$(basetarget).o), $(rust_flags_1))
+rust_flags = $(filter-out $(RUST_FLAGS_REMOVE_$(obj)), $(rust_flags_2))
+
+###
## HOSTCC C flags
host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj))
diff --git a/tools/build/Makefile.build b/tools/build/Makefile.build
index 60e65870eae1..ad69efdd4e85 100644
--- a/tools/build/Makefile.build
+++ b/tools/build/Makefile.build
@@ -70,11 +70,13 @@ quiet_cmd_gen = GEN $@
# If there's nothing to link, create empty $@ object.
quiet_cmd_ld_multi = LD $@
cmd_ld_multi = $(if $(strip $(obj-y)),\
- $(LD) -r -o $@ $(filter $(obj-y),$^),rm -f $@; $(AR) rcs $@)
+ printf "$(objprefix)%s " $(patsubst $(objprefix)%,%,$(filter $(obj-y),$^)) | \
+ xargs $(LD) -r -o $@,rm -f $@; $(AR) rcs $@)
quiet_cmd_host_ld_multi = HOSTLD $@
cmd_host_ld_multi = $(if $(strip $(obj-y)),\
- $(HOSTLD) -r -o $@ $(filter $(obj-y),$^),rm -f $@; $(HOSTAR) rcs $@)
+ printf "$(objprefix)%s " $(patsubst $(objprefix)%,%,$(filter $(obj-y),$^)) | \
+ xargs $(HOSTLD) -r -o $@,rm -f $@; $(HOSTAR) rcs $@)
rust_common_cmd = \
$(RUSTC) $(rust_flags) \
diff --git a/tools/include/linux/coresight-pmu.h b/tools/include/linux/coresight-pmu.h
index 89b0ac0014b0..2e179abe472a 100644
--- a/tools/include/linux/coresight-pmu.h
+++ b/tools/include/linux/coresight-pmu.h
@@ -22,30 +22,6 @@
#define CORESIGHT_LEGACY_CPU_TRACE_ID(cpu) (0x10 + (cpu * 2))
/*
- * Below are the definition of bit offsets for perf option, and works as
- * arbitrary values for all ETM versions.
- *
- * Most of them are orignally from ETMv3.5/PTM's ETMCR config, therefore,
- * ETMv3.5/PTM doesn't define ETMCR config bits with prefix "ETM3_" and
- * directly use below macros as config bits.
- */
-#define ETM_OPT_BRANCH_BROADCAST 8
-#define ETM_OPT_CYCACC 12
-#define ETM_OPT_CTXTID 14
-#define ETM_OPT_CTXTID2 15
-#define ETM_OPT_TS 28
-#define ETM_OPT_RETSTK 29
-
-/* ETMv4 CONFIGR programming bits for the ETM OPTs */
-#define ETM4_CFG_BIT_BB 3
-#define ETM4_CFG_BIT_CYCACC 4
-#define ETM4_CFG_BIT_CTXTID 6
-#define ETM4_CFG_BIT_VMID 7
-#define ETM4_CFG_BIT_TS 11
-#define ETM4_CFG_BIT_RETSTK 12
-#define ETM4_CFG_BIT_VMID_OPT 15
-
-/*
* Interpretation of the PERF_RECORD_AUX_OUTPUT_HW_ID payload.
* Used to associate a CPU with the CoreSight Trace ID.
* [07:00] - Trace ID - uses 8 bits to make value easy to read in file.
diff --git a/tools/include/linux/gfp_types.h b/tools/include/linux/gfp_types.h
index 3de43b12209e..6c75df30a281 100644
--- a/tools/include/linux/gfp_types.h
+++ b/tools/include/linux/gfp_types.h
@@ -139,6 +139,8 @@ enum {
* %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
*
* %__GFP_NO_OBJ_EXT causes slab allocation to have no object extension.
+ * mark_obj_codetag_empty() should be called upon freeing for objects allocated
+ * with this flag to indicate that their NULL tags are expected and normal.
*/
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
@@ -309,8 +311,10 @@ enum {
*
* %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
* watermark is applied to allow access to "atomic reserves".
- * The current implementation doesn't support NMI and few other strict
- * non-preemptive contexts (e.g. raw_spin_lock). The same applies to %GFP_NOWAIT.
+ * The current implementation doesn't support NMI, nor contexts that disable
+ * preemption under PREEMPT_RT. This includes raw_spin_lock() and plain
+ * preempt_disable() - see "Memory allocation" in
+ * Documentation/core-api/real-time/differences.rst for more info.
*
* %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
* %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
@@ -321,6 +325,7 @@ enum {
* %GFP_NOWAIT is for kernel allocations that should not stall for direct
* reclaim, start physical IO or use any filesystem callback. It is very
* likely to fail to allocate memory, even for very small allocations.
+ * The same restrictions on calling contexts apply as for %GFP_ATOMIC.
*
* %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
* that do not require the starting of any physical IO.
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 942370b3f5d2..a627acc8fb5f 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -860,8 +860,11 @@ __SYSCALL(__NR_file_setattr, sys_file_setattr)
#define __NR_listns 470
__SYSCALL(__NR_listns, sys_listns)
+#define __NR_rseq_slice_yield 471
+__SYSCALL(__NR_rseq_slice_yield, sys_rseq_slice_yield)
+
#undef __NR_syscalls
-#define __NR_syscalls 471
+#define __NR_syscalls 472
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index dddb781b0507..65500f5db379 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -135,6 +135,12 @@ struct kvm_xen_exit {
} u;
};
+struct kvm_exit_snp_req_certs {
+ __u64 gpa;
+ __u64 npages;
+ __u64 ret;
+};
+
#define KVM_S390_GET_SKEYS_NONE 1
#define KVM_S390_SKEYS_MAX 1048576
@@ -180,6 +186,8 @@ struct kvm_xen_exit {
#define KVM_EXIT_MEMORY_FAULT 39
#define KVM_EXIT_TDX 40
#define KVM_EXIT_ARM_SEA 41
+#define KVM_EXIT_ARM_LDST64B 42
+#define KVM_EXIT_SNP_REQ_CERTS 43
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -402,7 +410,7 @@ struct kvm_run {
} eoi;
/* KVM_EXIT_HYPERV */
struct kvm_hyperv_exit hyperv;
- /* KVM_EXIT_ARM_NISV */
+ /* KVM_EXIT_ARM_NISV / KVM_EXIT_ARM_LDST64B */
struct {
__u64 esr_iss;
__u64 fault_ipa;
@@ -482,6 +490,8 @@ struct kvm_run {
__u64 gva;
__u64 gpa;
} arm_sea;
+ /* KVM_EXIT_SNP_REQ_CERTS */
+ struct kvm_exit_snp_req_certs snp_req_certs;
/* Fix the size of the union. */
char padding[256];
};
@@ -974,6 +984,7 @@ struct kvm_enable_cap {
#define KVM_CAP_GUEST_MEMFD_FLAGS 244
#define KVM_CAP_ARM_SEA_TO_USER 245
#define KVM_CAP_S390_USER_OPEREXEC 246
+#define KVM_CAP_S390_KEYOP 247
struct kvm_irq_routing_irqchip {
__u32 irqchip;
@@ -1219,6 +1230,16 @@ struct kvm_vfio_spapr_tce {
__s32 tablefd;
};
+#define KVM_S390_KEYOP_ISKE 0x01
+#define KVM_S390_KEYOP_RRBE 0x02
+#define KVM_S390_KEYOP_SSKE 0x03
+struct kvm_s390_keyop {
+ __u64 guest_addr;
+ __u8 key;
+ __u8 operation;
+ __u8 pad[6];
+};
+
/*
* KVM_CREATE_VCPU receives as a parameter the vcpu slot, and returns
* a vcpu fd.
@@ -1238,6 +1259,7 @@ struct kvm_vfio_spapr_tce {
#define KVM_S390_UCAS_MAP _IOW(KVMIO, 0x50, struct kvm_s390_ucas_mapping)
#define KVM_S390_UCAS_UNMAP _IOW(KVMIO, 0x51, struct kvm_s390_ucas_mapping)
#define KVM_S390_VCPU_FAULT _IOW(KVMIO, 0x52, unsigned long)
+#define KVM_S390_KEYOP _IOWR(KVMIO, 0x53, struct kvm_s390_keyop)
/* Device model IOC */
#define KVM_CREATE_IRQCHIP _IO(KVMIO, 0x60)
diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h
index 76e9d0664d0c..fd10aa8d697f 100644
--- a/tools/include/uapi/linux/perf_event.h
+++ b/tools/include/uapi/linux/perf_event.h
@@ -1396,7 +1396,7 @@ union perf_mem_data_src {
#define PERF_MEM_LVLNUM_L4 0x0004 /* L4 */
#define PERF_MEM_LVLNUM_L2_MHB 0x0005 /* L2 Miss Handling Buffer */
#define PERF_MEM_LVLNUM_MSC 0x0006 /* Memory-side Cache */
-#define PERF_MEM_LVLNUM_L0 0x0007 /* L0 */
+#define PERF_MEM_LVLNUM_L0 0x0007 /* L0 */
#define PERF_MEM_LVLNUM_UNC 0x0008 /* Uncached */
#define PERF_MEM_LVLNUM_CXL 0x0009 /* CXL */
#define PERF_MEM_LVLNUM_IO 0x000a /* I/O */
diff --git a/tools/objtool/Makefile b/tools/objtool/Makefile
index 76bcd4e85de3..b71d1886022e 100644
--- a/tools/objtool/Makefile
+++ b/tools/objtool/Makefile
@@ -13,7 +13,7 @@ endif
ifeq ($(ARCH_HAS_KLP),y)
HAVE_XXHASH = $(shell printf "$(pound)include <xxhash.h>\nXXH3_state_t *state;int main() {}" | \
- $(HOSTCC) -xc - -o /dev/null -lxxhash 2> /dev/null && echo y || echo n)
+ $(HOSTCC) $(HOSTCFLAGS) -xc - -o /dev/null -lxxhash 2> /dev/null && echo y || echo n)
ifeq ($(HAVE_XXHASH),y)
BUILD_KLP := y
LIBXXHASH_CFLAGS := $(shell $(HOSTPKG_CONFIG) libxxhash --cflags 2>/dev/null) \
diff --git a/tools/objtool/arch/x86/decode.c b/tools/objtool/arch/x86/decode.c
index 73bfea220d1b..c5817829cdfa 100644
--- a/tools/objtool/arch/x86/decode.c
+++ b/tools/objtool/arch/x86/decode.c
@@ -395,52 +395,36 @@ int arch_decode_instruction(struct objtool_file *file, const struct section *sec
if (!rex_w)
break;
- if (modrm_reg == CFI_SP) {
-
- if (mod_is_reg()) {
- /* mov %rsp, reg */
- ADD_OP(op) {
- op->src.type = OP_SRC_REG;
- op->src.reg = CFI_SP;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = modrm_rm;
- }
- break;
-
- } else {
- /* skip RIP relative displacement */
- if (is_RIP())
- break;
-
- /* skip nontrivial SIB */
- if (have_SIB()) {
- modrm_rm = sib_base;
- if (sib_index != CFI_SP)
- break;
- }
-
- /* mov %rsp, disp(%reg) */
- ADD_OP(op) {
- op->src.type = OP_SRC_REG;
- op->src.reg = CFI_SP;
- op->dest.type = OP_DEST_REG_INDIRECT;
- op->dest.reg = modrm_rm;
- op->dest.offset = ins.displacement.value;
- }
- break;
+ if (mod_is_reg()) {
+ /* mov reg, reg */
+ ADD_OP(op) {
+ op->src.type = OP_SRC_REG;
+ op->src.reg = modrm_reg;
+ op->dest.type = OP_DEST_REG;
+ op->dest.reg = modrm_rm;
}
-
break;
}
- if (rm_is_reg(CFI_SP)) {
+ /* skip RIP relative displacement */
+ if (is_RIP())
+ break;
- /* mov reg, %rsp */
+ /* skip nontrivial SIB */
+ if (have_SIB()) {
+ modrm_rm = sib_base;
+ if (sib_index != CFI_SP)
+ break;
+ }
+
+ /* mov %rsp, disp(%reg) */
+ if (modrm_reg == CFI_SP) {
ADD_OP(op) {
op->src.type = OP_SRC_REG;
- op->src.reg = modrm_reg;
- op->dest.type = OP_DEST_REG;
- op->dest.reg = CFI_SP;
+ op->src.reg = CFI_SP;
+ op->dest.type = OP_DEST_REG_INDIRECT;
+ op->dest.reg = modrm_rm;
+ op->dest.offset = ins.displacement.value;
}
break;
}
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index a30379e4ff97..91b3ff4803cf 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -3000,6 +3000,20 @@ static int update_cfi_state(struct instruction *insn,
cfi->stack_size += 8;
}
+ else if (cfi->vals[op->src.reg].base == CFI_CFA) {
+ /*
+ * Clang RSP musical chairs:
+ *
+ * mov %rsp, %rdx [handled above]
+ * ...
+ * mov %rdx, %rbx [handled here]
+ * ...
+ * mov %rbx, %rsp [handled above]
+ */
+ cfi->vals[op->dest.reg].base = CFI_CFA;
+ cfi->vals[op->dest.reg].offset = cfi->vals[op->src.reg].offset;
+ }
+
break;
@@ -3734,7 +3748,7 @@ static void checksum_update_insn(struct objtool_file *file, struct symbol *func,
static int validate_branch(struct objtool_file *file, struct symbol *func,
struct instruction *insn, struct insn_state state);
static int do_validate_branch(struct objtool_file *file, struct symbol *func,
- struct instruction *insn, struct insn_state state);
+ struct instruction *insn, struct insn_state *state);
static int validate_insn(struct objtool_file *file, struct symbol *func,
struct instruction *insn, struct insn_state *statep,
@@ -3999,7 +4013,7 @@ static int validate_insn(struct objtool_file *file, struct symbol *func,
* tools/objtool/Documentation/objtool.txt.
*/
static int do_validate_branch(struct objtool_file *file, struct symbol *func,
- struct instruction *insn, struct insn_state state)
+ struct instruction *insn, struct insn_state *state)
{
struct instruction *next_insn, *prev_insn = NULL;
bool dead_end;
@@ -4030,7 +4044,7 @@ static int do_validate_branch(struct objtool_file *file, struct symbol *func,
return 1;
}
- ret = validate_insn(file, func, insn, &state, prev_insn, next_insn,
+ ret = validate_insn(file, func, insn, state, prev_insn, next_insn,
&dead_end);
if (!insn->trace) {
@@ -4041,7 +4055,7 @@ static int do_validate_branch(struct objtool_file *file, struct symbol *func,
}
if (!dead_end && !next_insn) {
- if (state.cfi.cfa.base == CFI_UNDEFINED)
+ if (state->cfi.cfa.base == CFI_UNDEFINED)
return 0;
if (file->ignore_unreachables)
return 0;
@@ -4066,7 +4080,7 @@ static int validate_branch(struct objtool_file *file, struct symbol *func,
int ret;
trace_depth_inc();
- ret = do_validate_branch(file, func, insn, state);
+ ret = do_validate_branch(file, func, insn, &state);
trace_depth_dec();
return ret;
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 2c02c7b49265..3da90686350d 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -1375,7 +1375,7 @@ void *elf_add_data(struct elf *elf, struct section *sec, const void *data, size_
memcpy(sec->data->d_buf, data, size);
sec->data->d_size = size;
- sec->data->d_align = 1;
+ sec->data->d_align = sec->sh.sh_addralign;
offset = ALIGN(sec->sh.sh_size, sec->sh.sh_addralign);
sec->sh.sh_size = offset + size;
diff --git a/tools/objtool/include/objtool/warn.h b/tools/objtool/include/objtool/warn.h
index 2b27b54096b8..fa8b7d292e83 100644
--- a/tools/objtool/include/objtool/warn.h
+++ b/tools/objtool/include/objtool/warn.h
@@ -107,7 +107,7 @@ static inline char *offstr(struct section *sec, unsigned long offset)
#define ERROR_ELF(format, ...) __WARN_ELF(ERROR_STR, format, ##__VA_ARGS__)
#define ERROR_GLIBC(format, ...) __WARN_GLIBC(ERROR_STR, format, ##__VA_ARGS__)
#define ERROR_FUNC(sec, offset, format, ...) __WARN_FUNC(ERROR_STR, sec, offset, format, ##__VA_ARGS__)
-#define ERROR_INSN(insn, format, ...) WARN_FUNC(insn->sec, insn->offset, format, ##__VA_ARGS__)
+#define ERROR_INSN(insn, format, ...) ERROR_FUNC(insn->sec, insn->offset, format, ##__VA_ARGS__)
extern bool debug;
extern int indent;
diff --git a/tools/objtool/klp-diff.c b/tools/objtool/klp-diff.c
index 9f1f4011eb9c..a3198a63c2f0 100644
--- a/tools/objtool/klp-diff.c
+++ b/tools/objtool/klp-diff.c
@@ -1334,25 +1334,25 @@ static bool should_keep_special_sym(struct elf *elf, struct symbol *sym)
* be applied after static branch/call init, resulting in code corruption.
*
* Validate a special section entry to avoid that. Note that an inert
- * tracepoint is harmless enough, in that case just skip the entry and print a
- * warning. Otherwise, return an error.
+ * tracepoint or pr_debug() is harmless enough, in that case just skip the
+ * entry and print a warning. Otherwise, return an error.
*
- * This is only a temporary limitation which will be fixed when livepatch adds
- * support for submodules: fully self-contained modules which are embedded in
- * the top-level livepatch module's data and which can be loaded on demand when
- * their corresponding to-be-patched module gets loaded. Then klp relocs can
- * be retired.
+ * TODO: This is only a temporary limitation which will be fixed when livepatch
+ * adds support for submodules: fully self-contained modules which are embedded
+ * in the top-level livepatch module's data and which can be loaded on demand
+ * when their corresponding to-be-patched module gets loaded. Then klp relocs
+ * can be retired.
*
* Return:
* -1: error: validation failed
- * 1: warning: tracepoint skipped
+ * 1: warning: disabled tracepoint or pr_debug()
* 0: success
*/
static int validate_special_section_klp_reloc(struct elfs *e, struct symbol *sym)
{
bool static_branch = !strcmp(sym->sec->name, "__jump_table");
bool static_call = !strcmp(sym->sec->name, ".static_call_sites");
- struct symbol *code_sym = NULL;
+ const char *code_sym = NULL;
unsigned long code_offset = 0;
struct reloc *reloc;
int ret = 0;
@@ -1364,12 +1364,15 @@ static int validate_special_section_klp_reloc(struct elfs *e, struct symbol *sym
const char *sym_modname;
struct export *export;
+ if (convert_reloc_sym(e->patched, reloc))
+ continue;
+
/* Static branch/call keys are always STT_OBJECT */
if (reloc->sym->type != STT_OBJECT) {
/* Save code location which can be printed below */
if (reloc->sym->type == STT_FUNC && !code_sym) {
- code_sym = reloc->sym;
+ code_sym = reloc->sym->name;
code_offset = reloc_addend(reloc);
}
@@ -1392,16 +1395,26 @@ static int validate_special_section_klp_reloc(struct elfs *e, struct symbol *sym
if (!strcmp(sym_modname, "vmlinux"))
continue;
+ if (!code_sym)
+ code_sym = "<unknown>";
+
if (static_branch) {
if (strstarts(reloc->sym->name, "__tracepoint_")) {
WARN("%s: disabling unsupported tracepoint %s",
- code_sym->name, reloc->sym->name + 13);
+ code_sym, reloc->sym->name + 13);
+ ret = 1;
+ continue;
+ }
+
+ if (strstr(reloc->sym->name, "__UNIQUE_ID_ddebug_")) {
+ WARN("%s: disabling unsupported pr_debug()",
+ code_sym);
ret = 1;
continue;
}
ERROR("%s+0x%lx: unsupported static branch key %s. Use static_key_enabled() instead",
- code_sym->name, code_offset, reloc->sym->name);
+ code_sym, code_offset, reloc->sym->name);
return -1;
}
@@ -1412,7 +1425,7 @@ static int validate_special_section_klp_reloc(struct elfs *e, struct symbol *sym
}
ERROR("%s()+0x%lx: unsupported static call key %s. Use KLP_STATIC_CALL() instead",
- code_sym->name, code_offset, reloc->sym->name);
+ code_sym, code_offset, reloc->sym->name);
return -1;
}
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index a8dc72cfe48e..15fbba9f4ca8 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -1163,6 +1163,24 @@ ifndef NO_RUST
CFLAGS += -DHAVE_RUST_SUPPORT
$(call detected,CONFIG_RUST_SUPPORT)
endif
+
+ ifneq ($(CROSS_COMPILE),)
+ RUST_TARGET_FLAGS_arm := arm-unknown-linux-gnueabi
+ RUST_TARGET_FLAGS_arm64 := aarch64-unknown-linux-gnu
+ RUST_TARGET_FLAGS_m68k := m68k-unknown-linux-gnu
+ RUST_TARGET_FLAGS_mips := mipsel-unknown-linux-gnu
+ RUST_TARGET_FLAGS_powerpc := powerpc64le-unknown-linux-gnu
+ RUST_TARGET_FLAGS_riscv := riscv64gc-unknown-linux-gnu
+ RUST_TARGET_FLAGS_s390 := s390x-unknown-linux-gnu
+ RUST_TARGET_FLAGS_x86 := x86_64-unknown-linux-gnu
+ RUST_TARGET_FLAGS_x86_64 := x86_64-unknown-linux-gnu
+
+ ifeq ($(RUST_TARGET_FLAGS_$(ARCH)),)
+ $(error Unknown rust cross compilation architecture $(ARCH))
+ endif
+
+ RUST_FLAGS += --target=$(RUST_TARGET_FLAGS_$(ARCH))
+ endif
endif
# Among the variables below, these:
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 11b63bafdb23..f7b936deeaa2 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -274,7 +274,7 @@ ifeq ($(PYLINT),1)
PYLINT := $(shell which pylint 2> /dev/null)
endif
-export srctree OUTPUT RM CC CXX RUSTC LD AR CFLAGS CXXFLAGS V BISON FLEX AWK
+export srctree OUTPUT RM CC CXX RUSTC LD AR CFLAGS CXXFLAGS RUST_FLAGS V BISON FLEX AWK
export HOSTCC HOSTLD HOSTAR HOSTCFLAGS SHELLCHECK MYPY PYLINT
include $(srctree)/tools/build/Makefile.include
diff --git a/tools/perf/arch/arm/entry/syscalls/syscall.tbl b/tools/perf/arch/arm/entry/syscalls/syscall.tbl
index fd09afae72a2..94351e22bfcf 100644
--- a/tools/perf/arch/arm/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/arm/entry/syscalls/syscall.tbl
@@ -485,3 +485,4 @@
468 common file_getattr sys_file_getattr
469 common file_setattr sys_file_setattr
470 common listns sys_listns
+471 common rseq_slice_yield sys_rseq_slice_yield
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index dc3f4e86b075..4418d21708d6 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -68,20 +68,6 @@ static const char * const metadata_ete_ro[] = {
enum cs_etm_version { CS_NOT_PRESENT, CS_ETMV3, CS_ETMV4, CS_ETE };
-/* ETMv4 CONFIGR register bits */
-#define TRCCONFIGR_BB BIT(3)
-#define TRCCONFIGR_CCI BIT(4)
-#define TRCCONFIGR_CID BIT(6)
-#define TRCCONFIGR_VMID BIT(7)
-#define TRCCONFIGR_TS BIT(11)
-#define TRCCONFIGR_RS BIT(12)
-#define TRCCONFIGR_VMIDOPT BIT(15)
-
-/* ETMv3 ETMCR register bits */
-#define ETMCR_CYC_ACC BIT(12)
-#define ETMCR_TIMESTAMP_EN BIT(28)
-#define ETMCR_RETURN_STACK BIT(29)
-
static bool cs_etm_is_ete(struct perf_pmu *cs_etm_pmu, struct perf_cpu cpu);
static int cs_etm_get_ro(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path, __u64 *val);
static bool cs_etm_pmu_path_exists(struct perf_pmu *pmu, struct perf_cpu cpu, const char *path);
diff --git a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
index 9b92bddf06b5..630aab9e5425 100644
--- a/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
+++ b/tools/perf/arch/mips/entry/syscalls/syscall_n64.tbl
@@ -385,3 +385,4 @@
468 n64 file_getattr sys_file_getattr
469 n64 file_setattr sys_file_setattr
470 n64 listns sys_listns
+471 n64 rseq_slice_yield sys_rseq_slice_yield
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index ec4458cdb97b..4fcc7c58a105 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -561,3 +561,4 @@
468 common file_getattr sys_file_getattr
469 common file_setattr sys_file_setattr
470 common listns sys_listns
+471 nospu rseq_slice_yield sys_rseq_slice_yield
diff --git a/tools/perf/arch/s390/entry/syscalls/syscall.tbl b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
index 5863787ab036..09a7ef04d979 100644
--- a/tools/perf/arch/s390/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/s390/entry/syscalls/syscall.tbl
@@ -3,473 +3,398 @@
# System call table for s390
#
# Format:
+# <nr> <abi> <syscall> <entry>
#
-# <nr> <abi> <syscall> <entry-64bit> <compat-entry>
-#
-# where <abi> can be common, 64, or 32
+# <abi> is always common.
-1 common exit sys_exit sys_exit
-2 common fork sys_fork sys_fork
-3 common read sys_read compat_sys_s390_read
-4 common write sys_write compat_sys_s390_write
-5 common open sys_open compat_sys_open
-6 common close sys_close sys_close
-7 common restart_syscall sys_restart_syscall sys_restart_syscall
-8 common creat sys_creat sys_creat
-9 common link sys_link sys_link
-10 common unlink sys_unlink sys_unlink
-11 common execve sys_execve compat_sys_execve
-12 common chdir sys_chdir sys_chdir
-13 32 time - sys_time32
-14 common mknod sys_mknod sys_mknod
-15 common chmod sys_chmod sys_chmod
-16 32 lchown - sys_lchown16
-19 common lseek sys_lseek compat_sys_lseek
-20 common getpid sys_getpid sys_getpid
-21 common mount sys_mount sys_mount
-22 common umount sys_oldumount sys_oldumount
-23 32 setuid - sys_setuid16
-24 32 getuid - sys_getuid16
-25 32 stime - sys_stime32
-26 common ptrace sys_ptrace compat_sys_ptrace
-27 common alarm sys_alarm sys_alarm
-29 common pause sys_pause sys_pause
-30 common utime sys_utime sys_utime32
-33 common access sys_access sys_access
-34 common nice sys_nice sys_nice
-36 common sync sys_sync sys_sync
-37 common kill sys_kill sys_kill
-38 common rename sys_rename sys_rename
-39 common mkdir sys_mkdir sys_mkdir
-40 common rmdir sys_rmdir sys_rmdir
-41 common dup sys_dup sys_dup
-42 common pipe sys_pipe sys_pipe
-43 common times sys_times compat_sys_times
-45 common brk sys_brk sys_brk
-46 32 setgid - sys_setgid16
-47 32 getgid - sys_getgid16
-48 common signal sys_signal sys_signal
-49 32 geteuid - sys_geteuid16
-50 32 getegid - sys_getegid16
-51 common acct sys_acct sys_acct
-52 common umount2 sys_umount sys_umount
-54 common ioctl sys_ioctl compat_sys_ioctl
-55 common fcntl sys_fcntl compat_sys_fcntl
-57 common setpgid sys_setpgid sys_setpgid
-60 common umask sys_umask sys_umask
-61 common chroot sys_chroot sys_chroot
-62 common ustat sys_ustat compat_sys_ustat
-63 common dup2 sys_dup2 sys_dup2
-64 common getppid sys_getppid sys_getppid
-65 common getpgrp sys_getpgrp sys_getpgrp
-66 common setsid sys_setsid sys_setsid
-67 common sigaction sys_sigaction compat_sys_sigaction
-70 32 setreuid - sys_setreuid16
-71 32 setregid - sys_setregid16
-72 common sigsuspend sys_sigsuspend sys_sigsuspend
-73 common sigpending sys_sigpending compat_sys_sigpending
-74 common sethostname sys_sethostname sys_sethostname
-75 common setrlimit sys_setrlimit compat_sys_setrlimit
-76 32 getrlimit - compat_sys_old_getrlimit
-77 common getrusage sys_getrusage compat_sys_getrusage
-78 common gettimeofday sys_gettimeofday compat_sys_gettimeofday
-79 common settimeofday sys_settimeofday compat_sys_settimeofday
-80 32 getgroups - sys_getgroups16
-81 32 setgroups - sys_setgroups16
-83 common symlink sys_symlink sys_symlink
-85 common readlink sys_readlink sys_readlink
-86 common uselib sys_uselib sys_uselib
-87 common swapon sys_swapon sys_swapon
-88 common reboot sys_reboot sys_reboot
-89 common readdir - compat_sys_old_readdir
-90 common mmap sys_old_mmap compat_sys_s390_old_mmap
-91 common munmap sys_munmap sys_munmap
-92 common truncate sys_truncate compat_sys_truncate
-93 common ftruncate sys_ftruncate compat_sys_ftruncate
-94 common fchmod sys_fchmod sys_fchmod
-95 32 fchown - sys_fchown16
-96 common getpriority sys_getpriority sys_getpriority
-97 common setpriority sys_setpriority sys_setpriority
-99 common statfs sys_statfs compat_sys_statfs
-100 common fstatfs sys_fstatfs compat_sys_fstatfs
-101 32 ioperm - -
-102 common socketcall sys_socketcall compat_sys_socketcall
-103 common syslog sys_syslog sys_syslog
-104 common setitimer sys_setitimer compat_sys_setitimer
-105 common getitimer sys_getitimer compat_sys_getitimer
-106 common stat sys_newstat compat_sys_newstat
-107 common lstat sys_newlstat compat_sys_newlstat
-108 common fstat sys_newfstat compat_sys_newfstat
-110 common lookup_dcookie - -
-111 common vhangup sys_vhangup sys_vhangup
-112 common idle - -
-114 common wait4 sys_wait4 compat_sys_wait4
-115 common swapoff sys_swapoff sys_swapoff
-116 common sysinfo sys_sysinfo compat_sys_sysinfo
-117 common ipc sys_s390_ipc compat_sys_s390_ipc
-118 common fsync sys_fsync sys_fsync
-119 common sigreturn sys_sigreturn compat_sys_sigreturn
-120 common clone sys_clone sys_clone
-121 common setdomainname sys_setdomainname sys_setdomainname
-122 common uname sys_newuname sys_newuname
-124 common adjtimex sys_adjtimex sys_adjtimex_time32
-125 common mprotect sys_mprotect sys_mprotect
-126 common sigprocmask sys_sigprocmask compat_sys_sigprocmask
-127 common create_module - -
-128 common init_module sys_init_module sys_init_module
-129 common delete_module sys_delete_module sys_delete_module
-130 common get_kernel_syms - -
-131 common quotactl sys_quotactl sys_quotactl
-132 common getpgid sys_getpgid sys_getpgid
-133 common fchdir sys_fchdir sys_fchdir
-134 common bdflush sys_ni_syscall sys_ni_syscall
-135 common sysfs sys_sysfs sys_sysfs
-136 common personality sys_s390_personality sys_s390_personality
-137 common afs_syscall - -
-138 32 setfsuid - sys_setfsuid16
-139 32 setfsgid - sys_setfsgid16
-140 32 _llseek - sys_llseek
-141 common getdents sys_getdents compat_sys_getdents
-142 32 _newselect - compat_sys_select
-142 64 select sys_select -
-143 common flock sys_flock sys_flock
-144 common msync sys_msync sys_msync
-145 common readv sys_readv sys_readv
-146 common writev sys_writev sys_writev
-147 common getsid sys_getsid sys_getsid
-148 common fdatasync sys_fdatasync sys_fdatasync
-149 common _sysctl - -
-150 common mlock sys_mlock sys_mlock
-151 common munlock sys_munlock sys_munlock
-152 common mlockall sys_mlockall sys_mlockall
-153 common munlockall sys_munlockall sys_munlockall
-154 common sched_setparam sys_sched_setparam sys_sched_setparam
-155 common sched_getparam sys_sched_getparam sys_sched_getparam
-156 common sched_setscheduler sys_sched_setscheduler sys_sched_setscheduler
-157 common sched_getscheduler sys_sched_getscheduler sys_sched_getscheduler
-158 common sched_yield sys_sched_yield sys_sched_yield
-159 common sched_get_priority_max sys_sched_get_priority_max sys_sched_get_priority_max
-160 common sched_get_priority_min sys_sched_get_priority_min sys_sched_get_priority_min
-161 common sched_rr_get_interval sys_sched_rr_get_interval sys_sched_rr_get_interval_time32
-162 common nanosleep sys_nanosleep sys_nanosleep_time32
-163 common mremap sys_mremap sys_mremap
-164 32 setresuid - sys_setresuid16
-165 32 getresuid - sys_getresuid16
-167 common query_module - -
-168 common poll sys_poll sys_poll
-169 common nfsservctl - -
-170 32 setresgid - sys_setresgid16
-171 32 getresgid - sys_getresgid16
-172 common prctl sys_prctl sys_prctl
-173 common rt_sigreturn sys_rt_sigreturn compat_sys_rt_sigreturn
-174 common rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
-175 common rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
-176 common rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
-177 common rt_sigtimedwait sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time32
-178 common rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
-179 common rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
-180 common pread64 sys_pread64 compat_sys_s390_pread64
-181 common pwrite64 sys_pwrite64 compat_sys_s390_pwrite64
-182 32 chown - sys_chown16
-183 common getcwd sys_getcwd sys_getcwd
-184 common capget sys_capget sys_capget
-185 common capset sys_capset sys_capset
-186 common sigaltstack sys_sigaltstack compat_sys_sigaltstack
-187 common sendfile sys_sendfile64 compat_sys_sendfile
-188 common getpmsg - -
-189 common putpmsg - -
-190 common vfork sys_vfork sys_vfork
-191 32 ugetrlimit - compat_sys_getrlimit
-191 64 getrlimit sys_getrlimit -
-192 32 mmap2 - compat_sys_s390_mmap2
-193 32 truncate64 - compat_sys_s390_truncate64
-194 32 ftruncate64 - compat_sys_s390_ftruncate64
-195 32 stat64 - compat_sys_s390_stat64
-196 32 lstat64 - compat_sys_s390_lstat64
-197 32 fstat64 - compat_sys_s390_fstat64
-198 32 lchown32 - sys_lchown
-198 64 lchown sys_lchown -
-199 32 getuid32 - sys_getuid
-199 64 getuid sys_getuid -
-200 32 getgid32 - sys_getgid
-200 64 getgid sys_getgid -
-201 32 geteuid32 - sys_geteuid
-201 64 geteuid sys_geteuid -
-202 32 getegid32 - sys_getegid
-202 64 getegid sys_getegid -
-203 32 setreuid32 - sys_setreuid
-203 64 setreuid sys_setreuid -
-204 32 setregid32 - sys_setregid
-204 64 setregid sys_setregid -
-205 32 getgroups32 - sys_getgroups
-205 64 getgroups sys_getgroups -
-206 32 setgroups32 - sys_setgroups
-206 64 setgroups sys_setgroups -
-207 32 fchown32 - sys_fchown
-207 64 fchown sys_fchown -
-208 32 setresuid32 - sys_setresuid
-208 64 setresuid sys_setresuid -
-209 32 getresuid32 - sys_getresuid
-209 64 getresuid sys_getresuid -
-210 32 setresgid32 - sys_setresgid
-210 64 setresgid sys_setresgid -
-211 32 getresgid32 - sys_getresgid
-211 64 getresgid sys_getresgid -
-212 32 chown32 - sys_chown
-212 64 chown sys_chown -
-213 32 setuid32 - sys_setuid
-213 64 setuid sys_setuid -
-214 32 setgid32 - sys_setgid
-214 64 setgid sys_setgid -
-215 32 setfsuid32 - sys_setfsuid
-215 64 setfsuid sys_setfsuid -
-216 32 setfsgid32 - sys_setfsgid
-216 64 setfsgid sys_setfsgid -
-217 common pivot_root sys_pivot_root sys_pivot_root
-218 common mincore sys_mincore sys_mincore
-219 common madvise sys_madvise sys_madvise
-220 common getdents64 sys_getdents64 sys_getdents64
-221 32 fcntl64 - compat_sys_fcntl64
-222 common readahead sys_readahead compat_sys_s390_readahead
-223 32 sendfile64 - compat_sys_sendfile64
-224 common setxattr sys_setxattr sys_setxattr
-225 common lsetxattr sys_lsetxattr sys_lsetxattr
-226 common fsetxattr sys_fsetxattr sys_fsetxattr
-227 common getxattr sys_getxattr sys_getxattr
-228 common lgetxattr sys_lgetxattr sys_lgetxattr
-229 common fgetxattr sys_fgetxattr sys_fgetxattr
-230 common listxattr sys_listxattr sys_listxattr
-231 common llistxattr sys_llistxattr sys_llistxattr
-232 common flistxattr sys_flistxattr sys_flistxattr
-233 common removexattr sys_removexattr sys_removexattr
-234 common lremovexattr sys_lremovexattr sys_lremovexattr
-235 common fremovexattr sys_fremovexattr sys_fremovexattr
-236 common gettid sys_gettid sys_gettid
-237 common tkill sys_tkill sys_tkill
-238 common futex sys_futex sys_futex_time32
-239 common sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
-240 common sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
-241 common tgkill sys_tgkill sys_tgkill
-243 common io_setup sys_io_setup compat_sys_io_setup
-244 common io_destroy sys_io_destroy sys_io_destroy
-245 common io_getevents sys_io_getevents sys_io_getevents_time32
-246 common io_submit sys_io_submit compat_sys_io_submit
-247 common io_cancel sys_io_cancel sys_io_cancel
-248 common exit_group sys_exit_group sys_exit_group
-249 common epoll_create sys_epoll_create sys_epoll_create
-250 common epoll_ctl sys_epoll_ctl sys_epoll_ctl
-251 common epoll_wait sys_epoll_wait sys_epoll_wait
-252 common set_tid_address sys_set_tid_address sys_set_tid_address
-253 common fadvise64 sys_fadvise64_64 compat_sys_s390_fadvise64
-254 common timer_create sys_timer_create compat_sys_timer_create
-255 common timer_settime sys_timer_settime sys_timer_settime32
-256 common timer_gettime sys_timer_gettime sys_timer_gettime32
-257 common timer_getoverrun sys_timer_getoverrun sys_timer_getoverrun
-258 common timer_delete sys_timer_delete sys_timer_delete
-259 common clock_settime sys_clock_settime sys_clock_settime32
-260 common clock_gettime sys_clock_gettime sys_clock_gettime32
-261 common clock_getres sys_clock_getres sys_clock_getres_time32
-262 common clock_nanosleep sys_clock_nanosleep sys_clock_nanosleep_time32
-264 32 fadvise64_64 - compat_sys_s390_fadvise64_64
-265 common statfs64 sys_statfs64 compat_sys_statfs64
-266 common fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
-267 common remap_file_pages sys_remap_file_pages sys_remap_file_pages
-268 common mbind sys_mbind sys_mbind
-269 common get_mempolicy sys_get_mempolicy sys_get_mempolicy
-270 common set_mempolicy sys_set_mempolicy sys_set_mempolicy
-271 common mq_open sys_mq_open compat_sys_mq_open
-272 common mq_unlink sys_mq_unlink sys_mq_unlink
-273 common mq_timedsend sys_mq_timedsend sys_mq_timedsend_time32
-274 common mq_timedreceive sys_mq_timedreceive sys_mq_timedreceive_time32
-275 common mq_notify sys_mq_notify compat_sys_mq_notify
-276 common mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
-277 common kexec_load sys_kexec_load compat_sys_kexec_load
-278 common add_key sys_add_key sys_add_key
-279 common request_key sys_request_key sys_request_key
-280 common keyctl sys_keyctl compat_sys_keyctl
-281 common waitid sys_waitid compat_sys_waitid
-282 common ioprio_set sys_ioprio_set sys_ioprio_set
-283 common ioprio_get sys_ioprio_get sys_ioprio_get
-284 common inotify_init sys_inotify_init sys_inotify_init
-285 common inotify_add_watch sys_inotify_add_watch sys_inotify_add_watch
-286 common inotify_rm_watch sys_inotify_rm_watch sys_inotify_rm_watch
-287 common migrate_pages sys_migrate_pages sys_migrate_pages
-288 common openat sys_openat compat_sys_openat
-289 common mkdirat sys_mkdirat sys_mkdirat
-290 common mknodat sys_mknodat sys_mknodat
-291 common fchownat sys_fchownat sys_fchownat
-292 common futimesat sys_futimesat sys_futimesat_time32
-293 32 fstatat64 - compat_sys_s390_fstatat64
-293 64 newfstatat sys_newfstatat -
-294 common unlinkat sys_unlinkat sys_unlinkat
-295 common renameat sys_renameat sys_renameat
-296 common linkat sys_linkat sys_linkat
-297 common symlinkat sys_symlinkat sys_symlinkat
-298 common readlinkat sys_readlinkat sys_readlinkat
-299 common fchmodat sys_fchmodat sys_fchmodat
-300 common faccessat sys_faccessat sys_faccessat
-301 common pselect6 sys_pselect6 compat_sys_pselect6_time32
-302 common ppoll sys_ppoll compat_sys_ppoll_time32
-303 common unshare sys_unshare sys_unshare
-304 common set_robust_list sys_set_robust_list compat_sys_set_robust_list
-305 common get_robust_list sys_get_robust_list compat_sys_get_robust_list
-306 common splice sys_splice sys_splice
-307 common sync_file_range sys_sync_file_range compat_sys_s390_sync_file_range
-308 common tee sys_tee sys_tee
-309 common vmsplice sys_vmsplice sys_vmsplice
-310 common move_pages sys_move_pages sys_move_pages
-311 common getcpu sys_getcpu sys_getcpu
-312 common epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
-313 common utimes sys_utimes sys_utimes_time32
-314 common fallocate sys_fallocate compat_sys_s390_fallocate
-315 common utimensat sys_utimensat sys_utimensat_time32
-316 common signalfd sys_signalfd compat_sys_signalfd
-317 common timerfd - -
-318 common eventfd sys_eventfd sys_eventfd
-319 common timerfd_create sys_timerfd_create sys_timerfd_create
-320 common timerfd_settime sys_timerfd_settime sys_timerfd_settime32
-321 common timerfd_gettime sys_timerfd_gettime sys_timerfd_gettime32
-322 common signalfd4 sys_signalfd4 compat_sys_signalfd4
-323 common eventfd2 sys_eventfd2 sys_eventfd2
-324 common inotify_init1 sys_inotify_init1 sys_inotify_init1
-325 common pipe2 sys_pipe2 sys_pipe2
-326 common dup3 sys_dup3 sys_dup3
-327 common epoll_create1 sys_epoll_create1 sys_epoll_create1
-328 common preadv sys_preadv compat_sys_preadv
-329 common pwritev sys_pwritev compat_sys_pwritev
-330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
-331 common perf_event_open sys_perf_event_open sys_perf_event_open
-332 common fanotify_init sys_fanotify_init sys_fanotify_init
-333 common fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
-334 common prlimit64 sys_prlimit64 sys_prlimit64
-335 common name_to_handle_at sys_name_to_handle_at sys_name_to_handle_at
-336 common open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
-337 common clock_adjtime sys_clock_adjtime sys_clock_adjtime32
-338 common syncfs sys_syncfs sys_syncfs
-339 common setns sys_setns sys_setns
-340 common process_vm_readv sys_process_vm_readv sys_process_vm_readv
-341 common process_vm_writev sys_process_vm_writev sys_process_vm_writev
-342 common s390_runtime_instr sys_s390_runtime_instr sys_s390_runtime_instr
-343 common kcmp sys_kcmp sys_kcmp
-344 common finit_module sys_finit_module sys_finit_module
-345 common sched_setattr sys_sched_setattr sys_sched_setattr
-346 common sched_getattr sys_sched_getattr sys_sched_getattr
-347 common renameat2 sys_renameat2 sys_renameat2
-348 common seccomp sys_seccomp sys_seccomp
-349 common getrandom sys_getrandom sys_getrandom
-350 common memfd_create sys_memfd_create sys_memfd_create
-351 common bpf sys_bpf sys_bpf
-352 common s390_pci_mmio_write sys_s390_pci_mmio_write sys_s390_pci_mmio_write
-353 common s390_pci_mmio_read sys_s390_pci_mmio_read sys_s390_pci_mmio_read
-354 common execveat sys_execveat compat_sys_execveat
-355 common userfaultfd sys_userfaultfd sys_userfaultfd
-356 common membarrier sys_membarrier sys_membarrier
-357 common recvmmsg sys_recvmmsg compat_sys_recvmmsg_time32
-358 common sendmmsg sys_sendmmsg compat_sys_sendmmsg
-359 common socket sys_socket sys_socket
-360 common socketpair sys_socketpair sys_socketpair
-361 common bind sys_bind sys_bind
-362 common connect sys_connect sys_connect
-363 common listen sys_listen sys_listen
-364 common accept4 sys_accept4 sys_accept4
-365 common getsockopt sys_getsockopt sys_getsockopt
-366 common setsockopt sys_setsockopt sys_setsockopt
-367 common getsockname sys_getsockname sys_getsockname
-368 common getpeername sys_getpeername sys_getpeername
-369 common sendto sys_sendto sys_sendto
-370 common sendmsg sys_sendmsg compat_sys_sendmsg
-371 common recvfrom sys_recvfrom compat_sys_recvfrom
-372 common recvmsg sys_recvmsg compat_sys_recvmsg
-373 common shutdown sys_shutdown sys_shutdown
-374 common mlock2 sys_mlock2 sys_mlock2
-375 common copy_file_range sys_copy_file_range sys_copy_file_range
-376 common preadv2 sys_preadv2 compat_sys_preadv2
-377 common pwritev2 sys_pwritev2 compat_sys_pwritev2
-378 common s390_guarded_storage sys_s390_guarded_storage sys_s390_guarded_storage
-379 common statx sys_statx sys_statx
-380 common s390_sthyi sys_s390_sthyi sys_s390_sthyi
-381 common kexec_file_load sys_kexec_file_load sys_kexec_file_load
-382 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
-383 common rseq sys_rseq sys_rseq
-384 common pkey_mprotect sys_pkey_mprotect sys_pkey_mprotect
-385 common pkey_alloc sys_pkey_alloc sys_pkey_alloc
-386 common pkey_free sys_pkey_free sys_pkey_free
+1 common exit sys_exit
+2 common fork sys_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open
+6 common close sys_close
+7 common restart_syscall sys_restart_syscall
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 common execve sys_execve
+12 common chdir sys_chdir
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+19 common lseek sys_lseek
+20 common getpid sys_getpid
+21 common mount sys_mount
+22 common umount sys_oldumount
+26 common ptrace sys_ptrace
+27 common alarm sys_alarm
+29 common pause sys_pause
+30 common utime sys_utime
+33 common access sys_access
+34 common nice sys_nice
+36 common sync sys_sync
+37 common kill sys_kill
+38 common rename sys_rename
+39 common mkdir sys_mkdir
+40 common rmdir sys_rmdir
+41 common dup sys_dup
+42 common pipe sys_pipe
+43 common times sys_times
+45 common brk sys_brk
+48 common signal sys_signal
+51 common acct sys_acct
+52 common umount2 sys_umount
+54 common ioctl sys_ioctl
+55 common fcntl sys_fcntl
+57 common setpgid sys_setpgid
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common ustat sys_ustat
+63 common dup2 sys_dup2
+64 common getppid sys_getppid
+65 common getpgrp sys_getpgrp
+66 common setsid sys_setsid
+67 common sigaction sys_sigaction
+72 common sigsuspend sys_sigsuspend
+73 common sigpending sys_sigpending
+74 common sethostname sys_sethostname
+75 common setrlimit sys_setrlimit
+77 common getrusage sys_getrusage
+78 common gettimeofday sys_gettimeofday
+79 common settimeofday sys_settimeofday
+83 common symlink sys_symlink
+85 common readlink sys_readlink
+86 common uselib sys_uselib
+87 common swapon sys_swapon
+88 common reboot sys_reboot
+89 common readdir sys_ni_syscall
+90 common mmap sys_old_mmap
+91 common munmap sys_munmap
+92 common truncate sys_truncate
+93 common ftruncate sys_ftruncate
+94 common fchmod sys_fchmod
+96 common getpriority sys_getpriority
+97 common setpriority sys_setpriority
+99 common statfs sys_statfs
+100 common fstatfs sys_fstatfs
+102 common socketcall sys_socketcall
+103 common syslog sys_syslog
+104 common setitimer sys_setitimer
+105 common getitimer sys_getitimer
+106 common stat sys_newstat
+107 common lstat sys_newlstat
+108 common fstat sys_newfstat
+110 common lookup_dcookie sys_ni_syscall
+111 common vhangup sys_vhangup
+112 common idle sys_ni_syscall
+114 common wait4 sys_wait4
+115 common swapoff sys_swapoff
+116 common sysinfo sys_sysinfo
+117 common ipc sys_s390_ipc
+118 common fsync sys_fsync
+119 common sigreturn sys_sigreturn
+120 common clone sys_clone
+121 common setdomainname sys_setdomainname
+122 common uname sys_newuname
+124 common adjtimex sys_adjtimex
+125 common mprotect sys_mprotect
+126 common sigprocmask sys_sigprocmask
+127 common create_module sys_ni_syscall
+128 common init_module sys_init_module
+129 common delete_module sys_delete_module
+130 common get_kernel_syms sys_ni_syscall
+131 common quotactl sys_quotactl
+132 common getpgid sys_getpgid
+133 common fchdir sys_fchdir
+134 common bdflush sys_ni_syscall
+135 common sysfs sys_sysfs
+136 common personality sys_s390_personality
+137 common afs_syscall sys_ni_syscall
+141 common getdents sys_getdents
+142 common select sys_select
+143 common flock sys_flock
+144 common msync sys_msync
+145 common readv sys_readv
+146 common writev sys_writev
+147 common getsid sys_getsid
+148 common fdatasync sys_fdatasync
+149 common _sysctl sys_ni_syscall
+150 common mlock sys_mlock
+151 common munlock sys_munlock
+152 common mlockall sys_mlockall
+153 common munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval
+162 common nanosleep sys_nanosleep
+163 common mremap sys_mremap
+167 common query_module sys_ni_syscall
+168 common poll sys_poll
+169 common nfsservctl sys_ni_syscall
+172 common prctl sys_prctl
+173 common rt_sigreturn sys_rt_sigreturn
+174 common rt_sigaction sys_rt_sigaction
+175 common rt_sigprocmask sys_rt_sigprocmask
+176 common rt_sigpending sys_rt_sigpending
+177 common rt_sigtimedwait sys_rt_sigtimedwait
+178 common rt_sigqueueinfo sys_rt_sigqueueinfo
+179 common rt_sigsuspend sys_rt_sigsuspend
+180 common pread64 sys_pread64
+181 common pwrite64 sys_pwrite64
+183 common getcwd sys_getcwd
+184 common capget sys_capget
+185 common capset sys_capset
+186 common sigaltstack sys_sigaltstack
+187 common sendfile sys_sendfile64
+188 common getpmsg sys_ni_syscall
+189 common putpmsg sys_ni_syscall
+190 common vfork sys_vfork
+191 common getrlimit sys_getrlimit
+198 common lchown sys_lchown
+199 common getuid sys_getuid
+200 common getgid sys_getgid
+201 common geteuid sys_geteuid
+202 common getegid sys_getegid
+203 common setreuid sys_setreuid
+204 common setregid sys_setregid
+205 common getgroups sys_getgroups
+206 common setgroups sys_setgroups
+207 common fchown sys_fchown
+208 common setresuid sys_setresuid
+209 common getresuid sys_getresuid
+210 common setresgid sys_setresgid
+211 common getresgid sys_getresgid
+212 common chown sys_chown
+213 common setuid sys_setuid
+214 common setgid sys_setgid
+215 common setfsuid sys_setfsuid
+216 common setfsgid sys_setfsgid
+217 common pivot_root sys_pivot_root
+218 common mincore sys_mincore
+219 common madvise sys_madvise
+220 common getdents64 sys_getdents64
+222 common readahead sys_readahead
+224 common setxattr sys_setxattr
+225 common lsetxattr sys_lsetxattr
+226 common fsetxattr sys_fsetxattr
+227 common getxattr sys_getxattr
+228 common lgetxattr sys_lgetxattr
+229 common fgetxattr sys_fgetxattr
+230 common listxattr sys_listxattr
+231 common llistxattr sys_llistxattr
+232 common flistxattr sys_flistxattr
+233 common removexattr sys_removexattr
+234 common lremovexattr sys_lremovexattr
+235 common fremovexattr sys_fremovexattr
+236 common gettid sys_gettid
+237 common tkill sys_tkill
+238 common futex sys_futex
+239 common sched_setaffinity sys_sched_setaffinity
+240 common sched_getaffinity sys_sched_getaffinity
+241 common tgkill sys_tgkill
+243 common io_setup sys_io_setup
+244 common io_destroy sys_io_destroy
+245 common io_getevents sys_io_getevents
+246 common io_submit sys_io_submit
+247 common io_cancel sys_io_cancel
+248 common exit_group sys_exit_group
+249 common epoll_create sys_epoll_create
+250 common epoll_ctl sys_epoll_ctl
+251 common epoll_wait sys_epoll_wait
+252 common set_tid_address sys_set_tid_address
+253 common fadvise64 sys_fadvise64_64
+254 common timer_create sys_timer_create
+255 common timer_settime sys_timer_settime
+256 common timer_gettime sys_timer_gettime
+257 common timer_getoverrun sys_timer_getoverrun
+258 common timer_delete sys_timer_delete
+259 common clock_settime sys_clock_settime
+260 common clock_gettime sys_clock_gettime
+261 common clock_getres sys_clock_getres
+262 common clock_nanosleep sys_clock_nanosleep
+265 common statfs64 sys_statfs64
+266 common fstatfs64 sys_fstatfs64
+267 common remap_file_pages sys_remap_file_pages
+268 common mbind sys_mbind
+269 common get_mempolicy sys_get_mempolicy
+270 common set_mempolicy sys_set_mempolicy
+271 common mq_open sys_mq_open
+272 common mq_unlink sys_mq_unlink
+273 common mq_timedsend sys_mq_timedsend
+274 common mq_timedreceive sys_mq_timedreceive
+275 common mq_notify sys_mq_notify
+276 common mq_getsetattr sys_mq_getsetattr
+277 common kexec_load sys_kexec_load
+278 common add_key sys_add_key
+279 common request_key sys_request_key
+280 common keyctl sys_keyctl
+281 common waitid sys_waitid
+282 common ioprio_set sys_ioprio_set
+283 common ioprio_get sys_ioprio_get
+284 common inotify_init sys_inotify_init
+285 common inotify_add_watch sys_inotify_add_watch
+286 common inotify_rm_watch sys_inotify_rm_watch
+287 common migrate_pages sys_migrate_pages
+288 common openat sys_openat
+289 common mkdirat sys_mkdirat
+290 common mknodat sys_mknodat
+291 common fchownat sys_fchownat
+292 common futimesat sys_futimesat
+293 common newfstatat sys_newfstatat
+294 common unlinkat sys_unlinkat
+295 common renameat sys_renameat
+296 common linkat sys_linkat
+297 common symlinkat sys_symlinkat
+298 common readlinkat sys_readlinkat
+299 common fchmodat sys_fchmodat
+300 common faccessat sys_faccessat
+301 common pselect6 sys_pselect6
+302 common ppoll sys_ppoll
+303 common unshare sys_unshare
+304 common set_robust_list sys_set_robust_list
+305 common get_robust_list sys_get_robust_list
+306 common splice sys_splice
+307 common sync_file_range sys_sync_file_range
+308 common tee sys_tee
+309 common vmsplice sys_vmsplice
+310 common move_pages sys_move_pages
+311 common getcpu sys_getcpu
+312 common epoll_pwait sys_epoll_pwait
+313 common utimes sys_utimes
+314 common fallocate sys_fallocate
+315 common utimensat sys_utimensat
+316 common signalfd sys_signalfd
+317 common timerfd sys_ni_syscall
+318 common eventfd sys_eventfd
+319 common timerfd_create sys_timerfd_create
+320 common timerfd_settime sys_timerfd_settime
+321 common timerfd_gettime sys_timerfd_gettime
+322 common signalfd4 sys_signalfd4
+323 common eventfd2 sys_eventfd2
+324 common inotify_init1 sys_inotify_init1
+325 common pipe2 sys_pipe2
+326 common dup3 sys_dup3
+327 common epoll_create1 sys_epoll_create1
+328 common preadv sys_preadv
+329 common pwritev sys_pwritev
+330 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+331 common perf_event_open sys_perf_event_open
+332 common fanotify_init sys_fanotify_init
+333 common fanotify_mark sys_fanotify_mark
+334 common prlimit64 sys_prlimit64
+335 common name_to_handle_at sys_name_to_handle_at
+336 common open_by_handle_at sys_open_by_handle_at
+337 common clock_adjtime sys_clock_adjtime
+338 common syncfs sys_syncfs
+339 common setns sys_setns
+340 common process_vm_readv sys_process_vm_readv
+341 common process_vm_writev sys_process_vm_writev
+342 common s390_runtime_instr sys_s390_runtime_instr
+343 common kcmp sys_kcmp
+344 common finit_module sys_finit_module
+345 common sched_setattr sys_sched_setattr
+346 common sched_getattr sys_sched_getattr
+347 common renameat2 sys_renameat2
+348 common seccomp sys_seccomp
+349 common getrandom sys_getrandom
+350 common memfd_create sys_memfd_create
+351 common bpf sys_bpf
+352 common s390_pci_mmio_write sys_s390_pci_mmio_write
+353 common s390_pci_mmio_read sys_s390_pci_mmio_read
+354 common execveat sys_execveat
+355 common userfaultfd sys_userfaultfd
+356 common membarrier sys_membarrier
+357 common recvmmsg sys_recvmmsg
+358 common sendmmsg sys_sendmmsg
+359 common socket sys_socket
+360 common socketpair sys_socketpair
+361 common bind sys_bind
+362 common connect sys_connect
+363 common listen sys_listen
+364 common accept4 sys_accept4
+365 common getsockopt sys_getsockopt
+366 common setsockopt sys_setsockopt
+367 common getsockname sys_getsockname
+368 common getpeername sys_getpeername
+369 common sendto sys_sendto
+370 common sendmsg sys_sendmsg
+371 common recvfrom sys_recvfrom
+372 common recvmsg sys_recvmsg
+373 common shutdown sys_shutdown
+374 common mlock2 sys_mlock2
+375 common copy_file_range sys_copy_file_range
+376 common preadv2 sys_preadv2
+377 common pwritev2 sys_pwritev2
+378 common s390_guarded_storage sys_s390_guarded_storage
+379 common statx sys_statx
+380 common s390_sthyi sys_s390_sthyi
+381 common kexec_file_load sys_kexec_file_load
+382 common io_pgetevents sys_io_pgetevents
+383 common rseq sys_rseq
+384 common pkey_mprotect sys_pkey_mprotect
+385 common pkey_alloc sys_pkey_alloc
+386 common pkey_free sys_pkey_free
# room for arch specific syscalls
-392 64 semtimedop sys_semtimedop -
-393 common semget sys_semget sys_semget
-394 common semctl sys_semctl compat_sys_semctl
-395 common shmget sys_shmget sys_shmget
-396 common shmctl sys_shmctl compat_sys_shmctl
-397 common shmat sys_shmat compat_sys_shmat
-398 common shmdt sys_shmdt sys_shmdt
-399 common msgget sys_msgget sys_msgget
-400 common msgsnd sys_msgsnd compat_sys_msgsnd
-401 common msgrcv sys_msgrcv compat_sys_msgrcv
-402 common msgctl sys_msgctl compat_sys_msgctl
-403 32 clock_gettime64 - sys_clock_gettime
-404 32 clock_settime64 - sys_clock_settime
-405 32 clock_adjtime64 - sys_clock_adjtime
-406 32 clock_getres_time64 - sys_clock_getres
-407 32 clock_nanosleep_time64 - sys_clock_nanosleep
-408 32 timer_gettime64 - sys_timer_gettime
-409 32 timer_settime64 - sys_timer_settime
-410 32 timerfd_gettime64 - sys_timerfd_gettime
-411 32 timerfd_settime64 - sys_timerfd_settime
-412 32 utimensat_time64 - sys_utimensat
-413 32 pselect6_time64 - compat_sys_pselect6_time64
-414 32 ppoll_time64 - compat_sys_ppoll_time64
-416 32 io_pgetevents_time64 - compat_sys_io_pgetevents_time64
-417 32 recvmmsg_time64 - compat_sys_recvmmsg_time64
-418 32 mq_timedsend_time64 - sys_mq_timedsend
-419 32 mq_timedreceive_time64 - sys_mq_timedreceive
-420 32 semtimedop_time64 - sys_semtimedop
-421 32 rt_sigtimedwait_time64 - compat_sys_rt_sigtimedwait_time64
-422 32 futex_time64 - sys_futex
-423 32 sched_rr_get_interval_time64 - sys_sched_rr_get_interval
-424 common pidfd_send_signal sys_pidfd_send_signal sys_pidfd_send_signal
-425 common io_uring_setup sys_io_uring_setup sys_io_uring_setup
-426 common io_uring_enter sys_io_uring_enter sys_io_uring_enter
-427 common io_uring_register sys_io_uring_register sys_io_uring_register
-428 common open_tree sys_open_tree sys_open_tree
-429 common move_mount sys_move_mount sys_move_mount
-430 common fsopen sys_fsopen sys_fsopen
-431 common fsconfig sys_fsconfig sys_fsconfig
-432 common fsmount sys_fsmount sys_fsmount
-433 common fspick sys_fspick sys_fspick
-434 common pidfd_open sys_pidfd_open sys_pidfd_open
-435 common clone3 sys_clone3 sys_clone3
-436 common close_range sys_close_range sys_close_range
-437 common openat2 sys_openat2 sys_openat2
-438 common pidfd_getfd sys_pidfd_getfd sys_pidfd_getfd
-439 common faccessat2 sys_faccessat2 sys_faccessat2
-440 common process_madvise sys_process_madvise sys_process_madvise
-441 common epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
-442 common mount_setattr sys_mount_setattr sys_mount_setattr
-443 common quotactl_fd sys_quotactl_fd sys_quotactl_fd
-444 common landlock_create_ruleset sys_landlock_create_ruleset sys_landlock_create_ruleset
-445 common landlock_add_rule sys_landlock_add_rule sys_landlock_add_rule
-446 common landlock_restrict_self sys_landlock_restrict_self sys_landlock_restrict_self
-447 common memfd_secret sys_memfd_secret sys_memfd_secret
-448 common process_mrelease sys_process_mrelease sys_process_mrelease
-449 common futex_waitv sys_futex_waitv sys_futex_waitv
-450 common set_mempolicy_home_node sys_set_mempolicy_home_node sys_set_mempolicy_home_node
-451 common cachestat sys_cachestat sys_cachestat
-452 common fchmodat2 sys_fchmodat2 sys_fchmodat2
-453 common map_shadow_stack sys_map_shadow_stack sys_map_shadow_stack
-454 common futex_wake sys_futex_wake sys_futex_wake
-455 common futex_wait sys_futex_wait sys_futex_wait
-456 common futex_requeue sys_futex_requeue sys_futex_requeue
-457 common statmount sys_statmount sys_statmount
-458 common listmount sys_listmount sys_listmount
-459 common lsm_get_self_attr sys_lsm_get_self_attr sys_lsm_get_self_attr
-460 common lsm_set_self_attr sys_lsm_set_self_attr sys_lsm_set_self_attr
-461 common lsm_list_modules sys_lsm_list_modules sys_lsm_list_modules
-462 common mseal sys_mseal sys_mseal
-463 common setxattrat sys_setxattrat sys_setxattrat
-464 common getxattrat sys_getxattrat sys_getxattrat
-465 common listxattrat sys_listxattrat sys_listxattrat
-466 common removexattrat sys_removexattrat sys_removexattrat
-467 common open_tree_attr sys_open_tree_attr sys_open_tree_attr
-468 common file_getattr sys_file_getattr sys_file_getattr
-469 common file_setattr sys_file_setattr sys_file_setattr
-470 common listns sys_listns sys_listns
+392 common semtimedop sys_semtimedop
+393 common semget sys_semget
+394 common semctl sys_semctl
+395 common shmget sys_shmget
+396 common shmctl sys_shmctl
+397 common shmat sys_shmat
+398 common shmdt sys_shmdt
+399 common msgget sys_msgget
+400 common msgsnd sys_msgsnd
+401 common msgrcv sys_msgrcv
+402 common msgctl sys_msgctl
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
+441 common epoll_pwait2 sys_epoll_pwait2
+442 common mount_setattr sys_mount_setattr
+443 common quotactl_fd sys_quotactl_fd
+444 common landlock_create_ruleset sys_landlock_create_ruleset
+445 common landlock_add_rule sys_landlock_add_rule
+446 common landlock_restrict_self sys_landlock_restrict_self
+447 common memfd_secret sys_memfd_secret
+448 common process_mrelease sys_process_mrelease
+449 common futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
+451 common cachestat sys_cachestat
+452 common fchmodat2 sys_fchmodat2
+453 common map_shadow_stack sys_map_shadow_stack
+454 common futex_wake sys_futex_wake
+455 common futex_wait sys_futex_wait
+456 common futex_requeue sys_futex_requeue
+457 common statmount sys_statmount
+458 common listmount sys_listmount
+459 common lsm_get_self_attr sys_lsm_get_self_attr
+460 common lsm_set_self_attr sys_lsm_set_self_attr
+461 common lsm_list_modules sys_lsm_list_modules
+462 common mseal sys_mseal
+463 common setxattrat sys_setxattrat
+464 common getxattrat sys_getxattrat
+465 common listxattrat sys_listxattrat
+466 common removexattrat sys_removexattrat
+467 common open_tree_attr sys_open_tree_attr
+468 common file_getattr sys_file_getattr
+469 common file_setattr sys_file_setattr
+470 common listns sys_listns
+471 common rseq_slice_yield sys_rseq_slice_yield
diff --git a/tools/perf/arch/sh/entry/syscalls/syscall.tbl b/tools/perf/arch/sh/entry/syscalls/syscall.tbl
index 969c11325ade..70b315cbe710 100644
--- a/tools/perf/arch/sh/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/sh/entry/syscalls/syscall.tbl
@@ -474,3 +474,4 @@
468 common file_getattr sys_file_getattr
469 common file_setattr sys_file_setattr
470 common listns sys_listns
+471 common rseq_slice_yield sys_rseq_slice_yield
diff --git a/tools/perf/arch/sparc/entry/syscalls/syscall.tbl b/tools/perf/arch/sparc/entry/syscalls/syscall.tbl
index 39aa26b6a50b..7e71bf7fcd14 100644
--- a/tools/perf/arch/sparc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/sparc/entry/syscalls/syscall.tbl
@@ -480,7 +480,7 @@
432 common fsmount sys_fsmount
433 common fspick sys_fspick
434 common pidfd_open sys_pidfd_open
-# 435 reserved for clone3
+435 common clone3 __sys_clone3
436 common close_range sys_close_range
437 common openat2 sys_openat2
438 common pidfd_getfd sys_pidfd_getfd
@@ -516,3 +516,4 @@
468 common file_getattr sys_file_getattr
469 common file_setattr sys_file_setattr
470 common listns sys_listns
+471 common rseq_slice_yield sys_rseq_slice_yield
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
index e979a3eac7a3..f832ebd2d79b 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_32.tbl
@@ -476,3 +476,4 @@
468 i386 file_getattr sys_file_getattr
469 i386 file_setattr sys_file_setattr
470 i386 listns sys_listns
+471 i386 rseq_slice_yield sys_rseq_slice_yield
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 8a4ac4841be6..524155d655da 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -395,6 +395,7 @@
468 common file_getattr sys_file_getattr
469 common file_setattr sys_file_setattr
470 common listns sys_listns
+471 common rseq_slice_yield sys_rseq_slice_yield
#
# Due to a historical design error, certain syscalls are numbered differently
diff --git a/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl b/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
index 438a3b170402..a9bca4e484de 100644
--- a/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/xtensa/entry/syscalls/syscall.tbl
@@ -441,3 +441,4 @@
468 common file_getattr sys_file_getattr
469 common file_setattr sys_file_setattr
470 common listns sys_listns
+471 common rseq_slice_yield sys_rseq_slice_yield
diff --git a/tools/perf/builtin-ftrace.c b/tools/perf/builtin-ftrace.c
index 6b6eec65f93f..4cc33452d79b 100644
--- a/tools/perf/builtin-ftrace.c
+++ b/tools/perf/builtin-ftrace.c
@@ -18,6 +18,7 @@
#include <poll.h>
#include <ctype.h>
#include <linux/capability.h>
+#include <linux/err.h>
#include <linux/string.h>
#include <sys/stat.h>
@@ -1209,8 +1210,12 @@ static int prepare_func_profile(struct perf_ftrace *ftrace)
ftrace->graph_verbose = 0;
ftrace->profile_hash = hashmap__new(profile_hash, profile_equal, NULL);
- if (ftrace->profile_hash == NULL)
- return -ENOMEM;
+ if (IS_ERR(ftrace->profile_hash)) {
+ int err = PTR_ERR(ftrace->profile_hash);
+
+ ftrace->profile_hash = NULL;
+ return err;
+ }
return 0;
}
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
index 63c65788d442..dc5f94862a3b 100644
--- a/tools/perf/pmu-events/Build
+++ b/tools/perf/pmu-events/Build
@@ -214,7 +214,8 @@ ifneq ($(strip $(ORPHAN_FILES)),)
quiet_cmd_rm = RM $^
prune_orphans: $(ORPHAN_FILES)
- $(Q)$(call echo-cmd,rm)rm -f $^
+ # The list of files can be long. Use xargs to prevent issues.
+ $(Q)$(call echo-cmd,rm)echo "$^" | xargs rm -f
JEVENTS_DEPS += prune_orphans
endif
diff --git a/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h b/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h
index 6e1d5b955aae..85253fc8e384 100644
--- a/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h
+++ b/tools/perf/trace/beauty/arch/x86/include/asm/irq_vectors.h
@@ -77,6 +77,7 @@
*/
#define IRQ_WORK_VECTOR 0xf6
+/* IRQ vector for PMIs when running a guest with a mediated PMU. */
#define PERF_GUEST_MEDIATED_PMI_VECTOR 0xf5
#define DEFERRED_ERROR_VECTOR 0xf4
diff --git a/tools/perf/trace/beauty/include/uapi/linux/fs.h b/tools/perf/trace/beauty/include/uapi/linux/fs.h
index 66ca526cf786..70b2b661f42c 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/fs.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/fs.h
@@ -253,6 +253,7 @@ struct file_attr {
#define FS_XFLAG_FILESTREAM 0x00004000 /* use filestream allocator */
#define FS_XFLAG_DAX 0x00008000 /* use DAX for IO */
#define FS_XFLAG_COWEXTSIZE 0x00010000 /* CoW extent size allocator hint */
+#define FS_XFLAG_VERITY 0x00020000 /* fs-verity enabled */
#define FS_XFLAG_HASATTR 0x80000000 /* no DIFLAG for this */
/* the read-only stuff doesn't really belong here, but any other place is
diff --git a/tools/perf/trace/beauty/include/uapi/linux/mount.h b/tools/perf/trace/beauty/include/uapi/linux/mount.h
index 5d3f8c9e3a62..d9d86598d100 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/mount.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/mount.h
@@ -61,7 +61,8 @@
/*
* open_tree() flags.
*/
-#define OPEN_TREE_CLONE 1 /* Clone the target tree and attach the clone */
+#define OPEN_TREE_CLONE (1 << 0) /* Clone the target tree and attach the clone */
+#define OPEN_TREE_NAMESPACE (1 << 1) /* Clone the target tree into a new mount namespace */
#define OPEN_TREE_CLOEXEC O_CLOEXEC /* Close the file on execve() */
/*
@@ -197,7 +198,10 @@ struct statmount {
*/
struct mnt_id_req {
__u32 size;
- __u32 mnt_ns_fd;
+ union {
+ __u32 mnt_ns_fd;
+ __u32 mnt_fd;
+ };
__u64 mnt_id;
__u64 param;
__u64 mnt_ns_id;
@@ -232,4 +236,9 @@ struct mnt_id_req {
#define LSMT_ROOT 0xffffffffffffffff /* root mount */
#define LISTMOUNT_REVERSE (1 << 0) /* List later mounts first */
+/*
+ * @flag bits for statmount(2)
+ */
+#define STATMOUNT_BY_FD 0x00000001U /* want mountinfo for given fd */
+
#endif /* _UAPI_LINUX_MOUNT_H */
diff --git a/tools/perf/trace/beauty/include/uapi/linux/prctl.h b/tools/perf/trace/beauty/include/uapi/linux/prctl.h
index 51c4e8c82b1e..55b0446fff9d 100644
--- a/tools/perf/trace/beauty/include/uapi/linux/prctl.h
+++ b/tools/perf/trace/beauty/include/uapi/linux/prctl.h
@@ -386,4 +386,41 @@ struct prctl_mm_map {
# define PR_FUTEX_HASH_SET_SLOTS 1
# define PR_FUTEX_HASH_GET_SLOTS 2
+/* RSEQ time slice extensions */
+#define PR_RSEQ_SLICE_EXTENSION 79
+# define PR_RSEQ_SLICE_EXTENSION_GET 1
+# define PR_RSEQ_SLICE_EXTENSION_SET 2
+/*
+ * Bits for RSEQ_SLICE_EXTENSION_GET/SET
+ * PR_RSEQ_SLICE_EXT_ENABLE: Enable
+ */
+# define PR_RSEQ_SLICE_EXT_ENABLE 0x01
+
+/*
+ * Get the current indirect branch tracking configuration for the current
+ * thread, this will be the value configured via PR_SET_INDIR_BR_LP_STATUS.
+ */
+#define PR_GET_INDIR_BR_LP_STATUS 80
+
+/*
+ * Set the indirect branch tracking configuration. PR_INDIR_BR_LP_ENABLE will
+ * enable cpu feature for user thread, to track all indirect branches and ensure
+ * they land on arch defined landing pad instruction.
+ * x86 - If enabled, an indirect branch must land on an ENDBRANCH instruction.
+ * arch64 - If enabled, an indirect branch must land on a BTI instruction.
+ * riscv - If enabled, an indirect branch must land on an lpad instruction.
+ * PR_INDIR_BR_LP_DISABLE will disable feature for user thread and indirect
+ * branches will no more be tracked by cpu to land on arch defined landing pad
+ * instruction.
+ */
+#define PR_SET_INDIR_BR_LP_STATUS 81
+# define PR_INDIR_BR_LP_ENABLE (1UL << 0)
+
+/*
+ * Prevent further changes to the specified indirect branch tracking
+ * configuration. All bits may be locked via this call, including
+ * undefined bits.
+ */
+#define PR_LOCK_INDIR_BR_LP_STATUS 82
+
#endif /* _LINUX_PRCTL_H */
diff --git a/tools/perf/util/annotate-arch/annotate-loongarch.c b/tools/perf/util/annotate-arch/annotate-loongarch.c
index 3aeab453a059..950f34e59e5c 100644
--- a/tools/perf/util/annotate-arch/annotate-loongarch.c
+++ b/tools/perf/util/annotate-arch/annotate-loongarch.c
@@ -93,7 +93,7 @@ static int loongarch_jump__parse(const struct arch *arch, struct ins_operands *o
start = map__unmap_ip(map, sym->start);
end = map__unmap_ip(map, sym->end);
- ops->target.outside = target.addr < start || target.addr > end;
+ ops->target.outside = target.addr < start || target.addr >= end;
if (maps__find_ams(thread__maps(ms->thread), &target) == 0 &&
map__rip_2objdump(target.ms.map, map__map_ip(target.ms.map, target.addr)) == ops->target.addr)
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 2e3522905046..63f0ee9d4c03 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -44,6 +44,7 @@
#include "strbuf.h"
#include <regex.h>
#include <linux/bitops.h>
+#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
@@ -137,8 +138,10 @@ static int annotated_source__alloc_histograms(struct annotated_source *src,
return -1;
src->samples = hashmap__new(sym_hist_hash, sym_hist_equal, NULL);
- if (src->samples == NULL)
+ if (IS_ERR(src->samples)) {
zfree(&src->histograms);
+ src->samples = NULL;
+ }
return src->histograms ? 0 : -1;
}
diff --git a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
index 3050fe212666..212f17a3dc72 100644
--- a/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
+++ b/tools/perf/util/cs-etm-decoder/cs-etm-decoder.c
@@ -549,7 +549,7 @@ cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
/*
* Process the PE_CONTEXT packets if we have a valid contextID or VMID.
* If the kernel is running at EL2, the PID is traced in CONTEXTIDR_EL2
- * as VMID, Bit ETM_OPT_CTXTID2 is set in this case.
+ * as VMID, Format attribute 'contextid2' is set in this case.
*/
switch (cs_etm__get_pid_fmt(etmq)) {
case CS_ETM_PIDFMT_CTXTID:
diff --git a/tools/perf/util/cs-etm.c b/tools/perf/util/cs-etm.c
index 95f439c96180..8a639d2e51a4 100644
--- a/tools/perf/util/cs-etm.c
+++ b/tools/perf/util/cs-etm.c
@@ -194,7 +194,7 @@ int cs_etm__get_cpu(struct cs_etm_queue *etmq, u8 trace_chan_id, int *cpu)
* CS_ETM_PIDFMT_CTXTID2: CONTEXTIDR_EL2 is traced.
* CS_ETM_PIDFMT_NONE: No context IDs
*
- * It's possible that the two bits ETM_OPT_CTXTID and ETM_OPT_CTXTID2
+ * It's possible that the two format attributes 'contextid1' and 'contextid2'
* are enabled at the same time when the session runs on an EL2 kernel.
* This means the CONTEXTIDR_EL1 and CONTEXTIDR_EL2 both will be
* recorded in the trace data, the tool will selectively use
@@ -210,15 +210,15 @@ static enum cs_etm_pid_fmt cs_etm__init_pid_fmt(u64 *metadata)
if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
val = metadata[CS_ETM_ETMCR];
/* CONTEXTIDR is traced */
- if (val & BIT(ETM_OPT_CTXTID))
+ if (val & ETMCR_CTXTID)
return CS_ETM_PIDFMT_CTXTID;
} else {
val = metadata[CS_ETMV4_TRCCONFIGR];
/* CONTEXTIDR_EL2 is traced */
- if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT)))
+ if (val & (TRCCONFIGR_VMID | TRCCONFIGR_VMIDOPT))
return CS_ETM_PIDFMT_CTXTID2;
/* CONTEXTIDR_EL1 is traced */
- else if (val & BIT(ETM4_CFG_BIT_CTXTID))
+ else if (val & TRCCONFIGR_CID)
return CS_ETM_PIDFMT_CTXTID;
}
@@ -2914,29 +2914,21 @@ static int cs_etm__process_auxtrace_event(struct perf_session *session,
return 0;
}
-static int cs_etm__setup_timeless_decoding(struct cs_etm_auxtrace *etm)
+static void cs_etm__setup_timeless_decoding(struct cs_etm_auxtrace *etm)
{
- struct evsel *evsel;
- struct evlist *evlist = etm->session->evlist;
+ /* Take first ETM as all options will be the same for all ETMs */
+ u64 *metadata = etm->metadata[0];
/* Override timeless mode with user input from --itrace=Z */
if (etm->synth_opts.timeless_decoding) {
etm->timeless_decoding = true;
- return 0;
+ return;
}
- /*
- * Find the cs_etm evsel and look at what its timestamp setting was
- */
- evlist__for_each_entry(evlist, evsel)
- if (cs_etm__evsel_is_auxtrace(etm->session, evsel)) {
- etm->timeless_decoding =
- !(evsel->core.attr.config & BIT(ETM_OPT_TS));
- return 0;
- }
-
- pr_err("CS ETM: Couldn't find ETM evsel\n");
- return -EINVAL;
+ if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic)
+ etm->timeless_decoding = !(metadata[CS_ETM_ETMCR] & ETMCR_TIMESTAMP_EN);
+ else
+ etm->timeless_decoding = !(metadata[CS_ETMV4_TRCCONFIGR] & TRCCONFIGR_TS);
}
/*
@@ -3499,9 +3491,7 @@ int cs_etm__process_auxtrace_info_full(union perf_event *event,
etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
session->auxtrace = &etm->auxtrace;
- err = cs_etm__setup_timeless_decoding(etm);
- if (err)
- return err;
+ cs_etm__setup_timeless_decoding(etm);
etm->tc.time_shift = tc->time_shift;
etm->tc.time_mult = tc->time_mult;
diff --git a/tools/perf/util/cs-etm.h b/tools/perf/util/cs-etm.h
index a8caeea720aa..aa9bb4a32eca 100644
--- a/tools/perf/util/cs-etm.h
+++ b/tools/perf/util/cs-etm.h
@@ -230,6 +230,21 @@ struct cs_etm_packet_queue {
/* CoreSight trace ID is currently the bottom 7 bits of the value */
#define CORESIGHT_TRACE_ID_VAL_MASK GENMASK(6, 0)
+/* ETMv4 CONFIGR register bits */
+#define TRCCONFIGR_BB BIT(3)
+#define TRCCONFIGR_CCI BIT(4)
+#define TRCCONFIGR_CID BIT(6)
+#define TRCCONFIGR_VMID BIT(7)
+#define TRCCONFIGR_TS BIT(11)
+#define TRCCONFIGR_RS BIT(12)
+#define TRCCONFIGR_VMIDOPT BIT(15)
+
+/* ETMv3 ETMCR register bits */
+#define ETMCR_CYC_ACC BIT(12)
+#define ETMCR_CTXTID BIT(14)
+#define ETMCR_TIMESTAMP_EN BIT(28)
+#define ETMCR_RETURN_STACK BIT(29)
+
int cs_etm__process_auxtrace_info(union perf_event *event,
struct perf_session *session);
void cs_etm_get_default_config(const struct perf_pmu *pmu, struct perf_event_attr *attr);
diff --git a/tools/perf/util/disasm.c b/tools/perf/util/disasm.c
index ddcc488f2e5f..9e0420e14be1 100644
--- a/tools/perf/util/disasm.c
+++ b/tools/perf/util/disasm.c
@@ -384,7 +384,7 @@ static int jump__parse(const struct arch *arch, struct ins_operands *ops, struct
start = map__unmap_ip(map, sym->start);
end = map__unmap_ip(map, sym->end);
- ops->target.outside = target.addr < start || target.addr > end;
+ ops->target.outside = target.addr < start || target.addr >= end;
/*
* FIXME: things like this in _cpp_lex_token (gcc's cc1 program):
diff --git a/tools/perf/util/synthetic-events.c b/tools/perf/util/synthetic-events.c
index ef79433ebc3a..ddf1cbda1902 100644
--- a/tools/perf/util/synthetic-events.c
+++ b/tools/perf/util/synthetic-events.c
@@ -703,6 +703,11 @@ static int perf_event__synthesize_modules_maps_cb(struct map *map, void *data)
memcpy(event->mmap2.filename, dso__long_name(dso), dso__long_name_len(dso) + 1);
+ /* Clear stale build ID from previous module iteration */
+ event->mmap2.header.misc &= ~PERF_RECORD_MISC_MMAP_BUILD_ID;
+ memset(event->mmap2.build_id, 0, sizeof(event->mmap2.build_id));
+ event->mmap2.build_id_size = 0;
+
perf_record_mmap2__read_build_id(&event->mmap2, args->machine, false);
} else {
size = PERF_ALIGN(dso__long_name_len(dso) + 1, sizeof(u64));
diff --git a/tools/power/cpupower/cpupower-service.conf b/tools/power/cpupower/cpupower-service.conf
index 02eabe8e3614..abbb46967565 100644
--- a/tools/power/cpupower/cpupower-service.conf
+++ b/tools/power/cpupower/cpupower-service.conf
@@ -30,3 +30,8 @@
# its policy for the relative importance of performance versus energy savings to
# the processor. See man CPUPOWER-SET(1) for additional details
#PERF_BIAS=
+
+# Set the Energy Performance Preference
+# Available options can be read from
+# /sys/devices/system/cpu/cpufreq/policy0/energy_performance_available_preferences
+#EPP=
diff --git a/tools/power/cpupower/cpupower.sh b/tools/power/cpupower/cpupower.sh
index a37dd4cfdb2b..6283e8bf275d 100644
--- a/tools/power/cpupower/cpupower.sh
+++ b/tools/power/cpupower/cpupower.sh
@@ -23,4 +23,10 @@ then
cpupower set -b "$PERF_BIAS" > /dev/null || ESTATUS=1
fi
+# apply Energy Performance Preference
+if test -n "$EPP"
+then
+ cpupower set -e "$EPP" > /dev/null || ESTATUS=1
+fi
+
exit $ESTATUS
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
index c2117e5650dd..550a942e72ce 100644
--- a/tools/power/cpupower/utils/cpupower-set.c
+++ b/tools/power/cpupower/utils/cpupower-set.c
@@ -124,7 +124,11 @@ int cmd_set(int argc, char **argv)
}
if (params.turbo_boost) {
- ret = cpupower_set_turbo_boost(turbo_boost);
+ if (cpupower_cpu_info.vendor == X86_VENDOR_INTEL)
+ ret = cpupower_set_intel_turbo_boost(turbo_boost);
+ else
+ ret = cpupower_set_generic_turbo_boost(turbo_boost);
+
if (ret)
fprintf(stderr, "Error setting turbo-boost\n");
}
diff --git a/tools/power/cpupower/utils/helpers/helpers.h b/tools/power/cpupower/utils/helpers/helpers.h
index 82ea62bdf5a2..a3ad80b9c2c2 100644
--- a/tools/power/cpupower/utils/helpers/helpers.h
+++ b/tools/power/cpupower/utils/helpers/helpers.h
@@ -104,7 +104,7 @@ extern struct cpupower_cpu_info cpupower_cpu_info;
/* cpuid and cpuinfo helpers **************************/
int cpufreq_has_generic_boost_support(bool *active);
-int cpupower_set_turbo_boost(int turbo_boost);
+int cpupower_set_generic_turbo_boost(int turbo_boost);
/* X86 ONLY ****************************************/
#if defined(__i386__) || defined(__x86_64__)
@@ -143,6 +143,7 @@ extern int decode_pstates(unsigned int cpu, int boost_states,
int cpufreq_has_x86_boost_support(unsigned int cpu, int *support,
int *active, int *states);
+int cpupower_set_intel_turbo_boost(int turbo_boost);
/* AMD P-State stuff **************************/
bool cpupower_amd_pstate_enabled(void);
@@ -189,6 +190,8 @@ static inline int cpupower_set_amd_pstate_mode(char *mode)
static inline int cpufreq_has_x86_boost_support(unsigned int cpu, int *support,
int *active, int *states)
{ return -1; }
+static inline int cpupower_set_intel_turbo_boost(int turbo_boost)
+{ return -1; }
static inline bool cpupower_amd_pstate_enabled(void)
{ return false; }
diff --git a/tools/power/cpupower/utils/helpers/misc.c b/tools/power/cpupower/utils/helpers/misc.c
index 166dc1e470ea..eebfc79a4889 100644
--- a/tools/power/cpupower/utils/helpers/misc.c
+++ b/tools/power/cpupower/utils/helpers/misc.c
@@ -19,6 +19,9 @@ int cpufreq_has_x86_boost_support(unsigned int cpu, int *support, int *active,
{
int ret;
unsigned long long val;
+ char linebuf[MAX_LINE_LEN];
+ char path[SYSFS_PATH_MAX];
+ char *endp;
*support = *active = *states = 0;
@@ -42,8 +45,42 @@ int cpufreq_has_x86_boost_support(unsigned int cpu, int *support, int *active,
}
} else if (cpupower_cpu_info.caps & CPUPOWER_CAP_AMD_PSTATE) {
amd_pstate_boost_init(cpu, support, active);
- } else if (cpupower_cpu_info.caps & CPUPOWER_CAP_INTEL_IDA)
+ } else if (cpupower_cpu_info.caps & CPUPOWER_CAP_INTEL_IDA) {
*support = *active = 1;
+
+ snprintf(path, sizeof(path), PATH_TO_CPU "intel_pstate/no_turbo");
+
+ if (!is_valid_path(path))
+ return 0;
+
+ if (cpupower_read_sysfs(path, linebuf, MAX_LINE_LEN) == 0)
+ return -1;
+	errno = 0;
+	val = strtol(linebuf, &endp, 0);
+ if (endp == linebuf || errno == ERANGE)
+ return -1;
+
+ *active = !val;
+ }
+ return 0;
+}
+
+int cpupower_set_intel_turbo_boost(int turbo_boost)
+{
+ char path[SYSFS_PATH_MAX];
+ char linebuf[2] = {};
+
+ snprintf(path, sizeof(path), PATH_TO_CPU "intel_pstate/no_turbo");
+
+ /* Fallback to generic solution when intel_pstate driver not running */
+ if (!is_valid_path(path))
+ return cpupower_set_generic_turbo_boost(turbo_boost);
+
+ snprintf(linebuf, sizeof(linebuf), "%d", !turbo_boost);
+
+ if (cpupower_write_sysfs(path, linebuf, 2) <= 0)
+ return -1;
+
return 0;
}
@@ -274,7 +311,7 @@ void print_speed(unsigned long speed, int no_rounding)
}
}
-int cpupower_set_turbo_boost(int turbo_boost)
+int cpupower_set_generic_turbo_boost(int turbo_boost)
{
char path[SYSFS_PATH_MAX];
char linebuf[2] = {};
diff --git a/tools/power/cpupower/utils/powercap-info.c b/tools/power/cpupower/utils/powercap-info.c
index 3ea4486f1a0e..e53033488218 100644
--- a/tools/power/cpupower/utils/powercap-info.c
+++ b/tools/power/cpupower/utils/powercap-info.c
@@ -38,11 +38,11 @@ static int powercap_print_one_zone(struct powercap_zone *zone)
printf(" (%s)\n", mode ? "enabled" : "disabled");
if (zone->has_power_uw)
- printf(_("%sPower can be monitored in micro Jules\n"),
+ printf(_("%sPower can be monitored in micro Watts\n"),
pr_prefix);
if (zone->has_energy_uj)
- printf(_("%sPower can be monitored in micro Watts\n"),
+		printf(_("%sEnergy can be monitored in micro Joules\n"),
pr_prefix);
printf("\n");
diff --git a/tools/scripts/syscall.tbl b/tools/scripts/syscall.tbl
index e74868be513c..7a42b32b6577 100644
--- a/tools/scripts/syscall.tbl
+++ b/tools/scripts/syscall.tbl
@@ -411,3 +411,4 @@
468 common file_getattr sys_file_getattr
469 common file_setattr sys_file_setattr
470 common listns sys_listns
+471 common rseq_slice_yield sys_rseq_slice_yield
diff --git a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
index 80ab60905865..cdca912f3afd 100644
--- a/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
+++ b/tools/testing/selftests/hid/progs/hid_bpf_helpers.h
@@ -6,8 +6,10 @@
#define __HID_BPF_HELPERS_H
/* "undefine" structs and enums in vmlinux.h, because we "override" them below */
+#define bpf_wq bpf_wq___not_used
#define hid_bpf_ctx hid_bpf_ctx___not_used
#define hid_bpf_ops hid_bpf_ops___not_used
+#define hid_device hid_device___not_used
#define hid_report_type hid_report_type___not_used
#define hid_class_request hid_class_request___not_used
#define hid_bpf_attach_flags hid_bpf_attach_flags___not_used
@@ -27,8 +29,10 @@
#include "vmlinux.h"
+#undef bpf_wq
#undef hid_bpf_ctx
#undef hid_bpf_ops
+#undef hid_device
#undef hid_report_type
#undef hid_class_request
#undef hid_bpf_attach_flags
@@ -55,6 +59,14 @@ enum hid_report_type {
HID_REPORT_TYPES,
};
+struct hid_device {
+ unsigned int id;
+} __attribute__((preserve_access_index));
+
+struct bpf_wq {
+ __u64 __opaque[2];
+};
+
struct hid_bpf_ctx {
struct hid_device *hid;
__u32 allocated_size;
diff --git a/tools/testing/selftests/kvm/Makefile.kvm b/tools/testing/selftests/kvm/Makefile.kvm
index fdec90e85467..dc68371f76a3 100644
--- a/tools/testing/selftests/kvm/Makefile.kvm
+++ b/tools/testing/selftests/kvm/Makefile.kvm
@@ -71,6 +71,7 @@ TEST_GEN_PROGS_x86 += x86/cpuid_test
TEST_GEN_PROGS_x86 += x86/cr4_cpuid_sync_test
TEST_GEN_PROGS_x86 += x86/dirty_log_page_splitting_test
TEST_GEN_PROGS_x86 += x86/feature_msrs_test
+TEST_GEN_PROGS_x86 += x86/evmcs_smm_controls_test
TEST_GEN_PROGS_x86 += x86/exit_on_emulation_failure_test
TEST_GEN_PROGS_x86 += x86/fastops_test
TEST_GEN_PROGS_x86 += x86/fix_hypercall_test
diff --git a/tools/testing/selftests/kvm/guest_memfd_test.c b/tools/testing/selftests/kvm/guest_memfd_test.c
index 618c937f3c90..cc329b57ce2e 100644
--- a/tools/testing/selftests/kvm/guest_memfd_test.c
+++ b/tools/testing/selftests/kvm/guest_memfd_test.c
@@ -80,7 +80,7 @@ static void test_mbind(int fd, size_t total_size)
{
const unsigned long nodemask_0 = 1; /* nid: 0 */
unsigned long nodemask = 0;
- unsigned long maxnode = 8;
+ unsigned long maxnode = BITS_PER_TYPE(nodemask);
int policy;
char *mem;
int ret;
diff --git a/tools/testing/selftests/kvm/include/x86/processor.h b/tools/testing/selftests/kvm/include/x86/processor.h
index 4ebae4269e68..469a22122157 100644
--- a/tools/testing/selftests/kvm/include/x86/processor.h
+++ b/tools/testing/selftests/kvm/include/x86/processor.h
@@ -557,6 +557,11 @@ static inline uint64_t get_cr0(void)
return cr0;
}
+static inline void set_cr0(uint64_t val)
+{
+ __asm__ __volatile__("mov %0, %%cr0" : : "r" (val) : "memory");
+}
+
static inline uint64_t get_cr3(void)
{
uint64_t cr3;
@@ -566,6 +571,11 @@ static inline uint64_t get_cr3(void)
return cr3;
}
+static inline void set_cr3(uint64_t val)
+{
+ __asm__ __volatile__("mov %0, %%cr3" : : "r" (val) : "memory");
+}
+
static inline uint64_t get_cr4(void)
{
uint64_t cr4;
@@ -580,6 +590,19 @@ static inline void set_cr4(uint64_t val)
__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}
+static inline uint64_t get_cr8(void)
+{
+ uint64_t cr8;
+
+ __asm__ __volatile__("mov %%cr8, %[cr8]" : [cr8]"=r"(cr8));
+ return cr8;
+}
+
+static inline void set_cr8(uint64_t val)
+{
+ __asm__ __volatile__("mov %0, %%cr8" : : "r" (val) : "memory");
+}
+
static inline void set_idt(const struct desc_ptr *idt_desc)
{
__asm__ __volatile__("lidt %0"::"m"(*idt_desc));
diff --git a/tools/testing/selftests/kvm/include/x86/smm.h b/tools/testing/selftests/kvm/include/x86/smm.h
new file mode 100644
index 000000000000..19337c34f13e
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/x86/smm.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef SELFTEST_KVM_SMM_H
+#define SELFTEST_KVM_SMM_H
+
+#include "kvm_util.h"
+
+#define SMRAM_SIZE 65536
+#define SMRAM_MEMSLOT ((1 << 16) | 1)
+#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
+
+void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
+ uint64_t smram_gpa,
+ const void *smi_handler, size_t handler_size);
+
+void inject_smi(struct kvm_vcpu *vcpu);
+
+#endif /* SELFTEST_KVM_SMM_H */
diff --git a/tools/testing/selftests/kvm/lib/x86/processor.c b/tools/testing/selftests/kvm/lib/x86/processor.c
index fab18e9be66c..23a44941e283 100644
--- a/tools/testing/selftests/kvm/lib/x86/processor.c
+++ b/tools/testing/selftests/kvm/lib/x86/processor.c
@@ -8,6 +8,7 @@
#include "kvm_util.h"
#include "pmu.h"
#include "processor.h"
+#include "smm.h"
#include "svm_util.h"
#include "sev.h"
#include "vmx.h"
@@ -1444,3 +1445,28 @@ bool kvm_arch_has_default_irqchip(void)
{
return true;
}
+
+void setup_smram(struct kvm_vm *vm, struct kvm_vcpu *vcpu,
+ uint64_t smram_gpa,
+ const void *smi_handler, size_t handler_size)
+{
+ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, smram_gpa,
+ SMRAM_MEMSLOT, SMRAM_PAGES, 0);
+ TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, smram_gpa,
+ SMRAM_MEMSLOT) == smram_gpa,
+ "Could not allocate guest physical addresses for SMRAM");
+
+ memset(addr_gpa2hva(vm, smram_gpa), 0x0, SMRAM_SIZE);
+ memcpy(addr_gpa2hva(vm, smram_gpa) + 0x8000, smi_handler, handler_size);
+ vcpu_set_msr(vcpu, MSR_IA32_SMBASE, smram_gpa);
+}
+
+void inject_smi(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu_events events;
+
+ vcpu_events_get(vcpu, &events);
+ events.smi.pending = 1;
+ events.flags |= KVM_VCPUEVENT_VALID_SMM;
+ vcpu_events_set(vcpu, &events);
+}
diff --git a/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c b/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c
new file mode 100644
index 000000000000..af7c90103396
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86/evmcs_smm_controls_test.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2026, Red Hat, Inc.
+ *
+ * Test that vmx_leave_smm() validates vmcs12 controls before re-entering
+ * nested guest mode on RSM.
+ */
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/ioctl.h>
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "smm.h"
+#include "hyperv.h"
+#include "vmx.h"
+
+#define SMRAM_GPA 0x1000000
+#define SMRAM_STAGE 0xfe
+
+#define SYNC_PORT 0xe
+
+#define STR(x) #x
+#define XSTR(s) STR(s)
+
+/*
+ * SMI handler: runs in real-address mode.
+ * Reports SMRAM_STAGE via port IO, then does RSM.
+ */
+static uint8_t smi_handler[] = {
+ 0xb0, SMRAM_STAGE, /* mov $SMRAM_STAGE, %al */
+ 0xe4, SYNC_PORT, /* in $SYNC_PORT, %al */
+ 0x0f, 0xaa, /* rsm */
+};
+
+static inline void sync_with_host(uint64_t phase)
+{
+ asm volatile("in $" XSTR(SYNC_PORT) ", %%al \n"
+ : "+a" (phase));
+}
+
+static void l2_guest_code(void)
+{
+ sync_with_host(1);
+
+ /* After SMI+RSM with invalid controls, we should not reach here. */
+ vmcall();
+}
+
+static void guest_code(struct vmx_pages *vmx_pages,
+ struct hyperv_test_pages *hv_pages)
+{
+#define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+
+ /* Set up Hyper-V enlightenments and eVMCS */
+ wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
+ enable_vp_assist(hv_pages->vp_assist_gpa, hv_pages->vp_assist);
+ evmcs_enable();
+
+ GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
+ GUEST_ASSERT(load_evmcs(hv_pages));
+ prepare_vmcs(vmx_pages, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ GUEST_ASSERT(!vmlaunch());
+
+ /* L2 exits via vmcall if test fails */
+ sync_with_host(2);
+}
+
+int main(int argc, char *argv[])
+{
+ vm_vaddr_t vmx_pages_gva = 0, hv_pages_gva = 0;
+ struct hyperv_test_pages *hv;
+ struct hv_enlightened_vmcs *evmcs;
+ struct kvm_vcpu *vcpu;
+ struct kvm_vm *vm;
+ struct kvm_regs regs;
+ int stage_reported;
+
+ TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS));
+ TEST_REQUIRE(kvm_has_cap(KVM_CAP_X86_SMM));
+
+ vm = vm_create_with_one_vcpu(&vcpu, guest_code);
+
+ setup_smram(vm, vcpu, SMRAM_GPA, smi_handler, sizeof(smi_handler));
+
+ vcpu_set_hv_cpuid(vcpu);
+ vcpu_enable_evmcs(vcpu);
+ vcpu_alloc_vmx(vm, &vmx_pages_gva);
+ hv = vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva);
+ vcpu_args_set(vcpu, 2, vmx_pages_gva, hv_pages_gva);
+
+ vcpu_run(vcpu);
+
+ /* L2 is running and syncs with host. */
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
+ vcpu_regs_get(vcpu, &regs);
+ stage_reported = regs.rax & 0xff;
+ TEST_ASSERT(stage_reported == 1,
+ "Expected stage 1, got %d", stage_reported);
+
+ /* Inject SMI while L2 is running. */
+ inject_smi(vcpu);
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
+ vcpu_regs_get(vcpu, &regs);
+ stage_reported = regs.rax & 0xff;
+ TEST_ASSERT(stage_reported == SMRAM_STAGE,
+ "Expected SMM handler stage %#x, got %#x",
+ SMRAM_STAGE, stage_reported);
+
+ /*
+ * Guest is now paused in the SMI handler, about to execute RSM.
+ * Hack the eVMCS page to set-up invalid pin-based execution
+ * control (PIN_BASED_VIRTUAL_NMIS without PIN_BASED_NMI_EXITING).
+ */
+ evmcs = hv->enlightened_vmcs_hva;
+ evmcs->pin_based_vm_exec_control |= PIN_BASED_VIRTUAL_NMIS;
+ evmcs->hv_clean_fields = 0;
+
+ /*
+ * Trigger copy_enlightened_to_vmcs12() via KVM_GET_NESTED_STATE,
+ * copying the invalid pin_based_vm_exec_control into cached_vmcs12.
+ */
+ union {
+ struct kvm_nested_state state;
+ char state_[16384];
+ } nested_state_buf;
+
+ memset(&nested_state_buf, 0, sizeof(nested_state_buf));
+ nested_state_buf.state.size = sizeof(nested_state_buf);
+ vcpu_nested_state_get(vcpu, &nested_state_buf.state);
+
+ /*
+ * Resume the guest. The SMI handler executes RSM, which calls
+ * vmx_leave_smm(). nested_vmx_check_controls() should detect
+ * VIRTUAL_NMIS without NMI_EXITING and cause a triple fault.
+ */
+ vcpu_run(vcpu);
+ TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN);
+
+ kvm_vm_free(vm);
+ return 0;
+}
diff --git a/tools/testing/selftests/kvm/x86/sev_smoke_test.c b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
index 86ad1c7d068f..8bd37a476f15 100644
--- a/tools/testing/selftests/kvm/x86/sev_smoke_test.c
+++ b/tools/testing/selftests/kvm/x86/sev_smoke_test.c
@@ -13,6 +13,30 @@
#include "linux/psp-sev.h"
#include "sev.h"
+static void guest_sev_test_msr(uint32_t msr)
+{
+ uint64_t val = rdmsr(msr);
+
+ wrmsr(msr, val);
+ GUEST_ASSERT(val == rdmsr(msr));
+}
+
+#define guest_sev_test_reg(reg) \
+do { \
+ uint64_t val = get_##reg(); \
+ \
+ set_##reg(val); \
+ GUEST_ASSERT(val == get_##reg()); \
+} while (0)
+
+static void guest_sev_test_regs(void)
+{
+ guest_sev_test_msr(MSR_EFER);
+ guest_sev_test_reg(cr0);
+ guest_sev_test_reg(cr3);
+ guest_sev_test_reg(cr4);
+ guest_sev_test_reg(cr8);
+}
#define XFEATURE_MASK_X87_AVX (XFEATURE_MASK_FP | XFEATURE_MASK_SSE | XFEATURE_MASK_YMM)
@@ -24,6 +48,8 @@ static void guest_snp_code(void)
GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_ES_ENABLED);
GUEST_ASSERT(sev_msr & MSR_AMD64_SEV_SNP_ENABLED);
+ guest_sev_test_regs();
+
wrmsr(MSR_AMD64_SEV_ES_GHCB, GHCB_MSR_TERM_REQ);
vmgexit();
}
@@ -34,6 +60,8 @@ static void guest_sev_es_code(void)
GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ES_ENABLED);
+ guest_sev_test_regs();
+
/*
* TODO: Add GHCB and ucall support for SEV-ES guests. For now, simply
* force "termination" to signal "done" via the GHCB MSR protocol.
@@ -47,6 +75,8 @@ static void guest_sev_code(void)
GUEST_ASSERT(this_cpu_has(X86_FEATURE_SEV));
GUEST_ASSERT(rdmsr(MSR_AMD64_SEV) & MSR_AMD64_SEV_ENABLED);
+ guest_sev_test_regs();
+
GUEST_DONE();
}
diff --git a/tools/testing/selftests/kvm/x86/smm_test.c b/tools/testing/selftests/kvm/x86/smm_test.c
index 55c88d664a94..ade8412bf94a 100644
--- a/tools/testing/selftests/kvm/x86/smm_test.c
+++ b/tools/testing/selftests/kvm/x86/smm_test.c
@@ -14,13 +14,11 @@
#include "test_util.h"
#include "kvm_util.h"
+#include "smm.h"
#include "vmx.h"
#include "svm_util.h"
-#define SMRAM_SIZE 65536
-#define SMRAM_MEMSLOT ((1 << 16) | 1)
-#define SMRAM_PAGES (SMRAM_SIZE / PAGE_SIZE)
#define SMRAM_GPA 0x1000000
#define SMRAM_STAGE 0xfe
@@ -113,18 +111,6 @@ static void guest_code(void *arg)
sync_with_host(DONE);
}
-void inject_smi(struct kvm_vcpu *vcpu)
-{
- struct kvm_vcpu_events events;
-
- vcpu_events_get(vcpu, &events);
-
- events.smi.pending = 1;
- events.flags |= KVM_VCPUEVENT_VALID_SMM;
-
- vcpu_events_set(vcpu, &events);
-}
-
int main(int argc, char *argv[])
{
vm_vaddr_t nested_gva = 0;
@@ -140,16 +126,7 @@ int main(int argc, char *argv[])
/* Create VM */
vm = vm_create_with_one_vcpu(&vcpu, guest_code);
- vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA,
- SMRAM_MEMSLOT, SMRAM_PAGES, 0);
- TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, SMRAM_MEMSLOT)
- == SMRAM_GPA, "could not allocate guest physical addresses?");
-
- memset(addr_gpa2hva(vm, SMRAM_GPA), 0x0, SMRAM_SIZE);
- memcpy(addr_gpa2hva(vm, SMRAM_GPA) + 0x8000, smi_handler,
- sizeof(smi_handler));
-
- vcpu_set_msr(vcpu, MSR_IA32_SMBASE, SMRAM_GPA);
+ setup_smram(vm, vcpu, SMRAM_GPA, smi_handler, sizeof(smi_handler));
if (kvm_has_cap(KVM_CAP_NESTED_STATE)) {
if (kvm_cpu_has(X86_FEATURE_SVM))
diff --git a/tools/testing/selftests/powerpc/copyloops/.gitignore b/tools/testing/selftests/powerpc/copyloops/.gitignore
index 7283e8b07b75..80d4270a71ac 100644
--- a/tools/testing/selftests/powerpc/copyloops/.gitignore
+++ b/tools/testing/selftests/powerpc/copyloops/.gitignore
@@ -2,8 +2,8 @@
copyuser_64_t0
copyuser_64_t1
copyuser_64_t2
-copyuser_p7_t0
-copyuser_p7_t1
+copyuser_p7
+copyuser_p7_vmx
memcpy_64_t0
memcpy_64_t1
memcpy_64_t2
diff --git a/tools/testing/selftests/powerpc/copyloops/Makefile b/tools/testing/selftests/powerpc/copyloops/Makefile
index 42940f92d832..0c8efb0bddeb 100644
--- a/tools/testing/selftests/powerpc/copyloops/Makefile
+++ b/tools/testing/selftests/powerpc/copyloops/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
TEST_GEN_PROGS := copyuser_64_t0 copyuser_64_t1 copyuser_64_t2 \
- copyuser_p7_t0 copyuser_p7_t1 \
+ copyuser_p7 copyuser_p7_vmx \
memcpy_64_t0 memcpy_64_t1 memcpy_64_t2 \
memcpy_p7_t0 memcpy_p7_t1 copy_mc_64 \
copyuser_64_exc_t0 copyuser_64_exc_t1 copyuser_64_exc_t2 \
@@ -28,10 +28,15 @@ $(OUTPUT)/copyuser_64_t%: copyuser_64.S $(EXTRA_SOURCES)
-D SELFTEST_CASE=$(subst copyuser_64_t,,$(notdir $@)) \
-o $@ $^
-$(OUTPUT)/copyuser_p7_t%: copyuser_power7.S $(EXTRA_SOURCES)
+$(OUTPUT)/copyuser_p7: copyuser_power7.S $(EXTRA_SOURCES)
$(CC) $(CPPFLAGS) $(CFLAGS) \
-D COPY_LOOP=test___copy_tofrom_user_power7 \
- -D SELFTEST_CASE=$(subst copyuser_p7_t,,$(notdir $@)) \
+ -o $@ $^
+
+$(OUTPUT)/copyuser_p7_vmx: copyuser_power7.S $(EXTRA_SOURCES) ../utils.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) \
+ -D COPY_LOOP=test___copy_tofrom_user_power7_vmx \
+ -D VMX_TEST \
-o $@ $^
# Strictly speaking, we only need the memcpy_64 test cases for big-endian
diff --git a/tools/testing/selftests/powerpc/copyloops/stubs.S b/tools/testing/selftests/powerpc/copyloops/stubs.S
index ec8bcf2bf1c2..3a9cb8c9a3ee 100644
--- a/tools/testing/selftests/powerpc/copyloops/stubs.S
+++ b/tools/testing/selftests/powerpc/copyloops/stubs.S
@@ -1,13 +1,5 @@
#include <asm/ppc_asm.h>
-FUNC_START(enter_vmx_usercopy)
- li r3,1
- blr
-
-FUNC_START(exit_vmx_usercopy)
- li r3,0
- blr
-
FUNC_START(enter_vmx_ops)
li r3,1
blr
diff --git a/tools/testing/selftests/powerpc/copyloops/validate.c b/tools/testing/selftests/powerpc/copyloops/validate.c
index 0f6873618552..fb822534fbe9 100644
--- a/tools/testing/selftests/powerpc/copyloops/validate.c
+++ b/tools/testing/selftests/powerpc/copyloops/validate.c
@@ -12,6 +12,10 @@
#define BUFLEN (MAX_LEN+MAX_OFFSET+2*MIN_REDZONE)
#define POISON 0xa5
+#ifdef VMX_TEST
+#define VMX_COPY_THRESHOLD 3328
+#endif
+
unsigned long COPY_LOOP(void *to, const void *from, unsigned long size);
static void do_one(char *src, char *dst, unsigned long src_off,
@@ -81,8 +85,12 @@ int test_copy_loop(void)
/* Fill with sequential bytes */
for (i = 0; i < BUFLEN; i++)
fill[i] = i & 0xff;
-
+#ifdef VMX_TEST
+ /* Force sizes above kernel VMX threshold (3328) */
+ for (len = VMX_COPY_THRESHOLD + 1; len < MAX_LEN; len++) {
+#else
for (len = 1; len < MAX_LEN; len++) {
+#endif
for (src_off = 0; src_off < MAX_OFFSET; src_off++) {
for (dst_off = 0; dst_off < MAX_OFFSET; dst_off++) {
do_one(src, dst, src_off, dst_off, len,
@@ -96,5 +104,10 @@ int test_copy_loop(void)
int main(void)
{
+#ifdef VMX_TEST
+ /* Skip if Altivec not present */
+ SKIP_IF_MSG(!have_hwcap(PPC_FEATURE_HAS_ALTIVEC), "ALTIVEC not supported");
+#endif
+
return test_harness(test_copy_loop, str(COPY_LOOP));
}
diff --git a/tools/testing/selftests/sched_ext/util.c b/tools/testing/selftests/sched_ext/util.c
index e47769c91918..2111329ed289 100644
--- a/tools/testing/selftests/sched_ext/util.c
+++ b/tools/testing/selftests/sched_ext/util.c
@@ -60,11 +60,11 @@ int file_write_long(const char *path, long val)
char buf[64];
int ret;
- ret = sprintf(buf, "%lu", val);
+ ret = sprintf(buf, "%ld", val);
if (ret < 0)
return ret;
- if (write_text(path, buf, sizeof(buf)) <= 0)
+ if (write_text(path, buf, ret) <= 0)
return -1;
return 0;
diff --git a/virt/kvm/binary_stats.c b/virt/kvm/binary_stats.c
index eefca6c69f51..76ce697c773b 100644
--- a/virt/kvm/binary_stats.c
+++ b/virt/kvm/binary_stats.c
@@ -50,7 +50,7 @@
* Return: the number of bytes that has been successfully read
*/
ssize_t kvm_stats_read(char *id, const struct kvm_stats_header *header,
- const struct _kvm_stats_desc *desc,
+ const struct kvm_stats_desc *desc,
void *stats, size_t size_stats,
char __user *user_buffer, size_t size, loff_t *offset)
{
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 1bc1da66b4b0..9093251beb39 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -973,9 +973,9 @@ static void kvm_free_memslots(struct kvm *kvm, struct kvm_memslots *slots)
kvm_free_memslot(kvm, memslot);
}
-static umode_t kvm_stats_debugfs_mode(const struct _kvm_stats_desc *pdesc)
+static umode_t kvm_stats_debugfs_mode(const struct kvm_stats_desc *desc)
{
- switch (pdesc->desc.flags & KVM_STATS_TYPE_MASK) {
+ switch (desc->flags & KVM_STATS_TYPE_MASK) {
case KVM_STATS_TYPE_INSTANT:
return 0444;
case KVM_STATS_TYPE_CUMULATIVE:
@@ -1010,7 +1010,7 @@ static int kvm_create_vm_debugfs(struct kvm *kvm, const char *fdname)
struct dentry *dent;
char dir_name[ITOA_MAX_LEN * 2];
struct kvm_stat_data *stat_data;
- const struct _kvm_stats_desc *pdesc;
+ const struct kvm_stats_desc *pdesc;
int i, ret = -ENOMEM;
int kvm_debugfs_num_entries = kvm_vm_stats_header.num_desc +
kvm_vcpu_stats_header.num_desc;
@@ -6171,11 +6171,11 @@ static int kvm_stat_data_get(void *data, u64 *val)
switch (stat_data->kind) {
case KVM_STAT_VM:
r = kvm_get_stat_per_vm(stat_data->kvm,
- stat_data->desc->desc.offset, val);
+ stat_data->desc->offset, val);
break;
case KVM_STAT_VCPU:
r = kvm_get_stat_per_vcpu(stat_data->kvm,
- stat_data->desc->desc.offset, val);
+ stat_data->desc->offset, val);
break;
}
@@ -6193,11 +6193,11 @@ static int kvm_stat_data_clear(void *data, u64 val)
switch (stat_data->kind) {
case KVM_STAT_VM:
r = kvm_clear_stat_per_vm(stat_data->kvm,
- stat_data->desc->desc.offset);
+ stat_data->desc->offset);
break;
case KVM_STAT_VCPU:
r = kvm_clear_stat_per_vcpu(stat_data->kvm,
- stat_data->desc->desc.offset);
+ stat_data->desc->offset);
break;
}
@@ -6345,7 +6345,7 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm)
static void kvm_init_debug(void)
{
const struct file_operations *fops;
- const struct _kvm_stats_desc *pdesc;
+ const struct kvm_stats_desc *pdesc;
int i;
kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
@@ -6358,7 +6358,7 @@ static void kvm_init_debug(void)
fops = &vm_stat_readonly_fops;
debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
kvm_debugfs_dir,
- (void *)(long)pdesc->desc.offset, fops);
+ (void *)(long)pdesc->offset, fops);
}
for (i = 0; i < kvm_vcpu_stats_header.num_desc; ++i) {
@@ -6369,7 +6369,7 @@ static void kvm_init_debug(void)
fops = &vcpu_stat_readonly_fops;
debugfs_create_file(pdesc->name, kvm_stats_debugfs_mode(pdesc),
kvm_debugfs_dir,
- (void *)(long)pdesc->desc.offset, fops);
+ (void *)(long)pdesc->offset, fops);
}
}