Merge tag 'lsm-pr-20240131' of git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/lsm
author		Linus Torvalds <torvalds@linux-foundation.org>
		Thu, 1 Feb 2024 18:00:28 +0000 (10:00 -0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Thu, 1 Feb 2024 18:00:28 +0000 (10:00 -0800)
Pull lsm fixes from Paul Moore:
 "Two small patches to fix some problems relating to LSM hook return
  values and how the individual LSMs interact"

* tag 'lsm-pr-20240131' of git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/lsm:
  lsm: fix default return value of the socket_getpeersec_*() hooks
  lsm: fix the logic in security_inode_getsecctx()

455 files changed:
CREDITS
Documentation/ABI/testing/sysfs-platform-silicom
Documentation/accel/introduction.rst
Documentation/admin-guide/kernel-parameters.rst
Documentation/admin-guide/kernel-per-CPU-kthreads.rst
Documentation/dev-tools/kunit/usage.rst
Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml
Documentation/devicetree/bindings/media/cnm,wave521c.yaml
Documentation/filesystems/overlayfs.rst
Documentation/sphinx/templates/kernel-toc.html
MAINTAINERS
Makefile
arch/Kconfig
arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts
arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts
arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi
arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi
arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts
arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi
arch/arm64/boot/dts/exynos/google/gs101.dtsi
arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
arch/loongarch/include/asm/kvm_vcpu.h
arch/loongarch/kernel/smp.c
arch/loongarch/kvm/mmu.c
arch/loongarch/mm/tlb.c
arch/mips/alchemy/common/prom.c
arch/mips/alchemy/common/setup.c
arch/mips/bcm63xx/boards/board_bcm963xx.c
arch/mips/bcm63xx/dev-rng.c
arch/mips/bcm63xx/dev-uart.c
arch/mips/bcm63xx/dev-wdt.c
arch/mips/bcm63xx/irq.c
arch/mips/bcm63xx/setup.c
arch/mips/bcm63xx/timer.c
arch/mips/cobalt/setup.c
arch/mips/fw/arc/memory.c
arch/mips/include/asm/mach-au1x00/au1000.h
arch/mips/include/asm/mach-cobalt/cobalt.h
arch/mips/kernel/elf.c
arch/mips/kernel/traps.c
arch/mips/lantiq/prom.c
arch/mips/loongson64/init.c
arch/mips/loongson64/numa.c
arch/mips/sgi-ip27/Makefile
arch/mips/sgi-ip27/ip27-berr.c
arch/mips/sgi-ip27/ip27-common.h
arch/mips/sgi-ip27/ip27-hubio.c [deleted file]
arch/mips/sgi-ip27/ip27-irq.c
arch/mips/sgi-ip27/ip27-memory.c
arch/mips/sgi-ip27/ip27-nmi.c
arch/mips/sgi-ip30/ip30-console.c
arch/mips/sgi-ip30/ip30-setup.c
arch/mips/sgi-ip32/crime.c
arch/mips/sgi-ip32/ip32-berr.c
arch/mips/sgi-ip32/ip32-common.h [new file with mode: 0644]
arch/mips/sgi-ip32/ip32-irq.c
arch/mips/sgi-ip32/ip32-memory.c
arch/mips/sgi-ip32/ip32-reset.c
arch/mips/sgi-ip32/ip32-setup.c
arch/riscv/boot/dts/sophgo/sg2042.dtsi
arch/riscv/net/bpf_jit_comp64.c
arch/x86/include/asm/cpufeatures.h
arch/x86/include/asm/intel-family.h
arch/x86/include/asm/kmsan.h
arch/x86/include/asm/syscall_wrapper.h
arch/x86/kernel/alternative.c
arch/x86/kernel/cpu/amd.c
block/blk-map.c
block/ioctl.c
block/partitions/core.c
drivers/accel/ivpu/ivpu_debugfs.c
drivers/accel/ivpu/ivpu_drv.c
drivers/accel/ivpu/ivpu_drv.h
drivers/accel/ivpu/ivpu_gem.c
drivers/accel/ivpu/ivpu_gem.h
drivers/accel/ivpu/ivpu_hw_37xx.c
drivers/accel/ivpu/ivpu_hw_40xx.c
drivers/accel/ivpu/ivpu_ipc.c
drivers/accel/ivpu/ivpu_job.c
drivers/accel/ivpu/ivpu_job.h
drivers/accel/ivpu/ivpu_mmu.c
drivers/accel/ivpu/ivpu_mmu.h
drivers/accel/ivpu/ivpu_mmu_context.c
drivers/accel/ivpu/ivpu_pm.c
drivers/accel/ivpu/ivpu_pm.h
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/libata-sata.c
drivers/block/aoe/aoeblk.c
drivers/block/rbd.c
drivers/cpufreq/amd-pstate.c
drivers/cpufreq/intel_pstate.c
drivers/cxl/core/region.c
drivers/cxl/pci.c
drivers/dpll/dpll_core.c
drivers/dpll/dpll_core.h
drivers/dpll/dpll_netlink.c
drivers/firmware/arm_ffa/driver.c
drivers/firmware/arm_scmi/clock.c
drivers/firmware/arm_scmi/common.h
drivers/firmware/arm_scmi/mailbox.c
drivers/firmware/arm_scmi/perf.c
drivers/firmware/arm_scmi/raw_mode.c
drivers/firmware/arm_scmi/shmem.c
drivers/firmware/sysfb.c
drivers/gpio/gpio-eic-sprd.c
drivers/gpio/gpiolib-acpi.c
drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c
drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
drivers/gpu/drm/amd/amdgpu/gfx_v11_0.c
drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dc_types.h
drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
drivers/gpu/drm/amd/display/modules/power/power_helpers.c
drivers/gpu/drm/amd/display/modules/power/power_helpers.h
drivers/gpu/drm/amd/include/amd_shared.h
drivers/gpu/drm/amd/include/amdgpu_reg_state.h
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_6_ppt.c
drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
drivers/gpu/drm/bridge/analogix/anx7625.c
drivers/gpu/drm/bridge/analogix/anx7625.h
drivers/gpu/drm/bridge/parade-ps8640.c
drivers/gpu/drm/bridge/samsung-dsim.c
drivers/gpu/drm/bridge/sii902x.c
drivers/gpu/drm/display/drm_dp_mst_topology.c
drivers/gpu/drm/exynos/exynos5433_drm_decon.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_gsc.c
drivers/gpu/drm/i915/Makefile
drivers/gpu/drm/i915/display/icl_dsi.c
drivers/gpu/drm/i915/display/intel_psr.c
drivers/gpu/drm/nouveau/nouveau_fence.c
drivers/gpu/drm/nouveau/nouveau_fence.h
drivers/gpu/drm/panel/Kconfig
drivers/gpu/drm/panel/panel-samsung-s6d7aa0.c
drivers/gpu/drm/panel/panel-simple.c
drivers/gpu/drm/scheduler/sched_main.c
drivers/gpu/drm/tests/drm_mm_test.c
drivers/gpu/drm/ttm/ttm_device.c
drivers/gpu/drm/v3d/v3d_submit.c
drivers/gpu/drm/xe/compat-i915-headers/gem/i915_gem_object.h
drivers/gpu/drm/xe/tests/xe_wa_test.c
drivers/gpu/drm/xe/xe_device.c
drivers/gpu/drm/xe/xe_dma_buf.c
drivers/gpu/drm/xe/xe_hwmon.c
drivers/gpu/drm/xe/xe_migrate.c
drivers/gpu/drm/xe/xe_mmio.c
drivers/gpu/drm/xe/xe_vm.c
drivers/md/raid1.c
drivers/media/common/videobuf2/videobuf2-core.c
drivers/media/common/videobuf2/videobuf2-v4l2.c
drivers/media/platform/chips-media/wave5/wave5-vpu.c
drivers/net/ethernet/8390/8390.c
drivers/net/ethernet/8390/8390p.c
drivers/net/ethernet/8390/apne.c
drivers/net/ethernet/8390/hydra.c
drivers/net/ethernet/8390/stnic.c
drivers/net/ethernet/8390/zorro8390.c
drivers/net/ethernet/broadcom/bcm4908_enet.c
drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
drivers/net/ethernet/broadcom/bgmac-bcma.c
drivers/net/ethernet/broadcom/bgmac-platform.c
drivers/net/ethernet/broadcom/bgmac.c
drivers/net/ethernet/broadcom/bnxt/bnxt.c
drivers/net/ethernet/broadcom/bnxt/bnxt.h
drivers/net/ethernet/broadcom/bnxt/bnxt_dcb.c
drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c
drivers/net/ethernet/cavium/liquidio/lio_core.c
drivers/net/ethernet/cirrus/ep93xx_eth.c
drivers/net/ethernet/engleder/tsnep_main.c
drivers/net/ethernet/ezchip/nps_enet.c
drivers/net/ethernet/freescale/enetc/enetc.c
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/fsl_pq_mdio.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_xsk.c
drivers/net/ethernet/intel/ice/ice_base.c
drivers/net/ethernet/intel/ice/ice_txrx.c
drivers/net/ethernet/intel/ice/ice_txrx.h
drivers/net/ethernet/intel/ice/ice_txrx_lib.h
drivers/net/ethernet/intel/ice/ice_xsk.c
drivers/net/ethernet/intel/idpf/idpf_lib.c
drivers/net/ethernet/litex/litex_liteeth.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/octeontx2/af/mbox.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/en.h
drivers/net/ethernet/mellanox/mlx5/core/en/fs_tt_redirect.c
drivers/net/ethernet/mellanox/mlx5/core/en/params.c
drivers/net/ethernet/mellanox/mlx5/core/en/ptp.c
drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec.c
drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
drivers/net/ethernet/mellanox/mlx5/core/en_common.c
drivers/net/ethernet/mellanox/mlx5/core/en_main.c
drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
drivers/net/ethernet/mellanox/mlx5/core/esw/bridge_mcast.c
drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c
drivers/net/ethernet/mellanox/mlx5/core/lib/aso.c
drivers/net/ethernet/mellanox/mlx5/core/steering/dr_action.c
drivers/net/ethernet/mellanox/mlx5/core/vport.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/fjes/fjes_hw.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/macsec.c
drivers/net/phy/micrel.c
drivers/net/tun.c
drivers/net/wireless/ath/ath11k/core.h
drivers/net/wireless/ath/ath11k/debugfs.c
drivers/net/wireless/ath/ath11k/debugfs.h
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
drivers/net/wireless/intersil/p54/fwio.c
drivers/net/xen-netback/netback.c
drivers/platform/mellanox/mlxbf-pmc.c
drivers/platform/mellanox/mlxbf-tmfifo.c
drivers/platform/x86/amd/pmf/Kconfig
drivers/platform/x86/amd/pmf/spc.c
drivers/platform/x86/amd/pmf/tee-if.c
drivers/platform/x86/intel/ifs/load.c
drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.c
drivers/platform/x86/intel/uncore-frequency/uncore-frequency-common.h
drivers/platform/x86/intel/wmi/sbl-fw-update.c
drivers/platform/x86/p2sb.c
drivers/platform/x86/touchscreen_dmi.c
drivers/platform/x86/wmi.c
drivers/scsi/initio.c
drivers/scsi/isci/request.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_priv.h
drivers/scsi/storvsc_drv.c
drivers/scsi/virtio_scsi.c
drivers/soc/apple/mailbox.c
drivers/spi/spi-bcm-qspi.c
drivers/spi/spi-cadence.c
drivers/spi/spi-cs42l43.c
drivers/spi/spi-hisi-sfc-v3xx.c
drivers/spi/spi-imx.c
drivers/spi/spi-intel-pci.c
drivers/spi/spi.c
drivers/thermal/intel/intel_powerclamp.c
drivers/video/fbdev/core/fbcon.c
drivers/video/fbdev/savage/savagefb_driver.c
drivers/video/fbdev/sis/sis_main.c
drivers/video/fbdev/stifb.c
drivers/video/fbdev/vt8500lcdfb.c
fs/afs/dir.c
fs/afs/dynroot.c
fs/afs/proc.c
fs/bcachefs/alloc_background.c
fs/bcachefs/btree_locking.c
fs/bcachefs/debug.c
fs/bcachefs/fs-io.c
fs/bcachefs/fsck.c
fs/bcachefs/journal.c
fs/bcachefs/journal_io.c
fs/bcachefs/str_hash.h
fs/bcachefs/util.c
fs/bcachefs/util.h
fs/btrfs/compression.c
fs/btrfs/compression.h
fs/btrfs/extent-tree.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/lzo.c
fs/btrfs/ref-verify.c
fs/btrfs/scrub.c
fs/btrfs/send.c
fs/btrfs/subpage.c
fs/btrfs/super.c
fs/btrfs/tree-checker.c
fs/btrfs/volumes.c
fs/btrfs/zlib.c
fs/btrfs/zoned.c
fs/cachefiles/ondemand.c
fs/erofs/compress.h
fs/erofs/decompressor.c
fs/erofs/decompressor_deflate.c
fs/erofs/decompressor_lzma.c
fs/erofs/fscache.c
fs/erofs/inode.c
fs/erofs/utils.c
fs/erofs/zdata.c
fs/exec.c
fs/hugetlbfs/inode.c
fs/jfs/jfs_dmap.c
fs/netfs/buffered_read.c
fs/netfs/buffered_write.c
fs/netfs/fscache_cache.c
fs/netfs/io.c
fs/netfs/misc.c
fs/nfsd/nfs4state.c
fs/overlayfs/namei.c
fs/overlayfs/overlayfs.h
fs/overlayfs/ovl_entry.h
fs/overlayfs/readdir.c
fs/overlayfs/super.c
fs/overlayfs/util.c
fs/smb/client/cached_dir.c
fs/smb/client/cifsencrypt.c
fs/smb/client/cifsfs.c
fs/smb/client/cifsglob.h
fs/smb/client/file.c
fs/smb/client/inode.c
fs/smb/client/readdir.c
fs/smb/client/smb2inode.c
fs/smb/client/smb2ops.c
fs/smb/client/smb2pdu.c
fs/smb/client/smb2proto.h
fs/smb/client/smbencrypt.c
fs/smb/client/transport.c
fs/smb/server/ksmbd_netlink.h
fs/smb/server/transport_ipc.c
fs/smb/server/transport_tcp.c
fs/tracefs/event_inode.c
fs/tracefs/internal.h
fs/xfs/xfs_super.c
include/linux/libata.h
include/linux/mlx5/driver.h
include/linux/mlx5/fs.h
include/linux/mlx5/mlx5_ifc.h
include/linux/mlx5/vport.h
include/linux/mman.h
include/linux/mmzone.h
include/linux/sched.h
include/linux/skmsg.h
include/linux/spi/spi.h
include/linux/syscalls.h
include/net/inet_connection_sock.h
include/net/inet_sock.h
include/net/llc_pdu.h
include/net/netfilter/nf_tables.h
include/net/sch_generic.h
include/net/sock.h
include/net/xdp_sock_drv.h
include/trace/events/afs.h
include/uapi/drm/ivpu_accel.h
include/uapi/linux/btrfs.h
init/Kconfig
io_uring/opdef.c
io_uring/openclose.c
kernel/events/uprobes.c
kernel/fork.c
kernel/futex/core.c
kernel/futex/pi.c
kernel/irq/irqdesc.c
kernel/rcu/tree.c
kernel/rcu/tree_exp.h
kernel/time/clocksource.c
kernel/time/tick-sched.c
kernel/trace/trace_events_trigger.c
kernel/trace/tracing_map.c
lib/kunit/device.c
lib/kunit/executor.c
lib/kunit/kunit-test.c
lib/kunit/test.c
lib/stackdepot.c
mm/huge_memory.c
mm/memblock.c
mm/memcontrol.c
mm/memory-failure.c
mm/memory.c
mm/mmap.c
mm/page-writeback.c
mm/readahead.c
mm/userfaultfd.c
net/8021q/vlan_netlink.c
net/core/dev.c
net/core/dev.h
net/core/filter.c
net/core/request_sock.c
net/core/sock.c
net/ipv4/af_inet.c
net/ipv4/inet_connection_sock.c
net/ipv4/tcp.c
net/ipv6/af_inet6.c
net/llc/af_llc.c
net/llc/llc_core.c
net/mac80211/Kconfig
net/mac80211/sta_info.c
net/mac80211/tx.c
net/netfilter/nf_tables_api.c
net/netfilter/nft_chain_filter.c
net/netfilter/nft_compat.c
net/netfilter/nft_flow_offload.c
net/netfilter/nft_limit.c
net/netfilter/nft_nat.c
net/netfilter/nft_rt.c
net/netfilter/nft_socket.c
net/netfilter/nft_synproxy.c
net/netfilter/nft_tproxy.c
net/netfilter/nft_xfrm.c
net/netlink/af_netlink.c
net/rds/af_rds.c
net/sched/cls_api.c
net/sched/cls_flower.c
net/smc/smc_diag.c
net/sunrpc/svcsock.c
net/wireless/Kconfig
net/wireless/nl80211.c
net/xdp/xsk.c
net/xdp/xsk_buff_pool.c
samples/cgroup/.gitignore [new file with mode: 0644]
scripts/Makefile.extrawarn
security/apparmor/lsm.c
security/keys/encrypted-keys/encrypted.c
security/tomoyo/tomoyo.c
tools/power/cpupower/bench/Makefile
tools/testing/cxl/Kbuild
tools/testing/cxl/test/Kbuild
tools/testing/nvdimm/Kbuild
tools/testing/selftests/drivers/net/bonding/bond_options.sh
tools/testing/selftests/drivers/net/bonding/settings
tools/testing/selftests/drivers/net/netdevsim/udp_tunnel_nic.sh
tools/testing/selftests/livepatch/functions.sh
tools/testing/selftests/mm/charge_reserved_hugetlb.sh
tools/testing/selftests/mm/ksm_tests.c
tools/testing/selftests/mm/map_hugetlb.c
tools/testing/selftests/mm/mremap_test.c
tools/testing/selftests/mm/va_high_addr_switch.sh
tools/testing/selftests/mm/write_hugetlb_memory.sh
tools/testing/selftests/net/config
tools/testing/selftests/net/rps_default_mask.sh
tools/testing/selftests/net/so_incoming_cpu.c
tools/testing/selftests/rseq/basic_percpu_ops_test.c
tools/testing/selftests/rseq/param_test.c
tools/testing/selftests/seccomp/seccomp_benchmark.c

diff --git a/CREDITS b/CREDITS
index 5797e8f7e92b06f8736c01c6c191815c4802b6fd..df8d6946739f68655a8b077f0ebcc4bf4612944b 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2161,6 +2161,19 @@ N: Mike Kravetz
 E: mike.kravetz@oracle.com
 D: Maintenance and development of the hugetlb subsystem
 
+N: Seth Jennings
+E: sjenning@redhat.com
+D: Creation and maintenance of zswap
+
+N: Dan Streetman
+E: ddstreet@ieee.org
+D: Maintenance and development of zswap
+D: Creation and maintenance of the zpool API
+
+N: Vitaly Wool
+E: vitaly.wool@konsulko.com
+D: Maintenance and development of zswap
+
 N: Andreas S. Krebs
 E: akrebs@altavista.net
 D: CYPRESS CY82C693 chipset IDE, Digital's PC-Alpha 164SX boards
diff --git a/Documentation/ABI/testing/sysfs-platform-silicom b/Documentation/ABI/testing/sysfs-platform-silicom
index 2288b3665d160a87b352def3e122461256a21761..4d1cc5bdbcc5f9f945dd825aed99cad89982fdd4 100644
--- a/Documentation/ABI/testing/sysfs-platform-silicom
+++ b/Documentation/ABI/testing/sysfs-platform-silicom
@@ -10,6 +10,7 @@ What:         /sys/devices/platform/silicom-platform/power_cycle
 Date:          November 2023
 KernelVersion: 6.7
 Contact:       Henry Shi <henrys@silicom-usa.com>
+Description:
                This file allows the user to power cycle the platform.
                Default value is 0; when set to 1, it powers down
                the platform, waits 5 seconds, then powers on the
diff --git a/Documentation/accel/introduction.rst b/Documentation/accel/introduction.rst
index 89984dfececf0b0b07a937179808d27b8268cf4b..ae30301366379d067e5cb71b4fcb534bc53c4d40 100644
--- a/Documentation/accel/introduction.rst
+++ b/Documentation/accel/introduction.rst
@@ -101,8 +101,8 @@ External References
 email threads
 -------------
 
-* `Initial discussion on the New subsystem for acceleration devices <https://lkml.org/lkml/2022/7/31/83>`_ - Oded Gabbay (2022)
-* `patch-set to add the new subsystem <https://lkml.org/lkml/2022/10/22/544>`_ - Oded Gabbay (2022)
+* `Initial discussion on the New subsystem for acceleration devices <https://lore.kernel.org/lkml/CAFCwf11=9qpNAepL7NL+YAV_QO=Wv6pnWPhKHKAepK3fNn+2Dg@mail.gmail.com/>`_ - Oded Gabbay (2022)
+* `patch-set to add the new subsystem <https://lore.kernel.org/lkml/20221022214622.18042-1-ogabbay@kernel.org/>`_ - Oded Gabbay (2022)
 
 Conference talks
 ----------------
diff --git a/Documentation/admin-guide/kernel-parameters.rst b/Documentation/admin-guide/kernel-parameters.rst
index 102937bc8443a23d88b952b4d7278e5e6cd25c21..4410384596a90b0ab26b4cf43bac54aaf78193fe 100644
--- a/Documentation/admin-guide/kernel-parameters.rst
+++ b/Documentation/admin-guide/kernel-parameters.rst
@@ -218,8 +218,3 @@ bytes respectively. Such letter suffixes can also be entirely omitted:
 
 .. include:: kernel-parameters.txt
    :literal:
-
-Todo
-----
-
-       Add more DRM drivers.
diff --git a/Documentation/admin-guide/kernel-per-CPU-kthreads.rst b/Documentation/admin-guide/kernel-per-CPU-kthreads.rst
index 993c2a05f5eeab65f9e3d3a5464ac26513452472..b6aeae3327ceb537b78fdbd86961ae670614395b 100644
--- a/Documentation/admin-guide/kernel-per-CPU-kthreads.rst
+++ b/Documentation/admin-guide/kernel-per-CPU-kthreads.rst
@@ -243,13 +243,9 @@ To reduce its OS jitter, do any of the following:
 3.     Do any of the following needed to avoid jitter that your
        application cannot tolerate:
 
-       a.      Build your kernel with CONFIG_SLUB=y rather than
-               CONFIG_SLAB=y, thus avoiding the slab allocator's periodic
-               use of each CPU's workqueues to run its cache_reap()
-               function.
-       b.      Avoid using oprofile, thus avoiding OS jitter from
+       a.      Avoid using oprofile, thus avoiding OS jitter from
                wq_sync_buffer().
-       c.      Limit your CPU frequency so that a CPU-frequency
+       b.      Limit your CPU frequency so that a CPU-frequency
                governor is not required, possibly enlisting the aid of
                special heatsinks or other cooling technologies.  If done
                correctly, and if your CPU architecture permits, you should
@@ -259,7 +255,7 @@ To reduce its OS jitter, do any of the following:
 
                WARNING:  Please check your CPU specifications to
                make sure that this is safe on your particular system.
-       d.      As of v3.18, Christoph Lameter's on-demand vmstat workers
+       c.      As of v3.18, Christoph Lameter's on-demand vmstat workers
                commit prevents OS jitter due to vmstat_update() on
                CONFIG_SMP=y systems.  Before v3.18, it was not possible
                to entirely get rid of the OS jitter, but you can
@@ -274,7 +270,7 @@ To reduce its OS jitter, do any of the following:
                (based on an earlier one from Gilad Ben-Yossef) that
                reduces or even eliminates vmstat overhead for some
                workloads at https://lore.kernel.org/r/00000140e9dfd6bd-40db3d4f-c1be-434f-8132-7820f81bb586-000000@email.amazonses.com.
-       e.      If running on high-end powerpc servers, build with
+       d.      If running on high-end powerpc servers, build with
                CONFIG_PPC_RTAS_DAEMON=n.  This prevents the RTAS
                daemon from running on each CPU every second or so.
                (This will require editing Kconfig files and will defeat
@@ -282,12 +278,12 @@ To reduce its OS jitter, do any of the following:
                due to the rtas_event_scan() function.
                WARNING:  Please check your CPU specifications to
                make sure that this is safe on your particular system.
-       f.      If running on Cell Processor, build your kernel with
+       e.      If running on Cell Processor, build your kernel with
                CBE_CPUFREQ_SPU_GOVERNOR=n to avoid OS jitter from
                spu_gov_work().
                WARNING:  Please check your CPU specifications to
                make sure that this is safe on your particular system.
-       g.      If running on PowerMAC, build your kernel with
+       f.      If running on PowerMAC, build your kernel with
                CONFIG_PMAC_RACKMETER=n to disable the CPU-meter,
                avoiding OS jitter from rackmeter_do_timer().
 
diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst
index a9efab50eed83e06a89549aeb1fb4da1b2eba1d9..22955d56b3799bfc3f3b92874b638aa24c1edaa6 100644
--- a/Documentation/dev-tools/kunit/usage.rst
+++ b/Documentation/dev-tools/kunit/usage.rst
@@ -671,8 +671,23 @@ Testing Static Functions
 ------------------------
 
 If we do not want to expose functions or variables for testing, one option is to
-conditionally ``#include`` the test file at the end of your .c file. For
-example:
+conditionally export the used symbol. For example:
+
+.. code-block:: c
+
+       /* In my_file.c */
+
+       VISIBLE_IF_KUNIT int do_interesting_thing();
+       EXPORT_SYMBOL_IF_KUNIT(do_interesting_thing);
+
+       /* In my_file.h */
+
+       #if IS_ENABLED(CONFIG_KUNIT)
+               int do_interesting_thing(void);
+       #endif
+
+Alternatively, you could conditionally ``#include`` the test file at the end of
+your .c file. For example:
 
 .. code-block:: c
 
diff --git a/Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml b/Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml
index 25d53fde92e1104490e3f8e604184b9449150be3..597c9cc6a312acb66b0355f84f9dd8977dbb2197 100644
--- a/Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml
+++ b/Documentation/devicetree/bindings/display/samsung/samsung,exynos-mixer.yaml
@@ -85,7 +85,7 @@ allOf:
         clocks:
           minItems: 6
           maxItems: 6
-        regs:
+        reg:
           minItems: 2
           maxItems: 2
 
@@ -99,7 +99,7 @@ allOf:
         clocks:
           minItems: 4
           maxItems: 4
-        regs:
+        reg:
           minItems: 2
           maxItems: 2
 
@@ -116,7 +116,7 @@ allOf:
         clocks:
           minItems: 3
           maxItems: 3
-        regs:
+        reg:
           minItems: 1
           maxItems: 1
 
diff --git a/Documentation/devicetree/bindings/media/cnm,wave521c.yaml b/Documentation/devicetree/bindings/media/cnm,wave521c.yaml
index 6d5569e77b7a1239219c13ef2a163849ce5bfd86..6a11c1d11fb5f9a9ccd343c2cb461bd7f3411121 100644
--- a/Documentation/devicetree/bindings/media/cnm,wave521c.yaml
+++ b/Documentation/devicetree/bindings/media/cnm,wave521c.yaml
@@ -17,7 +17,7 @@ properties:
   compatible:
     items:
       - enum:
-          - ti,k3-j721s2-wave521c
+          - ti,j721s2-wave521c
       - const: cnm,wave521c
 
   reg:
@@ -53,7 +53,7 @@ additionalProperties: false
 examples:
   - |
     vpu: video-codec@12345678 {
-        compatible = "ti,k3-j721s2-wave521c", "cnm,wave521c";
+        compatible = "ti,j721s2-wave521c", "cnm,wave521c";
         reg = <0x12345678 0x1000>;
         clocks = <&clks 42>;
         interrupts = <42>;
diff --git a/Documentation/filesystems/overlayfs.rst b/Documentation/filesystems/overlayfs.rst
index 1c244866041a3cb985568ce5f19e67d947eed5e6..16551440144183c58517fe27e750233619ebf89d 100644
--- a/Documentation/filesystems/overlayfs.rst
+++ b/Documentation/filesystems/overlayfs.rst
@@ -145,7 +145,9 @@ filesystem, an overlay filesystem needs to record in the upper filesystem
 that files have been removed.  This is done using whiteouts and opaque
 directories (non-directories are always opaque).
 
-A whiteout is created as a character device with 0/0 device number.
+A whiteout is created as a character device with 0/0 device number or
+as a zero-size regular file with the xattr "trusted.overlay.whiteout".
+
 When a whiteout is found in the upper level of a merged directory, any
 matching name in the lower level is ignored, and the whiteout itself
 is also hidden.
@@ -154,6 +156,13 @@ A directory is made opaque by setting the xattr "trusted.overlay.opaque"
 to "y".  Where the upper filesystem contains an opaque directory, any
 directory in the lower filesystem with the same name is ignored.
 
+An opaque directory should not contain any whiteouts, because they do not
+serve any purpose.  A merge directory containing regular files with the xattr
+"trusted.overlay.whiteout" should be additionally marked by setting the xattr
+"trusted.overlay.opaque" to "x" on the merge directory itself.
+This is needed to avoid the overhead of checking the "trusted.overlay.whiteout"
+on all entries during readdir in the common case.
+
 readdir
 -------
 
@@ -534,8 +543,9 @@ A lower dir with a regular whiteout will always be handled by the overlayfs
 mount, so to support storing an effective whiteout file in an overlayfs mount an
 alternative form of whiteout is supported. This form is a regular, zero-size
 file with the "overlay.whiteout" xattr set, inside a directory with the
-"overlay.whiteouts" xattr set. Such whiteouts are never created by overlayfs,
-but can be used by userspace tools (like containers) that generate lower layers.
+"overlay.opaque" xattr set to "x" (see `whiteouts and opaque directories`_).
+These alternative whiteouts are never created by overlayfs, but can be used by
+userspace tools (like containers) that generate lower layers.
 These alternative whiteouts can be escaped using the standard xattr escape
 mechanism in order to properly nest to any depth.
 
diff --git a/Documentation/sphinx/templates/kernel-toc.html b/Documentation/sphinx/templates/kernel-toc.html
index b58efa99df527d3d870d9572e6ee7f18912fe99f..41f1efbe64bb2898f1770deb128630b316a68a08 100644
--- a/Documentation/sphinx/templates/kernel-toc.html
+++ b/Documentation/sphinx/templates/kernel-toc.html
@@ -12,5 +12,7 @@
 <script type="text/javascript"> <!--
   var sbar = document.getElementsByClassName("sphinxsidebar")[0];
   let currents = document.getElementsByClassName("current")
-  sbar.scrollTop = currents[currents.length - 1].offsetTop;
+  if (currents.length) {
+    sbar.scrollTop = currents[currents.length - 1].offsetTop;
+  }
   --> </script>
diff --git a/MAINTAINERS b/MAINTAINERS
index 8d1052fa6a6924d17a4d2681fa7907c544e35186..61117c3afa8068e3f0ccc246678bd1a7e47584e4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3168,10 +3168,10 @@ F:      drivers/hwmon/asus-ec-sensors.c
 
 ASUS NOTEBOOKS AND EEEPC ACPI/WMI EXTRAS DRIVERS
 M:     Corentin Chary <corentin.chary@gmail.com>
-L:     acpi4asus-user@lists.sourceforge.net
+M:     Luke D. Jones <luke@ljones.dev>
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
-W:     http://acpi4asus.sf.net
+W:     https://asus-linux.org/
 F:     drivers/platform/x86/asus*.c
 F:     drivers/platform/x86/eeepc*.c
 
@@ -4547,7 +4547,7 @@ F:        drivers/net/ieee802154/ca8210.c
 
 CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
 M:     David Howells <dhowells@redhat.com>
-L:     linux-cachefs@redhat.com (moderated for non-subscribers)
+L:     netfs@lists.linux.dev
 S:     Supported
 F:     Documentation/filesystems/caching/cachefiles.rst
 F:     fs/cachefiles/
@@ -5958,7 +5958,6 @@ S:        Maintained
 F:     drivers/platform/x86/dell/dell-wmi-descriptor.c
 
 DELL WMI HARDWARE PRIVACY SUPPORT
-M:     Perry Yuan <Perry.Yuan@dell.com>
 L:     Dell.Client.Kernel@dell.com
 L:     platform-driver-x86@vger.kernel.org
 S:     Maintained
@@ -7955,12 +7954,13 @@ L:      rust-for-linux@vger.kernel.org
 S:     Maintained
 F:     rust/kernel/net/phy.rs
 
-EXEC & BINFMT API
+EXEC & BINFMT API, ELF
 R:     Eric Biederman <ebiederm@xmission.com>
 R:     Kees Cook <keescook@chromium.org>
 L:     linux-mm@kvack.org
 S:     Supported
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux.git for-next/execve
+F:     Documentation/userspace-api/ELF.rst
 F:     fs/*binfmt_*.c
 F:     fs/exec.c
 F:     include/linux/binfmts.h
@@ -8223,7 +8223,8 @@ F:        include/linux/iomap.h
 
 FILESYSTEMS [NETFS LIBRARY]
 M:     David Howells <dhowells@redhat.com>
-L:     linux-cachefs@redhat.com (moderated for non-subscribers)
+R:     Jeff Layton <jlayton@kernel.org>
+L:     netfs@lists.linux.dev
 L:     linux-fsdevel@vger.kernel.org
 S:     Supported
 F:     Documentation/filesystems/caching/
@@ -10282,7 +10283,7 @@ F:      drivers/scsi/ibmvscsi/ibmvscsi*
 F:     include/scsi/viosrp.h
 
 IBM Power Virtual SCSI Device Target Driver
-M:     Michael Cyr <mikecyr@linux.ibm.com>
+M:     Tyrel Datwyler <tyreld@linux.ibm.com>
 L:     linux-scsi@vger.kernel.org
 L:     target-devel@vger.kernel.org
 S:     Supported
@@ -11724,6 +11725,7 @@ F:      fs/smb/server/
 KERNEL UNIT TESTING FRAMEWORK (KUnit)
 M:     Brendan Higgins <brendanhiggins@google.com>
 M:     David Gow <davidgow@google.com>
+R:     Rae Moar <rmoar@google.com>
 L:     linux-kselftest@vger.kernel.org
 L:     kunit-dev@googlegroups.com
 S:     Maintained
@@ -12902,6 +12904,8 @@ M:      Alejandro Colomar <alx@kernel.org>
 L:     linux-man@vger.kernel.org
 S:     Maintained
 W:     http://www.kernel.org/doc/man-pages
+T:     git git://git.kernel.org/pub/scm/docs/man-pages/man-pages.git
+T:     git git://www.alejandro-colomar.es/src/alx/linux/man-pages/man-pages.git
 
 MANAGEMENT COMPONENT TRANSPORT PROTOCOL (MCTP)
 M:     Jeremy Kerr <jk@codeconstruct.com.au>
@@ -20549,6 +20553,7 @@ F:      Documentation/translations/sp_SP/
 
 SPARC + UltraSPARC (sparc/sparc64)
 M:     "David S. Miller" <davem@davemloft.net>
+M:     Andreas Larsson <andreas@gaisler.com>
 L:     sparclinux@vger.kernel.org
 S:     Maintained
 Q:     http://patchwork.ozlabs.org/project/sparclinux/list/
@@ -24339,13 +24344,6 @@ T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dlemoal/zonefs.git
 F:     Documentation/filesystems/zonefs.rst
 F:     fs/zonefs/
 
-ZPOOL COMPRESSED PAGE STORAGE API
-M:     Dan Streetman <ddstreet@ieee.org>
-L:     linux-mm@kvack.org
-S:     Maintained
-F:     include/linux/zpool.h
-F:     mm/zpool.c
-
 ZR36067 VIDEO FOR LINUX DRIVER
 M:     Corentin Labbe <clabbe@baylibre.com>
 L:     mjpeg-users@lists.sourceforge.net
@@ -24397,7 +24395,9 @@ M:      Nhat Pham <nphamcs@gmail.com>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     Documentation/admin-guide/mm/zswap.rst
+F:     include/linux/zpool.h
 F:     include/linux/zswap.h
+F:     mm/zpool.c
 F:     mm/zswap.c
 
 THE REST
diff --git a/Makefile b/Makefile
index 9869f57c3fb3e6a37e40aa267737a044a5e099d8..6c0a4d294444cb41b174651ca2b66c856a8f1a55 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
 VERSION = 6
 PATCHLEVEL = 8
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
 NAME = Hurr durr I'ma ninja sloth
 
 # *DOCUMENTATION*
@@ -986,6 +986,10 @@ NOSTDINC_FLAGS += -nostdinc
 # perform bounds checking.
 KBUILD_CFLAGS += $(call cc-option, -fstrict-flex-arrays=3)
 
+#Currently, disable -Wstringop-overflow for GCC 11, globally.
+KBUILD_CFLAGS-$(CONFIG_CC_NO_STRINGOP_OVERFLOW) += $(call cc-option, -Wno-stringop-overflow)
+KBUILD_CFLAGS-$(CONFIG_CC_STRINGOP_OVERFLOW) += $(call cc-option, -Wstringop-overflow)
+
 # disable invalid "can't wrap" optimizations for signed / pointers
 KBUILD_CFLAGS  += -fno-strict-overflow
 
diff --git a/arch/Kconfig b/arch/Kconfig
index c91917b508736d1fa0d37d5bf3b1e4bf5550e211..a5af0edd3eb8f3b64e6e51bffb2ac491cb31bc26 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -673,6 +673,7 @@ config SHADOW_CALL_STACK
        bool "Shadow Call Stack"
        depends on ARCH_SUPPORTS_SHADOW_CALL_STACK
        depends on DYNAMIC_FTRACE_WITH_ARGS || DYNAMIC_FTRACE_WITH_REGS || !FUNCTION_GRAPH_TRACER
+       depends on MMU
        help
          This option enables the compiler's Shadow Call Stack, which
          uses a shadow stack to protect function return addresses from
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts
index e899de681f4752d4077b55a0cd4f8858c6e23df0..5be0e8fd2633c20e2d87abc843b53fca437942be 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-bletchley.dts
@@ -45,8 +45,8 @@
                num-chipselects = <1>;
                cs-gpios = <&gpio0 ASPEED_GPIO(Z, 0) GPIO_ACTIVE_LOW>;
 
-               tpmdev@0 {
-                       compatible = "tcg,tpm_tis-spi";
+               tpm@0 {
+                       compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
                        spi-max-frequency = <33000000>;
                        reg = <0>;
                };
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts
index a677c827e758fe2042fcf14a192832668e3ffbd0..5a8169bbda8792c76c1da960508c8a0c6bdd4b86 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-facebook-wedge400.dts
@@ -80,8 +80,8 @@
                gpio-miso = <&gpio ASPEED_GPIO(R, 5) GPIO_ACTIVE_HIGH>;
                num-chipselects = <1>;
 
-               tpmdev@0 {
-                       compatible = "tcg,tpm_tis-spi";
+               tpm@0 {
+                       compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
                        spi-max-frequency = <33000000>;
                        reg = <0>;
                };
diff --git a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
index 3f6010ef2b86f264fe88935a737b3ce9c60d762b..213023bc5aec4144751c9e7bc8e3e05c156386c8 100644
--- a/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
+++ b/arch/arm/boot/dts/aspeed/aspeed-bmc-opp-tacoma.dts
        status = "okay";
 
        tpm: tpm@2e {
-               compatible = "tcg,tpm-tis-i2c";
+               compatible = "nuvoton,npct75x", "tcg,tpm-tis-i2c";
                reg = <0x2e>;
        };
 };
diff --git a/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi b/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi
index 31590d3186a2e099e44c663c46a87975b60aae27..00e5887c926f181d57bebe6b0b781ad2f2e8a514 100644
--- a/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi
+++ b/arch/arm/boot/dts/aspeed/ast2600-facebook-netbmc-common.dtsi
@@ -35,8 +35,8 @@
                gpio-mosi = <&gpio0 ASPEED_GPIO(X, 4) GPIO_ACTIVE_HIGH>;
                gpio-miso = <&gpio0 ASPEED_GPIO(X, 5) GPIO_ACTIVE_HIGH>;
 
-               tpmdev@0 {
-                       compatible = "tcg,tpm_tis-spi";
+               tpm@0 {
+                       compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
                        spi-max-frequency = <33000000>;
                        reg = <0>;
                };
diff --git a/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi b/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi
index 44cc4ff1d0df358ab66bb036d127175da1be74b6..d12fb44aeb140cfacf05a5b257d2106c79392279 100644
--- a/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi
+++ b/arch/arm/boot/dts/nxp/imx/imx6ull-phytec-tauri.dtsi
        tpm_tis: tpm@1 {
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_tpm>;
-               compatible = "tcg,tpm_tis-spi";
+               compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
                reg = <1>;
                spi-max-frequency = <20000000>;
                interrupt-parent = <&gpio5>;
diff --git a/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts b/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts
index 3a723843d5626f6cc4b9ee2750968c01e46306db..9984b343cdf0cad1abd9e0d4d142ded838c47980 100644
--- a/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts
+++ b/arch/arm/boot/dts/nxp/imx/imx7d-flex-concentrator.dts
         * TCG specification - Section 6.4.1 Clocking:
         * TPM shall support a SPI clock frequency range of 10-24 MHz.
         */
-       st33htph: tpm-tis@0 {
+       st33htph: tpm@0 {
                compatible = "st,st33htpm-spi", "tcg,tpm_tis-spi";
                reg = <0>;
                spi-max-frequency = <24000000>;
diff --git a/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi b/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
index d7954ff466b491b32acf6962ab5d64f4843f8157..e5254e32aa8fc326dfcabce33705a9b25e272052 100644
--- a/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
+++ b/arch/arm/boot/dts/samsung/exynos4212-tab3.dtsi
 };
 
 &fimd {
+       samsung,invert-vclk;
        status = "okay";
 };
 
diff --git a/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi b/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi
index b8730aa52ce6fe521a1b531be42c4ef891c969b5..a59331aa58e55e3ef514fc06b5a36472c901dcd3 100644
--- a/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi
+++ b/arch/arm/boot/dts/ti/omap/am335x-moxa-uc-2100-common.dtsi
        pinctrl-names = "default";
        pinctrl-0 = <&spi1_pins>;
 
-       tpm_spi_tis@0 {
+       tpm@0 {
                compatible = "tcg,tpm_tis-spi";
                reg = <0>;
                spi-max-frequency = <500000>;
diff --git a/arch/arm64/boot/dts/exynos/google/gs101.dtsi b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
index 9747cb3fa03ac5c141b9bf660da3531ca2082def..d838e3a7af6e5ddda3751cc6f0bf4c73bccacc03 100644
--- a/arch/arm64/boot/dts/exynos/google/gs101.dtsi
+++ b/arch/arm64/boot/dts/exynos/google/gs101.dtsi
                        #clock-cells = <1>;
                        clocks = <&cmu_top CLK_DOUT_CMU_MISC_BUS>,
                                 <&cmu_top CLK_DOUT_CMU_MISC_SSS>;
-                       clock-names = "dout_cmu_misc_bus", "dout_cmu_misc_sss";
+                       clock-names = "bus", "sss";
                };
 
                watchdog_cl0: watchdog@10060000 {
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
index 968f475b9a96c3c7334d670fd004ddcde08eed6f..27a902569e2a28434af3b6b15dcdb3a43f7a9606 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mm-phygate-tauri-l.dts
        };
 
        tpm: tpm@1 {
-               compatible = "tcg,tpm_tis-spi";
+               compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
                interrupts = <11 IRQ_TYPE_LEVEL_LOW>;
                interrupt-parent = <&gpio2>;
                pinctrl-names = "default";
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
index 3f3f2a2c89cd504f22548178b0d718ed61d122fa..752caa38eb03bfd6831e61f857b517beb5bfe1a1 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw72xx.dtsi
@@ -89,7 +89,7 @@
        status = "okay";
 
        tpm@1 {
-               compatible = "tcg,tpm_tis-spi";
+               compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
                reg = <0x1>;
                spi-max-frequency = <36000000>;
        };
diff --git a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
index 06fed93769966367b02c0a3d5f44f8264c080617..2aa6c1090fc7d7b81f7774354286c13a5463c06b 100644
--- a/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mm-venice-gw73xx.dtsi
        status = "okay";
 
        tpm@1 {
-               compatible = "tcg,tpm_tis-spi";
+               compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
                reg = <0x1>;
                spi-max-frequency = <36000000>;
        };
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
index feae77e038354c687d69904fdb5b577f32cfe26d..a08057410bdef5b3a2572cb5c5e2fe6ea35b5522 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-beacon-kit.dts
        status = "okay";
 
        tpm: tpm@0 {
-               compatible = "infineon,slb9670";
+               compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
                reg = <0>;
                pinctrl-names = "default";
                pinctrl-0 = <&pinctrl_tpm>;
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
index c24587c895e1f9734da4c4f4cf7becb697825f59..41c79d2ebdd6201dc10278204c064a4c01c71709 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw72xx.dtsi
        status = "okay";
 
        tpm@1 {
-               compatible = "tcg,tpm_tis-spi";
+               compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
                reg = <0x1>;
                spi-max-frequency = <36000000>;
        };
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
index 628ffba69862ad51f2072e88fc812b3a84e1b71c..d5c400b355af564123497cd1805e0b0ad56ded21 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw73xx.dtsi
        status = "okay";
 
        tpm@1 {
-               compatible = "tcg,tpm_tis-spi";
+               compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
                reg = <0x1>;
                spi-max-frequency = <36000000>;
        };
diff --git a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
index 9caf7ca25444600a4a7979b3749d5175e32b0bbe..cae586cd45bdd59aa479e70bb290fc50b0392a3c 100644
--- a/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mp-venice-gw74xx.dts
        status = "okay";
 
        tpm@0 {
-               compatible = "tcg,tpm_tis-spi";
+               compatible = "atmel,attpm20p", "tcg,tpm_tis-spi";
                reg = <0x0>;
                spi-max-frequency = <36000000>;
        };
diff --git a/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts b/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
index 6376417e918c2083bb67c2f978d53602153d3cb9..d8cf1f27c3ec8a33b7ad527c1fc2b489747a2d84 100644
--- a/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
+++ b/arch/arm64/boot/dts/freescale/imx8mq-kontron-pitx-imx8m.dts
@@ -65,7 +65,7 @@
        status = "okay";
 
        tpm@0 {
-               compatible = "infineon,slb9670";
+               compatible = "infineon,slb9670", "tcg,tpm_tis-spi";
                reg = <0>;
                spi-max-frequency = <43000000>;
        };
diff --git a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
index 5506de83f61d423634511fba3f783f67a8987792..1b3396b1cee394659d0a77c104f05e1e7762569f 100644
--- a/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8183-kukui.dtsi
        status = "okay";
        cs-gpios = <&pio 86 GPIO_ACTIVE_LOW>;
 
-       cr50@0 {
+       tpm@0 {
                compatible = "google,cr50";
                reg = <0>;
                spi-max-frequency = <1000000>;
diff --git a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
index f2281250ac35da2514d73191cbcdb2e195afcbcb..d87aab8d7a79ed4ac8365b951f16c370b2efcc91 100644
--- a/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
+++ b/arch/arm64/boot/dts/mediatek/mt8192-asurada.dtsi
        pinctrl-names = "default";
        pinctrl-0 = <&spi5_pins>;
 
-       cr50@0 {
+       tpm@0 {
                compatible = "google,cr50";
                reg = <0>;
                interrupts-extended = <&pio 171 IRQ_TYPE_EDGE_RISING>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
index 0f9cc042d9bf06b3445c2cb125435c823f3b26b4..1cba1d857c96ba06e3f257b8a15f20a99a9250ee 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-bob.dts
@@ -70,7 +70,7 @@
 &spi0 {
        status = "okay";
 
-       cr50@0 {
+       tpm@0 {
                compatible = "google,cr50";
                reg = <0>;
                interrupt-parent = <&gpio0>;
diff --git a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
index c5e7de60c12140c0dae9789cc338ef5f1b9fac3c..5846a11f0e848fc059446a47b57ff732b45e9f4c 100644
--- a/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
+++ b/arch/arm64/boot/dts/rockchip/rk3399-gru-scarlet.dtsi
@@ -706,7 +706,7 @@ camera: &i2c7 {
 &spi2 {
        status = "okay";
 
-       cr50@0 {
+       tpm@0 {
                compatible = "google,cr50";
                reg = <0>;
                interrupt-parent = <&gpio1>;
diff --git a/arch/loongarch/include/asm/kvm_vcpu.h b/arch/loongarch/include/asm/kvm_vcpu.h
index e71ceb88f29eecdbebe40c407c1a93e4cd433337..0cb4fdb8a9b5970dfefb24a34c82d1451ff27fa9 100644
--- a/arch/loongarch/include/asm/kvm_vcpu.h
+++ b/arch/loongarch/include/asm/kvm_vcpu.h
@@ -60,7 +60,7 @@ int kvm_own_lsx(struct kvm_vcpu *vcpu);
 void kvm_save_lsx(struct loongarch_fpu *fpu);
 void kvm_restore_lsx(struct loongarch_fpu *fpu);
 #else
-static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { }
+static inline int kvm_own_lsx(struct kvm_vcpu *vcpu) { return -EINVAL; }
 static inline void kvm_save_lsx(struct loongarch_fpu *fpu) { }
 static inline void kvm_restore_lsx(struct loongarch_fpu *fpu) { }
 #endif
@@ -70,7 +70,7 @@ int kvm_own_lasx(struct kvm_vcpu *vcpu);
 void kvm_save_lasx(struct loongarch_fpu *fpu);
 void kvm_restore_lasx(struct loongarch_fpu *fpu);
 #else
-static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { }
+static inline int kvm_own_lasx(struct kvm_vcpu *vcpu) { return -EINVAL; }
 static inline void kvm_save_lasx(struct loongarch_fpu *fpu) { }
 static inline void kvm_restore_lasx(struct loongarch_fpu *fpu) { }
 #endif
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
index a16e3dbe9f09eb2fbf1b239b982b727330f7c233..2b49d30eb7c0185e043e462859e76a4ae64ecd67 100644
--- a/arch/loongarch/kernel/smp.c
+++ b/arch/loongarch/kernel/smp.c
@@ -509,7 +509,6 @@ asmlinkage void start_secondary(void)
        sync_counter();
        cpu = raw_smp_processor_id();
        set_my_cpu_offset(per_cpu_offset(cpu));
-       rcutree_report_cpu_starting(cpu);
 
        cpu_probe();
        constant_clockevent_init();
diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index 915f175278931f26164c1b970663542cf0661a12..50a6acd7ffe4c94b986c5f7a9802420f090a7d79 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -675,7 +675,7 @@ static bool fault_supports_huge_mapping(struct kvm_memory_slot *memslot,
  *
  * There are several ways to safely use this helper:
  *
- * - Check mmu_invalidate_retry_hva() after grabbing the mapping level, before
+ * - Check mmu_invalidate_retry_gfn() after grabbing the mapping level, before
  *   consuming it.  In this case, mmu_lock doesn't need to be held during the
  *   lookup, but it does need to be held while checking the MMU notifier.
  *
@@ -855,7 +855,7 @@ retry:
 
        /* Check if an invalidation has taken place since we got pfn */
        spin_lock(&kvm->mmu_lock);
-       if (mmu_invalidate_retry_hva(kvm, mmu_seq, hva)) {
+       if (mmu_invalidate_retry_gfn(kvm, mmu_seq, gfn)) {
                /*
                 * This can happen when mappings are changed asynchronously, but
                 * also synchronously if a COW is triggered by
diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
index 2c0a411f23aa778bb62160bd511252736fc987be..0b95d32b30c94704a0108fdffcae68c148403ce7 100644
--- a/arch/loongarch/mm/tlb.c
+++ b/arch/loongarch/mm/tlb.c
@@ -284,12 +284,16 @@ static void setup_tlb_handler(int cpu)
                set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
                set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
                set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
-       }
+       } else {
+               int vec_sz __maybe_unused;
+               void *addr __maybe_unused;
+               struct page *page __maybe_unused;
+
+               /* Avoid lockdep warning */
+               rcutree_report_cpu_starting(cpu);
+
 #ifdef CONFIG_NUMA
-       else {
-               void *addr;
-               struct page *page;
-               const int vec_sz = sizeof(exception_handlers);
+               vec_sz = sizeof(exception_handlers);
 
                if (pcpu_handlers[cpu])
                        return;
@@ -305,8 +309,8 @@ static void setup_tlb_handler(int cpu)
                csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
                csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
                csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
-       }
 #endif
+       }
 }
 
 void tlb_init(int cpu)
diff --git a/arch/mips/alchemy/common/prom.c b/arch/mips/alchemy/common/prom.c
index b13d8adf3be47dbfd6f65e1e63ee3217feafe04b..20d30f6265cdce2a915ddffc52d0bb67e6e0edac 100644
--- a/arch/mips/alchemy/common/prom.c
+++ b/arch/mips/alchemy/common/prom.c
@@ -40,6 +40,7 @@
 #include <linux/string.h>
 
 #include <asm/bootinfo.h>
+#include <prom.h>
 
 int prom_argc;
 char **prom_argv;
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index 2388d68786f4a7c40dcadfed78fd8ecfc91f4896..a7a6d31a7a4148ada6ad340d0723ef8c7a73f0be 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
 #include <linux/mm.h>
 #include <linux/dma-map-ops.h> /* for dma_default_coherent */
 
+#include <asm/bootinfo.h>
 #include <asm/mipsregs.h>
 
 #include <au1000.h>
 
-extern void __init board_setup(void);
-extern void __init alchemy_set_lpj(void);
-
 static bool alchemy_dma_coherent(void)
 {
        switch (alchemy_get_cputype()) {
diff --git a/arch/mips/bcm63xx/boards/board_bcm963xx.c b/arch/mips/bcm63xx/boards/board_bcm963xx.c
index 01aff80a59672dee1b675c3625aecb6f70eb52b9..99f321b6e417bd4250ab7cec31ae74ad2d396ec3 100644
--- a/arch/mips/bcm63xx/boards/board_bcm963xx.c
+++ b/arch/mips/bcm63xx/boards/board_bcm963xx.c
@@ -702,7 +702,7 @@ static struct ssb_sprom bcm63xx_sprom = {
        .boardflags_hi          = 0x0000,
 };
 
-int bcm63xx_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
+static int bcm63xx_get_fallback_sprom(struct ssb_bus *bus, struct ssb_sprom *out)
 {
        if (bus->bustype == SSB_BUSTYPE_PCI) {
                memcpy(out, &bcm63xx_sprom, sizeof(struct ssb_sprom));
diff --git a/arch/mips/bcm63xx/dev-rng.c b/arch/mips/bcm63xx/dev-rng.c
index d277b4dc6c688eb394544b556e0941a54654c1b9..f94151f7c96fe1d988cd3d88f8451bbdf012955c 100644
--- a/arch/mips/bcm63xx/dev-rng.c
+++ b/arch/mips/bcm63xx/dev-rng.c
@@ -26,7 +26,7 @@ static struct platform_device bcm63xx_rng_device = {
        .resource       = rng_resources,
 };
 
-int __init bcm63xx_rng_register(void)
+static int __init bcm63xx_rng_register(void)
 {
        if (!BCMCPU_IS_6368())
                return -ENODEV;
diff --git a/arch/mips/bcm63xx/dev-uart.c b/arch/mips/bcm63xx/dev-uart.c
index 3bc7f3bfc9ad5c5e45737bcf1510bfcd5b5483e7..5d6bf0445b299cf0e91a4f7992f134e5648ca1c2 100644
--- a/arch/mips/bcm63xx/dev-uart.c
+++ b/arch/mips/bcm63xx/dev-uart.c
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/platform_device.h>
 #include <bcm63xx_cpu.h>
+#include <bcm63xx_dev_uart.h>
 
 static struct resource uart0_resources[] = {
        {
diff --git a/arch/mips/bcm63xx/dev-wdt.c b/arch/mips/bcm63xx/dev-wdt.c
index 42130914a3c210993c07d971449a424d40775060..302bf7ed5ad5abfaa6cb94e4a4e0dcdf1ffbceb1 100644
--- a/arch/mips/bcm63xx/dev-wdt.c
+++ b/arch/mips/bcm63xx/dev-wdt.c
@@ -34,7 +34,7 @@ static struct platform_device bcm63xx_wdt_device = {
        },
 };
 
-int __init bcm63xx_wdt_register(void)
+static int __init bcm63xx_wdt_register(void)
 {
        wdt_resources[0].start = bcm63xx_regset_address(RSET_WDT);
        wdt_resources[0].end = wdt_resources[0].start;
diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c
index 2548013442f6d95bdda071f89cc112d97d8a0d0a..6240a8f88ea366b5d440f6de3416191dead812b8 100644
--- a/arch/mips/bcm63xx/irq.c
+++ b/arch/mips/bcm63xx/irq.c
@@ -72,7 +72,7 @@ static inline int enable_irq_for_cpu(int cpu, struct irq_data *d,
  */
 
 #define BUILD_IPIC_INTERNAL(width)                                     \
-void __dispatch_internal_##width(int cpu)                              \
+static void __dispatch_internal_##width(int cpu)                       \
 {                                                                      \
        u32 pending[width / 32];                                        \
        unsigned int src, tgt;                                          \
diff --git a/arch/mips/bcm63xx/setup.c b/arch/mips/bcm63xx/setup.c
index d811e3e03f819a5005a480d56d5aee5a090fcc3c..c13ddb544a23bf0ebfd6bd627c9ed022a44cda0e 100644
--- a/arch/mips/bcm63xx/setup.c
+++ b/arch/mips/bcm63xx/setup.c
@@ -159,7 +159,7 @@ void __init plat_mem_setup(void)
        board_setup();
 }
 
-int __init bcm63xx_register_devices(void)
+static int __init bcm63xx_register_devices(void)
 {
        /* register gpiochip */
        bcm63xx_gpio_init();
index a86065854c0c8c6c92254c4d7746fda8e6801250..74b83807df30a7be13f1f9466753b2560ce9b50b 100644 (file)
@@ -178,7 +178,7 @@ int bcm63xx_timer_set(int id, int monotonic, unsigned int countdown_us)
 
 EXPORT_SYMBOL(bcm63xx_timer_set);
 
-int bcm63xx_timer_init(void)
+static int bcm63xx_timer_init(void)
 {
        int ret, irq;
        u32 reg;
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c
index 2e099d55a564a6ecf3dc347ace84ad25e4278dd9..9a266bf7833993b5facbdb63c97e555ad4d9ce27 100644
--- a/arch/mips/cobalt/setup.c
+++ b/arch/mips/cobalt/setup.c
@@ -23,9 +23,6 @@
 
 #include <cobalt.h>
 
-extern void cobalt_machine_restart(char *command);
-extern void cobalt_machine_halt(void);
-
 const char *get_system_type(void)
 {
        switch (cobalt_board_id) {
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index 66188739f54d20a41ce18acb0a88a4fdf16e8718..fb78e6fd5de4804e221fba63bceeb4dcd4a492a9 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -37,7 +37,7 @@ static unsigned int nr_prom_mem __initdata;
  */
 #define ARC_PAGE_SHIFT 12
 
-struct linux_mdesc * __init ArcGetMemoryDescriptor(struct linux_mdesc *Current)
+static struct linux_mdesc * __init ArcGetMemoryDescriptor(struct linux_mdesc *Current)
 {
        return (struct linux_mdesc *) ARC_CALL1(get_mdesc, Current);
 }
index a7eec3364a64abb60f1dae67ad26c80738878533..41546777902ba0fe25af0f442c688169f9220b48 100644 (file)
 
 #include <asm/cpu.h>
 
+void alchemy_set_lpj(void);
+void board_setup(void);
+
 /* helpers to access the SYS_* registers */
 static inline unsigned long alchemy_rdsys(int regofs)
 {
diff --git a/arch/mips/include/asm/mach-cobalt/cobalt.h b/arch/mips/include/asm/mach-cobalt/cobalt.h
index 5b9fce73f11d1301fa5724049bfd9f8625ea7061..97f9d5e9446d22e1371b1c9f6fe09d59610d27ed 100644
--- a/arch/mips/include/asm/mach-cobalt/cobalt.h
+++ b/arch/mips/include/asm/mach-cobalt/cobalt.h
@@ -19,4 +19,7 @@ extern int cobalt_board_id;
 #define COBALT_BRD_ID_QUBE2    0x5
 #define COBALT_BRD_ID_RAQ2     0x6
 
+void cobalt_machine_halt(void);
+void cobalt_machine_restart(char *command);
+
 #endif /* __ASM_COBALT_H */
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index 5582a4ca1e9e36ad5dac4d23caa4d6c4bfb11a5d..7aa2c2360ff60219bb8fb9f03a8a528edf7f53a1 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -11,6 +11,7 @@
 
 #include <asm/cpu-features.h>
 #include <asm/cpu-info.h>
+#include <asm/fpu.h>
 
 #ifdef CONFIG_MIPS_FP_SUPPORT
 
@@ -309,6 +310,11 @@ void mips_set_personality_nan(struct arch_elf_state *state)
        struct cpuinfo_mips *c = &boot_cpu_data;
        struct task_struct *t = current;
 
+       /* Do this early so t->thread.fpu.fcr31 won't be clobbered in case
+        * we are preempted before the lose_fpu(0) in start_thread.
+        */
+       lose_fpu(0);
+
        t->thread.fpu.fcr31 = c->fpu_csr31;
        switch (state->nan_2008) {
        case 0:
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index dec6878b35f627089226618ff4dc4628855c8eb4..a1c1cb5de91321468f338d41a01df2f40efaf293 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -2007,7 +2007,13 @@ unsigned long vi_handlers[64];
 
 void reserve_exception_space(phys_addr_t addr, unsigned long size)
 {
-       memblock_reserve(addr, size);
+       /*
+        * Reserving exception space on CPUs other than CPU0 is
+        * too late, since memblock is unavailable by the time
+        * the APs are brought up
+        */
+       if (smp_processor_id() == 0)
+               memblock_reserve(addr, size);
 }
 
 void __init *set_except_vector(int n, void *addr)
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index a3cf293658581ed6a599da2b870f3c10c67a6be1..0c45767eacf67429ea3910628a2f44c219a4da34 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -108,10 +108,9 @@ void __init prom_init(void)
        prom_init_cmdline();
 
 #if defined(CONFIG_MIPS_MT_SMP)
-       if (cpu_has_mipsmt) {
-               lantiq_smp_ops = vsmp_smp_ops;
+       lantiq_smp_ops = vsmp_smp_ops;
+       if (cpu_has_mipsmt)
                lantiq_smp_ops.init_secondary = lantiq_init_secondary;
-               register_smp_ops(&lantiq_smp_ops);
-       }
+       register_smp_ops(&lantiq_smp_ops);
 #endif
 }
diff --git a/arch/mips/loongson64/init.c b/arch/mips/loongson64/init.c
index f25caa6aa9d306e84d719e97ea54f7b8faa449c1..553142c1f14fe2261d963b3784f3ed9e6c086cd2 100644
--- a/arch/mips/loongson64/init.c
+++ b/arch/mips/loongson64/init.c
@@ -103,6 +103,9 @@ void __init szmem(unsigned int node)
        if (loongson_sysconf.vgabios_addr)
                memblock_reserve(virt_to_phys((void *)loongson_sysconf.vgabios_addr),
                                SZ_256K);
+       /* set nid for reserved memory */
+       memblock_set_node((u64)node << 44, (u64)(node + 1) << 44,
+                       &memblock.reserved, node);
 }
 
 #ifndef CONFIG_NUMA
diff --git a/arch/mips/loongson64/numa.c b/arch/mips/loongson64/numa.c
index 8f61e93c0c5bcf07134cc22a06913c57e5140af4..68dafd6d3e2571f615e9c9e7d9b2c895de80468a 100644
--- a/arch/mips/loongson64/numa.c
+++ b/arch/mips/loongson64/numa.c
@@ -132,6 +132,8 @@ static void __init node_mem_init(unsigned int node)
 
                /* Reserve pfn range 0~node[0]->node_start_pfn */
                memblock_reserve(0, PAGE_SIZE * start_pfn);
+               /* set nid for reserved memory on node 0 */
+               memblock_set_node(0, 1ULL << 44, &memblock.reserved, 0);
        }
 }
 
diff --git a/arch/mips/sgi-ip27/Makefile b/arch/mips/sgi-ip27/Makefile
index 27c14ede191eb7b1353e3a2cedd6d9d80bc2b385..9877fcc512b1578731fb6235a35256a61b172afb 100644
--- a/arch/mips/sgi-ip27/Makefile
+++ b/arch/mips/sgi-ip27/Makefile
@@ -5,7 +5,7 @@
 
 obj-y  := ip27-berr.o ip27-irq.o ip27-init.o ip27-klconfig.o \
           ip27-klnuma.o ip27-memory.o ip27-nmi.o ip27-reset.o ip27-timer.o \
-          ip27-hubio.o ip27-xtalk.o
+          ip27-xtalk.o
 
 obj-$(CONFIG_EARLY_PRINTK)     += ip27-console.o
 obj-$(CONFIG_SMP)              += ip27-smp.o
diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c
index 923a63a51cda39482c227936c17f828ceae3227b..9eb497cb5d525c74e775ca741bd4ec664209280b 100644
--- a/arch/mips/sgi-ip27/ip27-berr.c
+++ b/arch/mips/sgi-ip27/ip27-berr.c
@@ -22,6 +22,8 @@
 #include <asm/traps.h>
 #include <linux/uaccess.h>
 
+#include "ip27-common.h"
+
 static void dump_hub_information(unsigned long errst0, unsigned long errst1)
 {
        static char *err_type[2][8] = {
@@ -57,7 +59,7 @@ static void dump_hub_information(unsigned long errst0, unsigned long errst1)
               [st0.pi_stat0_fmt.s0_err_type] ? : "invalid");
 }
 
-int ip27_be_handler(struct pt_regs *regs, int is_fixup)
+static int ip27_be_handler(struct pt_regs *regs, int is_fixup)
 {
        unsigned long errst0, errst1;
        int data = regs->cp0_cause & 4;
diff --git a/arch/mips/sgi-ip27/ip27-common.h b/arch/mips/sgi-ip27/ip27-common.h
index ed008a08464c208cc1944cfbd6fe5de31e14fee4..a0059fa13934539af5fb616120f66b77054a2219 100644
--- a/arch/mips/sgi-ip27/ip27-common.h
+++ b/arch/mips/sgi-ip27/ip27-common.h
@@ -10,6 +10,7 @@ extern void hub_rt_clock_event_init(void);
 extern void hub_rtc_init(nasid_t nasid);
 extern void install_cpu_nmi_handler(int slice);
 extern void install_ipi(void);
+extern void ip27_be_init(void);
 extern void ip27_reboot_setup(void);
 extern const struct plat_smp_ops ip27_smp_ops;
 extern unsigned long node_getfirstfree(nasid_t nasid);
@@ -17,4 +18,5 @@ extern void per_cpu_init(void);
 extern void replicate_kernel_text(void);
 extern void setup_replication_mask(void);
 
+
 #endif /* __IP27_COMMON_H */
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
deleted file mode 100644
index c57f0d8..0000000
--- a/arch/mips/sgi-ip27/ip27-hubio.c
+++ /dev/null
@@ -1,185 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 1992-1997, 2000-2003 Silicon Graphics, Inc.
- * Copyright (C) 2004 Christoph Hellwig.
- *
- * Support functions for the HUB ASIC - mostly PIO mapping related.
- */
-
-#include <linux/bitops.h>
-#include <linux/string.h>
-#include <linux/mmzone.h>
-#include <asm/sn/addrs.h>
-#include <asm/sn/arch.h>
-#include <asm/sn/agent.h>
-#include <asm/sn/io.h>
-#include <asm/xtalk/xtalk.h>
-
-
-static int force_fire_and_forget = 1;
-
-/**
- * hub_pio_map -  establish a HUB PIO mapping
- *
- * @nasid:     nasid to perform PIO mapping on
- * @widget:    widget ID to perform PIO mapping for
- * @xtalk_addr: xtalk_address that needs to be mapped
- * @size:      size of the PIO mapping
- *
- **/
-unsigned long hub_pio_map(nasid_t nasid, xwidgetnum_t widget,
-                         unsigned long xtalk_addr, size_t size)
-{
-       unsigned i;
-
-       /* use small-window mapping if possible */
-       if ((xtalk_addr % SWIN_SIZE) + size <= SWIN_SIZE)
-               return NODE_SWIN_BASE(nasid, widget) + (xtalk_addr % SWIN_SIZE);
-
-       if ((xtalk_addr % BWIN_SIZE) + size > BWIN_SIZE) {
-               printk(KERN_WARNING "PIO mapping at hub %d widget %d addr 0x%lx"
-                               " too big (%ld)\n",
-                               nasid, widget, xtalk_addr, size);
-               return 0;
-       }
-
-       xtalk_addr &= ~(BWIN_SIZE-1);
-       for (i = 0; i < HUB_NUM_BIG_WINDOW; i++) {
-               if (test_and_set_bit(i, hub_data(nasid)->h_bigwin_used))
-                       continue;
-
-               /*
-                * The code below does a PIO write to setup an ITTE entry.
-                *
-                * We need to prevent other CPUs from seeing our updated
-                * memory shadow of the ITTE (in the piomap) until the ITTE
-                * entry is actually set up; otherwise, another CPU might
-                * attempt a PIO prematurely.
-                *
-                * Also, the only way we can know that an entry has been
-                * received  by the hub and can be used by future PIO reads/
-                * writes is by reading back the ITTE entry after writing it.
-                *
-                * For these two reasons, we PIO read back the ITTE entry
-                * after we write it.
-                */
-               IIO_ITTE_PUT(nasid, i, HUB_PIO_MAP_TO_MEM, widget, xtalk_addr);
-               __raw_readq(IIO_ITTE_GET(nasid, i));
-
-               return NODE_BWIN_BASE(nasid, widget) + (xtalk_addr % BWIN_SIZE);
-       }
-
-       printk(KERN_WARNING "unable to establish PIO mapping for at"
-                       " hub %d widget %d addr 0x%lx\n",
-                       nasid, widget, xtalk_addr);
-       return 0;
-}
-
-
-/*
- * hub_setup_prb(nasid, prbnum, credits, conveyor)
- *
- *     Put a PRB into fire-and-forget mode if conveyor isn't set.  Otherwise,
- *     put it into conveyor belt mode with the specified number of credits.
- */
-static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
-{
-       union iprb_u prb;
-       int prb_offset;
-
-       /*
-        * Get the current register value.
-        */
-       prb_offset = IIO_IOPRB(prbnum);
-       prb.iprb_regval = REMOTE_HUB_L(nasid, prb_offset);
-
-       /*
-        * Clear out some fields.
-        */
-       prb.iprb_ovflow = 1;
-       prb.iprb_bnakctr = 0;
-       prb.iprb_anakctr = 0;
-
-       /*
-        * Enable or disable fire-and-forget mode.
-        */
-       prb.iprb_ff = force_fire_and_forget ? 1 : 0;
-
-       /*
-        * Set the appropriate number of PIO credits for the widget.
-        */
-       prb.iprb_xtalkctr = credits;
-
-       /*
-        * Store the new value to the register.
-        */
-       REMOTE_HUB_S(nasid, prb_offset, prb.iprb_regval);
-}
-
-/**
- * hub_set_piomode  -  set pio mode for a given hub
- *
- * @nasid:     physical node ID for the hub in question
- *
- * Put the hub into either "PIO conveyor belt" mode or "fire-and-forget" mode.
- * To do this, we have to make absolutely sure that no PIOs are in progress
- * so we turn off access to all widgets for the duration of the function.
- *
- * XXX - This code should really check what kind of widget we're talking
- * to. Bridges can only handle three requests, but XG will do more.
- * How many can crossbow handle to widget 0?  We're assuming 1.
- *
- * XXX - There is a bug in the crossbow that link reset PIOs do not
- * return write responses.  The easiest solution to this problem is to
- * leave widget 0 (xbow) in fire-and-forget mode at all times. This
- * only affects PIOs to xbow registers, which should be rare.
- **/
-static void hub_set_piomode(nasid_t nasid)
-{
-       u64 ii_iowa;
-       union hubii_wcr_u ii_wcr;
-       unsigned i;
-
-       ii_iowa = REMOTE_HUB_L(nasid, IIO_OUTWIDGET_ACCESS);
-       REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, 0);
-
-       ii_wcr.wcr_reg_value = REMOTE_HUB_L(nasid, IIO_WCR);
-
-       if (ii_wcr.iwcr_dir_con) {
-               /*
-                * Assume a bridge here.
-                */
-               hub_setup_prb(nasid, 0, 3);
-       } else {
-               /*
-                * Assume a crossbow here.
-                */
-               hub_setup_prb(nasid, 0, 1);
-       }
-
-       /*
-        * XXX - Here's where we should take the widget type into
-        * account when assigning credits.
-        */
-       for (i = HUB_WIDGET_ID_MIN; i <= HUB_WIDGET_ID_MAX; i++)
-               hub_setup_prb(nasid, i, 3);
-
-       REMOTE_HUB_S(nasid, IIO_OUTWIDGET_ACCESS, ii_iowa);
-}
-
-/*
- * hub_pio_init  -  PIO-related hub initialization
- *
- * @nasid:     physical node ID of the hub to initialize
- */
-void hub_pio_init(nasid_t nasid)
-{
-       unsigned i;
-
-       /* initialize big window piomaps for this hub */
-       bitmap_zero(hub_data(nasid)->h_bigwin_used, HUB_NUM_BIG_WINDOW);
-       for (i = 0; i < HUB_NUM_BIG_WINDOW; i++)
-               IIO_ITTE_DISABLE(nasid, i);
-
-       hub_set_piomode(nasid);
-}
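
The long ITTE comment in the deleted hub_pio_map() above describes a
general MMIO idiom: a posted write is only known to have reached the
device once a read from the same device completes, so the write is
followed by a read-back of the same register. A minimal kernel-C sketch
of the idiom, assuming generic __raw_* accessors rather than anything
SN/IP27-specific:

static inline void mmio_write_flush(void __iomem *reg, u64 val)
{
	__raw_writeq(val, reg);		/* posted write may still be in flight */
	(void)__raw_readq(reg);		/* read-back forces completion */
}

This is exactly what the IIO_ITTE_PUT()/__raw_readq() pair above does
for each big-window ITTE entry.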
index a0dd3bd2b81b359491b447917486890ebc18fd4b..8f5299b269e7e7d1b104d6fa4616de4f7fdfc34d 100644 (file)
@@ -23,6 +23,8 @@
 #include <asm/sn/intr.h>
 #include <asm/sn/irq_alloc.h>
 
+#include "ip27-common.h"
+
 struct hub_irq_data {
        u64     *irq_mask[2];
        cpuid_t cpu;
index f79c4839371661237141b866d89743a101411c53..b8ca94cfb4fef34b42f9e5307e7dcfc09ef8a6d2 100644 (file)
@@ -23,6 +23,7 @@
 #include <asm/page.h>
 #include <asm/pgalloc.h>
 #include <asm/sections.h>
+#include <asm/sgialib.h>
 
 #include <asm/sn/arch.h>
 #include <asm/sn/agent.h>
index 84889b57d5ff684e32bc2a1897583a0f4770853e..fc2816398d0cf04a48c1f704ade54a65b97e15f8 100644 (file)
@@ -11,6 +11,8 @@
 #include <asm/sn/arch.h>
 #include <asm/sn/agent.h>
 
+#include "ip27-common.h"
+
 #if 0
 #define NODE_NUM_CPUS(n)       CNODE_NUM_CPUS(n)
 #else
 typedef unsigned long machreg_t;
 
 static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
-
-/*
- * Let's see what else we need to do here. Set up sp, gp?
- */
-void nmi_dump(void)
-{
-       void cont_nmi_dump(void);
-
-       cont_nmi_dump();
-}
+static void nmi_dump(void);
 
 void install_cpu_nmi_handler(int slice)
 {
@@ -53,7 +46,7 @@ void install_cpu_nmi_handler(int slice)
  * into the eframe format for the node under consideration.
  */
 
-void nmi_cpu_eframe_save(nasid_t nasid, int slice)
+static void nmi_cpu_eframe_save(nasid_t nasid, int slice)
 {
        struct reg_struct *nr;
        int             i;
@@ -129,7 +122,7 @@ void nmi_cpu_eframe_save(nasid_t nasid, int slice)
        pr_emerg("\n");
 }
 
-void nmi_dump_hub_irq(nasid_t nasid, int slice)
+static void nmi_dump_hub_irq(nasid_t nasid, int slice)
 {
        u64 mask0, mask1, pend0, pend1;
 
@@ -153,7 +146,7 @@ void nmi_dump_hub_irq(nasid_t nasid, int slice)
  * Copy the cpu registers which have been saved in the IP27prom format
  * into the eframe format for the node under consideration.
  */
-void nmi_node_eframe_save(nasid_t nasid)
+static void nmi_node_eframe_save(nasid_t nasid)
 {
        int slice;
 
@@ -170,8 +163,7 @@ void nmi_node_eframe_save(nasid_t nasid)
 /*
  * Save the nmi cpu registers for all cpus in the system.
  */
-void
-nmi_eframes_save(void)
+static void nmi_eframes_save(void)
 {
        nasid_t nasid;
 
@@ -179,8 +171,7 @@ nmi_eframes_save(void)
                nmi_node_eframe_save(nasid);
 }
 
-void
-cont_nmi_dump(void)
+static void nmi_dump(void)
 {
 #ifndef REAL_NMI_SIGNAL
        static atomic_t nmied_cpus = ATOMIC_INIT(0);
index b91f8c4fdc786011172f8111e7e0dfc3e04705e1..7c6dcf6e73f701c68595bd3b26677ff8d667b56a 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/io.h>
 
 #include <asm/sn/ioc3.h>
+#include <asm/setup.h>
 
 static inline struct ioc3_uartregs *console_uart(void)
 {
index 75a34684e7045977a89faa54b1ec740eb13af5ff..e8547636a7482a4a4c08738bccf7f246b8061d26 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/percpu.h>
 #include <linux/memblock.h>
 
+#include <asm/bootinfo.h>
 #include <asm/smp-ops.h>
 #include <asm/sgialib.h>
 #include <asm/time.h>
index a8e0c776ca6c628faa0b0ef4828de3fb4e9f51a2..b8a0e4cfa9ce882dcba3c0dc4e911716d47a457b 100644 (file)
@@ -18,6 +18,8 @@
 #include <asm/ip32/crime.h>
 #include <asm/ip32/mace.h>
 
+#include "ip32-common.h"
+
 struct sgi_crime __iomem *crime;
 struct sgi_mace __iomem *mace;
 
@@ -39,7 +41,7 @@ void __init crime_init(void)
               id, rev, field, (unsigned long) CRIME_BASE);
 }
 
-irqreturn_t crime_memerr_intr(unsigned int irq, void *dev_id)
+irqreturn_t crime_memerr_intr(int irq, void *dev_id)
 {
        unsigned long stat, addr;
        int fatal = 0;
@@ -90,7 +92,7 @@ irqreturn_t crime_memerr_intr(unsigned int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-irqreturn_t crime_cpuerr_intr(unsigned int irq, void *dev_id)
+irqreturn_t crime_cpuerr_intr(int irq, void *dev_id)
 {
        unsigned long stat = crime->cpu_error_stat & CRIME_CPU_ERROR_MASK;
        unsigned long addr = crime->cpu_error_addr & CRIME_CPU_ERROR_ADDR_MASK;
index 478b63b4c808f35456bb0b4ba69de4450edb7404..7cbc27941f928399c3cd5166741f5495c55b7eaa 100644 (file)
@@ -18,6 +18,8 @@
 #include <asm/ptrace.h>
 #include <asm/tlbdebug.h>
 
+#include "ip32-common.h"
+
 static int ip32_be_handler(struct pt_regs *regs, int is_fixup)
 {
        int data = regs->cp0_cause & 4;
diff --git a/arch/mips/sgi-ip32/ip32-common.h b/arch/mips/sgi-ip32/ip32-common.h
new file mode 100644 (file)
index 0000000..cfc0225
--- /dev/null
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __IP32_COMMON_H
+#define __IP32_COMMON_H
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+
+void __init crime_init(void);
+irqreturn_t crime_memerr_intr(int irq, void *dev_id);
+irqreturn_t crime_cpuerr_intr(int irq, void *dev_id);
+void __init ip32_be_init(void);
+void ip32_prepare_poweroff(void);
+
+#endif /* __IP32_COMMON_H */
index e21ea1de05e31953ce51f04122512cd27b2d9c46..29d04468a06b8f5c4004a25a18ad94dcb08013f9 100644 (file)
@@ -28,6 +28,8 @@
 #include <asm/ip32/mace.h>
 #include <asm/ip32/ip32_ints.h>
 
+#include "ip32-common.h"
+
 /* issue a PIO read to make sure no PIO writes are pending */
 static inline void flush_crime_bus(void)
 {
@@ -107,10 +109,6 @@ static inline void flush_mace_bus(void)
  * is quite different anyway.
  */
 
-/* Some initial interrupts to set up */
-extern irqreturn_t crime_memerr_intr(int irq, void *dev_id);
-extern irqreturn_t crime_cpuerr_intr(int irq, void *dev_id);
-
 /*
  * This is for pure CRIME interrupts - ie not MACE.  The advantage?
  * We get to split the register in half and do faster lookups.
index 3fc8d0a0bdfa45cc8b3aead0bd31144a874e17bb..5fee33744f674bdbdd777ba63b7d15f92d661a99 100644 (file)
@@ -15,6 +15,7 @@
 #include <asm/ip32/crime.h>
 #include <asm/bootinfo.h>
 #include <asm/page.h>
+#include <asm/sgialib.h>
 
 extern void crime_init(void);
 
index 18d1c115cd534a2d78a1ee5f8b53681e46fc021f..6bdc1421cda46cad28b5b253bf53703005ed09bf 100644 (file)
@@ -29,6 +29,8 @@
 #include <asm/ip32/crime.h>
 #include <asm/ip32/ip32_ints.h>
 
+#include "ip32-common.h"
+
 #define POWERDOWN_TIMEOUT      120
 /*
  * Blink frequency during reboot grace period and when panicked.
index 8019dae1721a811cef26fb75430a2b3ca151d6dd..aeb0805aae57bacfef7b95877042a6dc476a14a5 100644 (file)
@@ -26,8 +26,7 @@
 #include <asm/ip32/mace.h>
 #include <asm/ip32/ip32_ints.h>
 
-extern void ip32_be_init(void);
-extern void crime_init(void);
+#include "ip32-common.h"
 
 #ifdef CONFIG_SGI_O2MACE_ETH
 /*
index 93256540d07882af2b12a6faf0642d93ddc4970b..ead1cc35d88b2f13bfecf935a6e66e6049a24a75 100644 (file)
                                              <&cpu63_intc 3>;
                };
 
-               clint_mtimer0: timer@70ac000000 {
+               clint_mtimer0: timer@70ac004000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac000000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac004000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu0_intc 7>,
                                              <&cpu1_intc 7>,
                                              <&cpu2_intc 7>,
                                              <&cpu3_intc 7>;
                };
 
-               clint_mtimer1: timer@70ac010000 {
+               clint_mtimer1: timer@70ac014000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac010000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac014000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu4_intc 7>,
                                              <&cpu5_intc 7>,
                                              <&cpu6_intc 7>,
                                              <&cpu7_intc 7>;
                };
 
-               clint_mtimer2: timer@70ac020000 {
+               clint_mtimer2: timer@70ac024000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac020000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac024000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu8_intc 7>,
                                              <&cpu9_intc 7>,
                                              <&cpu10_intc 7>,
                                              <&cpu11_intc 7>;
                };
 
-               clint_mtimer3: timer@70ac030000 {
+               clint_mtimer3: timer@70ac034000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac030000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac034000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu12_intc 7>,
                                              <&cpu13_intc 7>,
                                              <&cpu14_intc 7>,
                                              <&cpu15_intc 7>;
                };
 
-               clint_mtimer4: timer@70ac040000 {
+               clint_mtimer4: timer@70ac044000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac040000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac044000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu16_intc 7>,
                                              <&cpu17_intc 7>,
                                              <&cpu18_intc 7>,
                                              <&cpu19_intc 7>;
                };
 
-               clint_mtimer5: timer@70ac050000 {
+               clint_mtimer5: timer@70ac054000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac050000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac054000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu20_intc 7>,
                                              <&cpu21_intc 7>,
                                              <&cpu22_intc 7>,
                                              <&cpu23_intc 7>;
                };
 
-               clint_mtimer6: timer@70ac060000 {
+               clint_mtimer6: timer@70ac064000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac060000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac064000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu24_intc 7>,
                                              <&cpu25_intc 7>,
                                              <&cpu26_intc 7>,
                                              <&cpu27_intc 7>;
                };
 
-               clint_mtimer7: timer@70ac070000 {
+               clint_mtimer7: timer@70ac074000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac070000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac074000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu28_intc 7>,
                                              <&cpu29_intc 7>,
                                              <&cpu30_intc 7>,
                                              <&cpu31_intc 7>;
                };
 
-               clint_mtimer8: timer@70ac080000 {
+               clint_mtimer8: timer@70ac084000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac080000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac084000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu32_intc 7>,
                                              <&cpu33_intc 7>,
                                              <&cpu34_intc 7>,
                                              <&cpu35_intc 7>;
                };
 
-               clint_mtimer9: timer@70ac090000 {
+               clint_mtimer9: timer@70ac094000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac090000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac094000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu36_intc 7>,
                                              <&cpu37_intc 7>,
                                              <&cpu38_intc 7>,
                                              <&cpu39_intc 7>;
                };
 
-               clint_mtimer10: timer@70ac0a0000 {
+               clint_mtimer10: timer@70ac0a4000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac0a0000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac0a4000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu40_intc 7>,
                                              <&cpu41_intc 7>,
                                              <&cpu42_intc 7>,
                                              <&cpu43_intc 7>;
                };
 
-               clint_mtimer11: timer@70ac0b0000 {
+               clint_mtimer11: timer@70ac0b4000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac0b0000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac0b4000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu44_intc 7>,
                                              <&cpu45_intc 7>,
                                              <&cpu46_intc 7>,
                                              <&cpu47_intc 7>;
                };
 
-               clint_mtimer12: timer@70ac0c0000 {
+               clint_mtimer12: timer@70ac0c4000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac0c0000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac0c4000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu48_intc 7>,
                                              <&cpu49_intc 7>,
                                              <&cpu50_intc 7>,
                                              <&cpu51_intc 7>;
                };
 
-               clint_mtimer13: timer@70ac0d0000 {
+               clint_mtimer13: timer@70ac0d4000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac0d0000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac0d4000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu52_intc 7>,
                                              <&cpu53_intc 7>,
                                              <&cpu54_intc 7>,
                                              <&cpu55_intc 7>;
                };
 
-               clint_mtimer14: timer@70ac0e0000 {
+               clint_mtimer14: timer@70ac0e4000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac0e0000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac0e4000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu56_intc 7>,
                                              <&cpu57_intc 7>,
                                              <&cpu58_intc 7>,
                                              <&cpu59_intc 7>;
                };
 
-               clint_mtimer15: timer@70ac0f0000 {
+               clint_mtimer15: timer@70ac0f4000 {
                        compatible = "sophgo,sg2042-aclint-mtimer", "thead,c900-aclint-mtimer";
-                       reg = <0x00000070 0xac0f0000 0x00000000 0x00007ff8>;
+                       reg = <0x00000070 0xac0f4000 0x00000000 0x0000c000>;
+                       reg-names = "mtimecmp";
                        interrupts-extended = <&cpu60_intc 7>,
                                              <&cpu61_intc 7>,
                                              <&cpu62_intc 7>,
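
The reg updates above point each timer node directly at its MTIMECMP
register array (hence the new reg-names = "mtimecmp"). A small C sketch
of how per-hart compare-register addresses follow from such a base,
assuming the standard ACLINT MTIMER layout of one 64-bit mtimecmp
register per hart at an 8-byte stride (the helper name and hart
indexing are illustrative):

#include <stdint.h>

static inline uint64_t mtimecmp_addr(uint64_t mtimecmp_base,
				     unsigned int hart_index)
{
	/* e.g. 0x70ac004000 + 8 * hart_index for clint_mtimer0 */
	return mtimecmp_base + 8ull * hart_index;
}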
index 58dc64dd94a82c8d8cc42a71ec69954dc548934a..719a97e7edb2c12277a8e08dd214e0eb03be094a 100644 (file)
@@ -795,6 +795,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
        struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
        struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
        struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+       bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
        void *orig_call = func_addr;
        bool save_ret;
        u32 insn;
@@ -878,7 +879,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 
        stack_size = round_up(stack_size, 16);
 
-       if (func_addr) {
+       if (!is_struct_ops) {
                /* For the trampoline called from function entry,
                 * the frame of traced function and the frame of
                 * trampoline need to be considered.
@@ -998,7 +999,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 
        emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
 
-       if (func_addr) {
+       if (!is_struct_ops) {
                /* trampoline called from function entry */
                emit_ld(RV_REG_T0, stack_size - 8, RV_REG_SP, ctx);
                emit_ld(RV_REG_FP, stack_size - 16, RV_REG_SP, ctx);
index 29cb275a219d7fb38fa0d16e6ba48e91c9d032b4..fdf723b6f6d0ce9f6742ef3c67adce3c8d57c002 100644 (file)
 #define X86_FEATURE_K6_MTRR            ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
 #define X86_FEATURE_CYRIX_ARR          ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
 #define X86_FEATURE_CENTAUR_MCR                ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
-
-/* CPU types for specific tunings: */
 #define X86_FEATURE_K8                 ( 3*32+ 4) /* "" Opteron, Athlon64 */
-/* FREE, was #define X86_FEATURE_K7                    ( 3*32+ 5) "" Athlon */
+#define X86_FEATURE_ZEN5               ( 3*32+ 5) /* "" CPU based on Zen5 microarchitecture */
 #define X86_FEATURE_P3                 ( 3*32+ 6) /* "" P3 */
 #define X86_FEATURE_P4                 ( 3*32+ 7) /* "" P4 */
 #define X86_FEATURE_CONSTANT_TSC       ( 3*32+ 8) /* TSC ticks at a constant rate */
index 197316121f04e154dad9ba4d9a7169674c623dd5..b65e9c46b92210293d767ab01434593c2aad27a0 100644 (file)
 #define INTEL_FAM6_ATOM_CRESTMONT_X    0xAF /* Sierra Forest */
 #define INTEL_FAM6_ATOM_CRESTMONT      0xB6 /* Grand Ridge */
 
+#define INTEL_FAM6_ATOM_DARKMONT_X     0xDD /* Clearwater Forest */
+
 /* Xeon Phi */
 
 #define INTEL_FAM6_XEON_PHI_KNL                0x57 /* Knights Landing */
index 8fa6ac0e2d7665f936756748c0e1b4ab08a2c5a7..d91b37f5b4bb45106ee927fcd98b66f1b82a54c1 100644 (file)
@@ -64,6 +64,7 @@ static inline bool kmsan_virt_addr_valid(void *addr)
 {
        unsigned long x = (unsigned long)addr;
        unsigned long y = x - __START_KERNEL_map;
+       bool ret;
 
        /* use the carry flag to determine if x was < __START_KERNEL_map */
        if (unlikely(x > y)) {
@@ -79,7 +80,21 @@ static inline bool kmsan_virt_addr_valid(void *addr)
                        return false;
        }
 
-       return pfn_valid(x >> PAGE_SHIFT);
+       /*
+        * pfn_valid() relies on RCU, and may call into the scheduler on exiting
+        * the critical section. However, this would result in recursion with
+        * KMSAN. Therefore, disable preemption here, and re-enable preemption
+        * below while suppressing reschedules to avoid recursion.
+        *
+        * Note, this occasionally sacrifices scheduling guarantees.
+        * Although, a kernel compiled with KMSAN has already given up on any
+        * performance guarantees due to being heavily instrumented.
+        */
+       preempt_disable();
+       ret = pfn_valid(x >> PAGE_SHIFT);
+       preempt_enable_no_resched();
+
+       return ret;
 }
 
 #endif /* !MODULE */
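
The "carry flag" comparison at the top of kmsan_virt_addr_valid() above
relies on unsigned wrap-around: y = x - __START_KERNEL_map wraps to a
huge value when x is below the base, so x > y holds exactly when
x >= __START_KERNEL_map. A standalone demonstration with illustrative
values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t base = 0xffffffff80000000ull;	/* stand-in for __START_KERNEL_map */
	uint64_t above = base + 0x1000, below = base - 0x1000;

	/* x >= base: the difference is small, so x > y holds */
	printf("above: %d\n", above > (uint64_t)(above - base));	/* prints 1 */
	/* x < base: the difference wraps to a huge value, so x > y fails */
	printf("below: %d\n", below > (uint64_t)(below - base));	/* prints 0 */
	return 0;
}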
index 21f9407be5d357a8f4204addc66841dc50d9bf51..7e88705e907f411b416d25e533e06623997555ea 100644 (file)
@@ -58,12 +58,29 @@ extern long __ia32_sys_ni_syscall(const struct pt_regs *regs);
                ,,regs->di,,regs->si,,regs->dx                          \
                ,,regs->r10,,regs->r8,,regs->r9)                        \
 
+
+/* SYSCALL_PT_ARGS is adapted from s390x */
+#define SYSCALL_PT_ARG6(m, t1, t2, t3, t4, t5, t6)                     \
+       SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5), m(t6, (regs->bp))
+#define SYSCALL_PT_ARG5(m, t1, t2, t3, t4, t5)                         \
+       SYSCALL_PT_ARG4(m, t1, t2, t3, t4),  m(t5, (regs->di))
+#define SYSCALL_PT_ARG4(m, t1, t2, t3, t4)                             \
+       SYSCALL_PT_ARG3(m, t1, t2, t3),  m(t4, (regs->si))
+#define SYSCALL_PT_ARG3(m, t1, t2, t3)                                 \
+       SYSCALL_PT_ARG2(m, t1, t2), m(t3, (regs->dx))
+#define SYSCALL_PT_ARG2(m, t1, t2)                                     \
+       SYSCALL_PT_ARG1(m, t1), m(t2, (regs->cx))
+#define SYSCALL_PT_ARG1(m, t1) m(t1, (regs->bx))
+#define SYSCALL_PT_ARGS(x, ...) SYSCALL_PT_ARG##x(__VA_ARGS__)
+
+#define __SC_COMPAT_CAST(t, a)                                         \
+       (__typeof(__builtin_choose_expr(__TYPE_IS_L(t), 0, 0U)))        \
+       (unsigned int)a
+
 /* Mapping of registers to parameters for syscalls on i386 */
 #define SC_IA32_REGS_TO_ARGS(x, ...)                                   \
-       __MAP(x,__SC_ARGS                                               \
-             ,,(unsigned int)regs->bx,,(unsigned int)regs->cx          \
-             ,,(unsigned int)regs->dx,,(unsigned int)regs->si          \
-             ,,(unsigned int)regs->di,,(unsigned int)regs->bp)
+       SYSCALL_PT_ARGS(x, __SC_COMPAT_CAST,                            \
+                       __MAP(x, __SC_TYPE, __VA_ARGS__))               \
 
 #define __SYS_STUB0(abi, name)                                         \
        long __##abi##_##name(const struct pt_regs *regs);              \
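
The recursive SYSCALL_PT_ARG* macros above are easiest to read fully
expanded; deriving the three-argument case by hand from the definitions
shown:

/*
 *   SYSCALL_PT_ARGS(3, __SC_COMPAT_CAST, t1, t2, t3)
 *     -> SYSCALL_PT_ARG3(__SC_COMPAT_CAST, t1, t2, t3)
 *     -> __SC_COMPAT_CAST(t1, (regs->bx)),
 *        __SC_COMPAT_CAST(t2, (regs->cx)),
 *        __SC_COMPAT_CAST(t3, (regs->dx))
 *
 * Each i386 register is now cast through the declared parameter type
 * (sign- or zero-extending as that type requires) instead of being
 * uniformly truncated to unsigned int as the old __MAP-based mapping
 * did.
 */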
index cc130b57542ac4033c1c653809f429306eb3e460..1d85cb7071cb21c84899477ec4a150d2fcc4da43 100644 (file)
@@ -403,7 +403,7 @@ noinstr void BUG_func(void)
 {
        BUG();
 }
-EXPORT_SYMBOL_GPL(BUG_func);
+EXPORT_SYMBOL(BUG_func);
 
 #define CALL_RIP_REL_OPCODE    0xff
 #define CALL_RIP_REL_MODRM     0x15
index 9f42d1c59e095ee6923a78cb2ecb04fbe375a438..f3abca334199d8eae235f1560f99448eb9675a27 100644 (file)
@@ -538,7 +538,7 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
 
        /* Figure out Zen generations: */
        switch (c->x86) {
-       case 0x17: {
+       case 0x17:
                switch (c->x86_model) {
                case 0x00 ... 0x2f:
                case 0x50 ... 0x5f:
@@ -554,8 +554,8 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                        goto warn;
                }
                break;
-       }
-       case 0x19: {
+
+       case 0x19:
                switch (c->x86_model) {
                case 0x00 ... 0x0f:
                case 0x20 ... 0x5f:
@@ -569,7 +569,20 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
                        goto warn;
                }
                break;
-       }
+
+       case 0x1a:
+               switch (c->x86_model) {
+               case 0x00 ... 0x0f:
+               case 0x20 ... 0x2f:
+               case 0x40 ... 0x4f:
+               case 0x70 ... 0x7f:
+                       setup_force_cpu_cap(X86_FEATURE_ZEN5);
+                       break;
+               default:
+                       goto warn;
+               }
+               break;
+
        default:
                break;
        }
@@ -1039,6 +1052,11 @@ static void init_amd_zen4(struct cpuinfo_x86 *c)
                msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT);
 }
 
+static void init_amd_zen5(struct cpuinfo_x86 *c)
+{
+       init_amd_zen_common();
+}
+
 static void init_amd(struct cpuinfo_x86 *c)
 {
        u64 vm_cr;
@@ -1084,6 +1102,8 @@ static void init_amd(struct cpuinfo_x86 *c)
                init_amd_zen3(c);
        else if (boot_cpu_has(X86_FEATURE_ZEN4))
                init_amd_zen4(c);
+       else if (boot_cpu_has(X86_FEATURE_ZEN5))
+               init_amd_zen5(c);
 
        /*
         * Enable workaround for FXSAVE leak on CPUs
index 8584babf3ea0ca2590f30383b9594231266e9437..71210cdb34426d967b5632667cb7579b11e97a2d 100644 (file)
@@ -205,12 +205,19 @@ static int bio_copy_user_iov(struct request *rq, struct rq_map_data *map_data,
        /*
         * success
         */
-       if ((iov_iter_rw(iter) == WRITE &&
-            (!map_data || !map_data->null_mapped)) ||
-           (map_data && map_data->from_user)) {
+       if (iov_iter_rw(iter) == WRITE &&
+            (!map_data || !map_data->null_mapped)) {
                ret = bio_copy_from_iter(bio, iter);
                if (ret)
                        goto cleanup;
+       } else if (map_data && map_data->from_user) {
+               struct iov_iter iter2 = *iter;
+
+               /* This is the copy-in part of SG_DXFER_TO_FROM_DEV. */
+               iter2.data_source = ITER_SOURCE;
+               ret = bio_copy_from_iter(bio, &iter2);
+               if (ret)
+                       goto cleanup;
        } else {
                if (bmd->is_our_pages)
                        zero_fill_bio(bio);
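
The new branch above implements the copy-in half of a bidirectional
transfer: for SG_DXFER_TO_FROM_DEV the user buffer must be copied into
the kernel bounce buffer before the command runs, even though the
request as a whole reads data back. A userspace sketch of the SG_IO
call whose semantics this serves (the helper itself is illustrative):

#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>

/* Issue a bidirectional SG_IO transfer: buf is sent to the device,
 * then overwritten with the device's response. */
static int sg_to_from_dev(int fd, unsigned char *cdb, unsigned char cdb_len,
			  void *buf, unsigned int buf_len)
{
	struct sg_io_hdr hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.interface_id = 'S';
	hdr.cmdp = cdb;
	hdr.cmd_len = cdb_len;
	hdr.dxferp = buf;
	hdr.dxfer_len = buf_len;
	hdr.dxfer_direction = SG_DXFER_TO_FROM_DEV;
	hdr.timeout = 5000;	/* milliseconds */

	return ioctl(fd, SG_IO, &hdr);
}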
index 9c73a763ef8838953bd1050b505621c39b8d4cdb..438f79c564cfc05d6f525550417eeee93c7b82bb 100644 (file)
@@ -20,8 +20,6 @@ static int blkpg_do_ioctl(struct block_device *bdev,
        struct blkpg_partition p;
        sector_t start, length;
 
-       if (disk->flags & GENHD_FL_NO_PART)
-               return -EINVAL;
        if (!capable(CAP_SYS_ADMIN))
                return -EACCES;
        if (copy_from_user(&p, upart, sizeof(struct blkpg_partition)))
index cab0d76a828e37eb90e38d91b4e92a61e703717e..5f5ed5c75f04d91d7bc8bf87ff4c9fa685c62318 100644 (file)
@@ -439,6 +439,11 @@ int bdev_add_partition(struct gendisk *disk, int partno, sector_t start,
                goto out;
        }
 
+       if (disk->flags & GENHD_FL_NO_PART) {
+               ret = -EINVAL;
+               goto out;
+       }
+
        if (partition_overlaps(disk, start, length, -1)) {
                ret = -EBUSY;
                goto out;
index 19035230563d7bf8ca2625a06c241d0eb010c3b7..7cb962e2145349670e4a506c879822a6bb3c6c23 100644 (file)
@@ -102,7 +102,7 @@ static int reset_pending_show(struct seq_file *s, void *v)
 {
        struct ivpu_device *vdev = seq_to_ivpu(s);
 
-       seq_printf(s, "%d\n", atomic_read(&vdev->pm->in_reset));
+       seq_printf(s, "%d\n", atomic_read(&vdev->pm->reset_pending));
        return 0;
 }
 
@@ -130,7 +130,9 @@ dvfs_mode_fops_write(struct file *file, const char __user *user_buf, size_t size
 
        fw->dvfs_mode = dvfs_mode;
 
-       ivpu_pm_schedule_recovery(vdev);
+       ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+       if (ret)
+               return ret;
 
        return size;
 }
@@ -190,7 +192,10 @@ fw_profiling_freq_fops_write(struct file *file, const char __user *user_buf,
                return ret;
 
        ivpu_hw_profiling_freq_drive(vdev, enable);
-       ivpu_pm_schedule_recovery(vdev);
+
+       ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
+       if (ret)
+               return ret;
 
        return size;
 }
@@ -301,11 +306,18 @@ static ssize_t
 ivpu_force_recovery_fn(struct file *file, const char __user *user_buf, size_t size, loff_t *pos)
 {
        struct ivpu_device *vdev = file->private_data;
+       int ret;
 
        if (!size)
                return -EINVAL;
 
-       ivpu_pm_schedule_recovery(vdev);
+       ret = ivpu_rpm_get(vdev);
+       if (ret)
+               return ret;
+
+       ivpu_pm_trigger_recovery(vdev, "debugfs");
+       flush_work(&vdev->pm->recovery_work);
+       ivpu_rpm_put(vdev);
        return size;
 }
 
index 64927682161b282e739ef024a0ccff29c49de2cd..9418c73ee8ef8ba025ef896ffe218b61b8058f75 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/firmware.h>
 #include <linux/module.h>
 #include <linux/pci.h>
+#include <linux/pm_runtime.h>
 
 #include <drm/drm_accel.h>
 #include <drm/drm_file.h>
@@ -17,6 +18,7 @@
 #include "ivpu_debugfs.h"
 #include "ivpu_drv.h"
 #include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
 #include "ivpu_gem.h"
 #include "ivpu_hw.h"
 #include "ivpu_ipc.h"
@@ -65,22 +67,20 @@ struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv)
        return file_priv;
 }
 
-struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id)
+static void file_priv_unbind(struct ivpu_device *vdev, struct ivpu_file_priv *file_priv)
 {
-       struct ivpu_file_priv *file_priv;
-
-       xa_lock_irq(&vdev->context_xa);
-       file_priv = xa_load(&vdev->context_xa, id);
-       /* file_priv may still be in context_xa during file_priv_release() */
-       if (file_priv && !kref_get_unless_zero(&file_priv->ref))
-               file_priv = NULL;
-       xa_unlock_irq(&vdev->context_xa);
-
-       if (file_priv)
-               ivpu_dbg(vdev, KREF, "file_priv get by id: ctx %u refcount %u\n",
-                        file_priv->ctx.id, kref_read(&file_priv->ref));
-
-       return file_priv;
+       mutex_lock(&file_priv->lock);
+       if (file_priv->bound) {
+               ivpu_dbg(vdev, FILE, "file_priv unbind: ctx %u\n", file_priv->ctx.id);
+
+               ivpu_cmdq_release_all_locked(file_priv);
+               ivpu_jsm_context_release(vdev, file_priv->ctx.id);
+               ivpu_bo_unbind_all_bos_from_context(vdev, &file_priv->ctx);
+               ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
+               file_priv->bound = false;
+               drm_WARN_ON(&vdev->drm, !xa_erase_irq(&vdev->context_xa, file_priv->ctx.id));
+       }
+       mutex_unlock(&file_priv->lock);
 }
 
 static void file_priv_release(struct kref *ref)
@@ -88,13 +88,15 @@ static void file_priv_release(struct kref *ref)
        struct ivpu_file_priv *file_priv = container_of(ref, struct ivpu_file_priv, ref);
        struct ivpu_device *vdev = file_priv->vdev;
 
-       ivpu_dbg(vdev, FILE, "file_priv release: ctx %u\n", file_priv->ctx.id);
+       ivpu_dbg(vdev, FILE, "file_priv release: ctx %u bound %d\n",
+                file_priv->ctx.id, (bool)file_priv->bound);
+
+       pm_runtime_get_sync(vdev->drm.dev);
+       mutex_lock(&vdev->context_list_lock);
+       file_priv_unbind(vdev, file_priv);
+       mutex_unlock(&vdev->context_list_lock);
+       pm_runtime_put_autosuspend(vdev->drm.dev);
 
-       ivpu_cmdq_release_all(file_priv);
-       ivpu_jsm_context_release(vdev, file_priv->ctx.id);
-       ivpu_bo_remove_all_bos_from_context(vdev, &file_priv->ctx);
-       ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
-       drm_WARN_ON(&vdev->drm, xa_erase_irq(&vdev->context_xa, file_priv->ctx.id) != file_priv);
        mutex_destroy(&file_priv->lock);
        kfree(file_priv);
 }
@@ -176,9 +178,6 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
        case DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
                args->value = vdev->hw->ranges.user.start;
                break;
-       case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
-               args->value = file_priv->priority;
-               break;
        case DRM_IVPU_PARAM_CONTEXT_ID:
                args->value = file_priv->ctx.id;
                break;
@@ -218,17 +217,10 @@ static int ivpu_get_param_ioctl(struct drm_device *dev, void *data, struct drm_f
 
 static int ivpu_set_param_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
 {
-       struct ivpu_file_priv *file_priv = file->driver_priv;
        struct drm_ivpu_param *args = data;
        int ret = 0;
 
        switch (args->param) {
-       case DRM_IVPU_PARAM_CONTEXT_PRIORITY:
-               if (args->value <= DRM_IVPU_CONTEXT_PRIORITY_REALTIME)
-                       file_priv->priority = args->value;
-               else
-                       ret = -EINVAL;
-               break;
        default:
                ret = -EINVAL;
        }
@@ -241,50 +233,53 @@ static int ivpu_open(struct drm_device *dev, struct drm_file *file)
        struct ivpu_device *vdev = to_ivpu_device(dev);
        struct ivpu_file_priv *file_priv;
        u32 ctx_id;
-       void *old;
-       int ret;
+       int idx, ret;
 
-       ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, NULL, vdev->context_xa_limit, GFP_KERNEL);
-       if (ret) {
-               ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
-               return ret;
-       }
+       if (!drm_dev_enter(dev, &idx))
+               return -ENODEV;
 
        file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
        if (!file_priv) {
                ret = -ENOMEM;
-               goto err_xa_erase;
+               goto err_dev_exit;
        }
 
        file_priv->vdev = vdev;
-       file_priv->priority = DRM_IVPU_CONTEXT_PRIORITY_NORMAL;
+       file_priv->bound = true;
        kref_init(&file_priv->ref);
        mutex_init(&file_priv->lock);
 
+       mutex_lock(&vdev->context_list_lock);
+
+       ret = xa_alloc_irq(&vdev->context_xa, &ctx_id, file_priv,
+                          vdev->context_xa_limit, GFP_KERNEL);
+       if (ret) {
+               ivpu_err(vdev, "Failed to allocate context id: %d\n", ret);
+               goto err_unlock;
+       }
+
        ret = ivpu_mmu_user_context_init(vdev, &file_priv->ctx, ctx_id);
        if (ret)
-               goto err_mutex_destroy;
+               goto err_xa_erase;
 
-       old = xa_store_irq(&vdev->context_xa, ctx_id, file_priv, GFP_KERNEL);
-       if (xa_is_err(old)) {
-               ret = xa_err(old);
-               ivpu_err(vdev, "Failed to store context %u: %d\n", ctx_id, ret);
-               goto err_ctx_fini;
-       }
+       mutex_unlock(&vdev->context_list_lock);
+       drm_dev_exit(idx);
+
+       file->driver_priv = file_priv;
 
        ivpu_dbg(vdev, FILE, "file_priv create: ctx %u process %s pid %d\n",
                 ctx_id, current->comm, task_pid_nr(current));
 
-       file->driver_priv = file_priv;
        return 0;
 
-err_ctx_fini:
-       ivpu_mmu_user_context_fini(vdev, &file_priv->ctx);
-err_mutex_destroy:
-       mutex_destroy(&file_priv->lock);
-       kfree(file_priv);
 err_xa_erase:
        xa_erase_irq(&vdev->context_xa, ctx_id);
+err_unlock:
+       mutex_unlock(&vdev->context_list_lock);
+       mutex_destroy(&file_priv->lock);
+       kfree(file_priv);
+err_dev_exit:
+       drm_dev_exit(idx);
        return ret;
 }
 
@@ -340,8 +335,6 @@ static int ivpu_wait_for_ready(struct ivpu_device *vdev)
 
        if (!ret)
                ivpu_dbg(vdev, PM, "VPU ready message received successfully\n");
-       else
-               ivpu_hw_diagnose_failure(vdev);
 
        return ret;
 }
@@ -369,6 +362,9 @@ int ivpu_boot(struct ivpu_device *vdev)
        ret = ivpu_wait_for_ready(vdev);
        if (ret) {
                ivpu_err(vdev, "Failed to boot the firmware: %d\n", ret);
+               ivpu_hw_diagnose_failure(vdev);
+               ivpu_mmu_evtq_dump(vdev);
+               ivpu_fw_log_dump(vdev);
                return ret;
        }
 
@@ -540,6 +536,10 @@ static int ivpu_dev_init(struct ivpu_device *vdev)
        lockdep_set_class(&vdev->submitted_jobs_xa.xa_lock, &submitted_jobs_xa_lock_class_key);
        INIT_LIST_HEAD(&vdev->bo_list);
 
+       ret = drmm_mutex_init(&vdev->drm, &vdev->context_list_lock);
+       if (ret)
+               goto err_xa_destroy;
+
        ret = drmm_mutex_init(&vdev->drm, &vdev->bo_list_lock);
        if (ret)
                goto err_xa_destroy;
@@ -611,14 +611,30 @@ err_xa_destroy:
        return ret;
 }
 
+static void ivpu_bo_unbind_all_user_contexts(struct ivpu_device *vdev)
+{
+       struct ivpu_file_priv *file_priv;
+       unsigned long ctx_id;
+
+       mutex_lock(&vdev->context_list_lock);
+
+       xa_for_each(&vdev->context_xa, ctx_id, file_priv)
+               file_priv_unbind(vdev, file_priv);
+
+       mutex_unlock(&vdev->context_list_lock);
+}
+
 static void ivpu_dev_fini(struct ivpu_device *vdev)
 {
        ivpu_pm_disable(vdev);
        ivpu_shutdown(vdev);
        if (IVPU_WA(d3hot_after_power_off))
                pci_set_power_state(to_pci_dev(vdev->drm.dev), PCI_D3hot);
+
+       ivpu_jobs_abort_all(vdev);
        ivpu_job_done_consumer_fini(vdev);
        ivpu_pm_cancel_recovery(vdev);
+       ivpu_bo_unbind_all_user_contexts(vdev);
 
        ivpu_ipc_fini(vdev);
        ivpu_fw_fini(vdev);
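
ivpu_open() above now brackets context creation with drm_dev_enter()
and drm_dev_exit(), as does ivpu_bo_alloc_vpu_addr() in the ivpu_gem.c
diff further down. A minimal sketch of this core-DRM unplug guard, with
a hypothetical ioctl body standing in for the driver code:

static int example_ioctl_body(struct drm_device *drm)
{
	int idx;

	if (!drm_dev_enter(drm, &idx))
		return -ENODEV;		/* device already unplugged */

	/* ... safe to touch device state while inside the section ... */

	drm_dev_exit(idx);
	return 0;
}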
index ebc4b84f27b209df9d653747772e756702cf4601..069ace4adb2d19c1a0544333d0da65632c524ea7 100644 (file)
@@ -56,6 +56,7 @@
 #define IVPU_DBG_JSM    BIT(10)
 #define IVPU_DBG_KREF   BIT(11)
 #define IVPU_DBG_RPM    BIT(12)
+#define IVPU_DBG_MMU_MAP BIT(13)
 
 #define ivpu_err(vdev, fmt, ...) \
        drm_err(&(vdev)->drm, "%s(): " fmt, __func__, ##__VA_ARGS__)
@@ -114,6 +115,7 @@ struct ivpu_device {
 
        struct ivpu_mmu_context gctx;
        struct ivpu_mmu_context rctx;
+       struct mutex context_list_lock; /* Protects user context addition/removal */
        struct xarray context_xa;
        struct xa_limit context_xa_limit;
 
@@ -145,8 +147,8 @@ struct ivpu_file_priv {
        struct mutex lock; /* Protects cmdq */
        struct ivpu_cmdq *cmdq[IVPU_NUM_ENGINES];
        struct ivpu_mmu_context ctx;
-       u32 priority;
        bool has_mmu_faults;
+       bool bound;
 };
 
 extern int ivpu_dbg_mask;
@@ -162,7 +164,6 @@ extern bool ivpu_disable_mmu_cont_pages;
 extern int ivpu_test_mode;
 
 struct ivpu_file_priv *ivpu_file_priv_get(struct ivpu_file_priv *file_priv);
-struct ivpu_file_priv *ivpu_file_priv_get_by_ctx_id(struct ivpu_device *vdev, unsigned long id);
 void ivpu_file_priv_put(struct ivpu_file_priv **link);
 
 int ivpu_boot(struct ivpu_device *vdev);
index 1dda4f38ea25cd356cc9efadcaa8d35394c6b19f..e9ddbe9f50ebeffaa3a1617431b864ceec73e2e7 100644 (file)
@@ -24,14 +24,11 @@ static const struct drm_gem_object_funcs ivpu_gem_funcs;
 
 static inline void ivpu_dbg_bo(struct ivpu_device *vdev, struct ivpu_bo *bo, const char *action)
 {
-       if (bo->ctx)
-               ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u ctx %d vpu_addr 0x%llx mmu_mapped %d\n",
-                        action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt,
-                        bo->handle, bo->ctx->id, bo->vpu_addr, bo->mmu_mapped);
-       else
-               ivpu_dbg(vdev, BO, "%6s: size %zu has_pages %d dma_mapped %d handle %u (not added to context)\n",
-                        action, ivpu_bo_size(bo), (bool)bo->base.pages, (bool)bo->base.sgt,
-                        bo->handle);
+       ivpu_dbg(vdev, BO,
+                "%6s: bo %8p vpu_addr %9llx size %8zu ctx %d has_pages %d dma_mapped %d mmu_mapped %d wc %d imported %d\n",
+                action, bo, bo->vpu_addr, ivpu_bo_size(bo), bo->ctx ? bo->ctx->id : 0,
+                (bool)bo->base.pages, (bool)bo->base.sgt, bo->mmu_mapped, bo->base.map_wc,
+                (bool)bo->base.base.import_attach);
 }
 
 /*
@@ -49,12 +46,7 @@ int __must_check ivpu_bo_pin(struct ivpu_bo *bo)
        mutex_lock(&bo->lock);
 
        ivpu_dbg_bo(vdev, bo, "pin");
-
-       if (!bo->ctx) {
-               ivpu_err(vdev, "vpu_addr not allocated for BO %d\n", bo->handle);
-               ret = -EINVAL;
-               goto unlock;
-       }
+       drm_WARN_ON(&vdev->drm, !bo->ctx);
 
        if (!bo->mmu_mapped) {
                struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
@@ -85,7 +77,10 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
                       const struct ivpu_addr_range *range)
 {
        struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
-       int ret;
+       int idx, ret;
+
+       if (!drm_dev_enter(&vdev->drm, &idx))
+               return -ENODEV;
 
        mutex_lock(&bo->lock);
 
@@ -101,6 +96,8 @@ ivpu_bo_alloc_vpu_addr(struct ivpu_bo *bo, struct ivpu_mmu_context *ctx,
 
        mutex_unlock(&bo->lock);
 
+       drm_dev_exit(idx);
+
        return ret;
 }
 
@@ -108,11 +105,7 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 {
        struct ivpu_device *vdev = ivpu_bo_to_vdev(bo);
 
-       lockdep_assert_held(&bo->lock);
-
-       ivpu_dbg_bo(vdev, bo, "unbind");
-
-       /* TODO: dma_unmap */
+       lockdep_assert(lockdep_is_held(&bo->lock) || !kref_read(&bo->base.base.refcount));
 
        if (bo->mmu_mapped) {
                drm_WARN_ON(&vdev->drm, !bo->ctx);
@@ -124,19 +117,23 @@ static void ivpu_bo_unbind_locked(struct ivpu_bo *bo)
 
        if (bo->ctx) {
                ivpu_mmu_context_remove_node(bo->ctx, &bo->mm_node);
-               bo->vpu_addr = 0;
                bo->ctx = NULL;
        }
-}
 
-static void ivpu_bo_unbind(struct ivpu_bo *bo)
-{
-       mutex_lock(&bo->lock);
-       ivpu_bo_unbind_locked(bo);
-       mutex_unlock(&bo->lock);
+       if (bo->base.base.import_attach)
+               return;
+
+       dma_resv_lock(bo->base.base.resv, NULL);
+       if (bo->base.sgt) {
+               dma_unmap_sgtable(vdev->drm.dev, bo->base.sgt, DMA_BIDIRECTIONAL, 0);
+               sg_free_table(bo->base.sgt);
+               kfree(bo->base.sgt);
+               bo->base.sgt = NULL;
+       }
+       dma_resv_unlock(bo->base.base.resv);
 }
 
-void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
+void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx)
 {
        struct ivpu_bo *bo;
 
@@ -146,8 +143,10 @@ void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_m
        mutex_lock(&vdev->bo_list_lock);
        list_for_each_entry(bo, &vdev->bo_list, bo_list_node) {
                mutex_lock(&bo->lock);
-               if (bo->ctx == ctx)
+               if (bo->ctx == ctx) {
+                       ivpu_dbg_bo(vdev, bo, "unbind");
                        ivpu_bo_unbind_locked(bo);
+               }
                mutex_unlock(&bo->lock);
        }
        mutex_unlock(&vdev->bo_list_lock);
@@ -199,9 +198,6 @@ ivpu_bo_create(struct ivpu_device *vdev, u64 size, u32 flags)
        list_add_tail(&bo->bo_list_node, &vdev->bo_list);
        mutex_unlock(&vdev->bo_list_lock);
 
-       ivpu_dbg(vdev, BO, "create: vpu_addr 0x%llx size %zu flags 0x%x\n",
-                bo->vpu_addr, bo->base.base.size, flags);
-
        return bo;
 }
 
@@ -212,6 +208,12 @@ static int ivpu_bo_open(struct drm_gem_object *obj, struct drm_file *file)
        struct ivpu_bo *bo = to_ivpu_bo(obj);
        struct ivpu_addr_range *range;
 
+       if (bo->ctx) {
+               ivpu_warn(vdev, "Can't add BO to ctx %u: already in ctx %u\n",
+                         file_priv->ctx.id, bo->ctx->id);
+               return -EALREADY;
+       }
+
        if (bo->flags & DRM_IVPU_BO_SHAVE_MEM)
                range = &vdev->hw->ranges.shave;
        else if (bo->flags & DRM_IVPU_BO_DMA_MEM)
@@ -227,62 +229,24 @@ static void ivpu_bo_free(struct drm_gem_object *obj)
        struct ivpu_device *vdev = to_ivpu_device(obj->dev);
        struct ivpu_bo *bo = to_ivpu_bo(obj);
 
+       ivpu_dbg_bo(vdev, bo, "free");
+
        mutex_lock(&vdev->bo_list_lock);
        list_del(&bo->bo_list_node);
        mutex_unlock(&vdev->bo_list_lock);
 
        drm_WARN_ON(&vdev->drm, !dma_resv_test_signaled(obj->resv, DMA_RESV_USAGE_READ));
 
-       ivpu_dbg_bo(vdev, bo, "free");
-
-       ivpu_bo_unbind(bo);
+       ivpu_bo_unbind_locked(bo);
        mutex_destroy(&bo->lock);
 
        drm_WARN_ON(obj->dev, bo->base.pages_use_count > 1);
        drm_gem_shmem_free(&bo->base);
 }
 
-static const struct dma_buf_ops ivpu_bo_dmabuf_ops =  {
-       .cache_sgt_mapping = true,
-       .attach = drm_gem_map_attach,
-       .detach = drm_gem_map_detach,
-       .map_dma_buf = drm_gem_map_dma_buf,
-       .unmap_dma_buf = drm_gem_unmap_dma_buf,
-       .release = drm_gem_dmabuf_release,
-       .mmap = drm_gem_dmabuf_mmap,
-       .vmap = drm_gem_dmabuf_vmap,
-       .vunmap = drm_gem_dmabuf_vunmap,
-};
-
-static struct dma_buf *ivpu_bo_export(struct drm_gem_object *obj, int flags)
-{
-       struct drm_device *dev = obj->dev;
-       struct dma_buf_export_info exp_info = {
-               .exp_name = KBUILD_MODNAME,
-               .owner = dev->driver->fops->owner,
-               .ops = &ivpu_bo_dmabuf_ops,
-               .size = obj->size,
-               .flags = flags,
-               .priv = obj,
-               .resv = obj->resv,
-       };
-       void *sgt;
-
-       /*
-        * Make sure that pages are allocated and dma-mapped before exporting the bo.
-        * DMA-mapping is required if the bo will be imported to the same device.
-        */
-       sgt = drm_gem_shmem_get_pages_sgt(to_drm_gem_shmem_obj(obj));
-       if (IS_ERR(sgt))
-               return sgt;
-
-       return drm_gem_dmabuf_export(dev, &exp_info);
-}
-
 static const struct drm_gem_object_funcs ivpu_gem_funcs = {
        .free = ivpu_bo_free,
        .open = ivpu_bo_open,
-       .export = ivpu_bo_export,
        .print_info = drm_gem_shmem_object_print_info,
        .pin = drm_gem_shmem_object_pin,
        .unpin = drm_gem_shmem_object_unpin,
@@ -315,11 +279,9 @@ int ivpu_bo_create_ioctl(struct drm_device *dev, void *data, struct drm_file *fi
                return PTR_ERR(bo);
        }
 
-       ret = drm_gem_handle_create(file, &bo->base.base, &bo->handle);
-       if (!ret) {
+       ret = drm_gem_handle_create(file, &bo->base.base, &args->handle);
+       if (!ret)
                args->vpu_addr = bo->vpu_addr;
-               args->handle = bo->handle;
-       }
 
        drm_gem_object_put(&bo->base.base);
 
@@ -361,7 +323,9 @@ ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 fla
        if (ret)
                goto err_put;
 
+       dma_resv_lock(bo->base.base.resv, NULL);
        ret = drm_gem_shmem_vmap(&bo->base, &map);
+       dma_resv_unlock(bo->base.base.resv);
        if (ret)
                goto err_put;
 
@@ -376,7 +340,10 @@ void ivpu_bo_free_internal(struct ivpu_bo *bo)
 {
        struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->base.vaddr);
 
+       dma_resv_lock(bo->base.base.resv, NULL);
        drm_gem_shmem_vunmap(&bo->base, &map);
+       dma_resv_unlock(bo->base.base.resv);
+
        drm_gem_object_put(&bo->base.base);
 }
 
@@ -432,19 +399,11 @@ int ivpu_bo_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file
 
 static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
 {
-       unsigned long dma_refcount = 0;
-
        mutex_lock(&bo->lock);
 
-       if (bo->base.base.dma_buf && bo->base.base.dma_buf->file)
-               dma_refcount = atomic_long_read(&bo->base.base.dma_buf->file->f_count);
-
-       drm_printf(p, "%-3u %-6d 0x%-12llx %-10lu 0x%-8x %-4u %-8lu",
-                  bo->ctx->id, bo->handle, bo->vpu_addr, bo->base.base.size,
-                  bo->flags, kref_read(&bo->base.base.refcount), dma_refcount);
-
-       if (bo->base.base.import_attach)
-               drm_printf(p, " imported");
+       drm_printf(p, "%-9p %-3u 0x%-12llx %-10lu 0x%-8x %-4u",
+                  bo, bo->ctx->id, bo->vpu_addr, bo->base.base.size,
+                  bo->flags, kref_read(&bo->base.base.refcount));
 
        if (bo->base.pages)
                drm_printf(p, " has_pages");
@@ -452,6 +411,9 @@ static void ivpu_bo_print_info(struct ivpu_bo *bo, struct drm_printer *p)
        if (bo->mmu_mapped)
                drm_printf(p, " mmu_mapped");
 
+       if (bo->base.base.import_attach)
+               drm_printf(p, " imported");
+
        drm_printf(p, "\n");
 
        mutex_unlock(&bo->lock);
@@ -462,8 +424,8 @@ void ivpu_bo_list(struct drm_device *dev, struct drm_printer *p)
        struct ivpu_device *vdev = to_ivpu_device(dev);
        struct ivpu_bo *bo;
 
-       drm_printf(p, "%-3s %-6s %-14s %-10s %-10s %-4s %-8s %s\n",
-                  "ctx", "handle", "vpu_addr", "size", "flags", "refs", "dma_refs", "attribs");
+       drm_printf(p, "%-9s %-3s %-14s %-10s %-10s %-4s %s\n",
+                  "bo", "ctx", "vpu_addr", "size", "flags", "refs", "attribs");
 
        mutex_lock(&vdev->bo_list_lock);
        list_for_each_entry(bo, &vdev->bo_list, bo_list_node)
index d75cad0d3c742db703dbe0812a0df6eaaba24d53..a8559211c70d41ac20bae1da57d846ffd4d7e3b3 100644 (file)
@@ -19,14 +19,13 @@ struct ivpu_bo {
 
        struct mutex lock; /* Protects: ctx, mmu_mapped, vpu_addr */
        u64 vpu_addr;
-       u32 handle;
        u32 flags;
        u32 job_status; /* Valid only for command buffer */
        bool mmu_mapped;
 };
 
 int ivpu_bo_pin(struct ivpu_bo *bo);
-void ivpu_bo_remove_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
+void ivpu_bo_unbind_all_bos_from_context(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx);
 
 struct drm_gem_object *ivpu_gem_create_object(struct drm_device *dev, size_t size);
 struct ivpu_bo *ivpu_bo_alloc_internal(struct ivpu_device *vdev, u64 vpu_addr, u64 size, u32 flags);
index 574cdeefb66b39af45beda6534a6ac3eb0e57c7b..f15a93d83057822f2414aedb22359063fb99c6a1 100644 (file)
@@ -875,24 +875,18 @@ static void ivpu_hw_37xx_irq_disable(struct ivpu_device *vdev)
 
 static void ivpu_hw_37xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
 {
-       ivpu_err_ratelimited(vdev, "WDT NCE irq\n");
-
-       ivpu_pm_schedule_recovery(vdev);
+       ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
 }
 
 static void ivpu_hw_37xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
 {
-       ivpu_err_ratelimited(vdev, "WDT MSS irq\n");
-
        ivpu_hw_wdt_disable(vdev);
-       ivpu_pm_schedule_recovery(vdev);
+       ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
 }
 
 static void ivpu_hw_37xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
 {
-       ivpu_err_ratelimited(vdev, "NOC Firewall irq\n");
-
-       ivpu_pm_schedule_recovery(vdev);
+       ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
 }
 
 /* Handler for IRQs from VPU core (irqV) */
@@ -970,7 +964,7 @@ static bool ivpu_hw_37xx_irqb_handler(struct ivpu_device *vdev, int irq)
                REGB_WR32(VPU_37XX_BUTTRESS_INTERRUPT_STAT, status);
 
        if (schedule_recovery)
-               ivpu_pm_schedule_recovery(vdev);
+               ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
 
        return true;
 }
index eba2fdef2ace1384c93c1cbb30a8e3d9633abba8..704288084f37379eb6c7a1a5beb8f58ec3cea4a2 100644 (file)
@@ -746,7 +746,7 @@ static int ivpu_hw_40xx_info_init(struct ivpu_device *vdev)
        return 0;
 }
 
-static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
+static int ivpu_hw_40xx_ip_reset(struct ivpu_device *vdev)
 {
        int ret;
        u32 val;
@@ -768,6 +768,23 @@ static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
        return ret;
 }
 
+static int ivpu_hw_40xx_reset(struct ivpu_device *vdev)
+{
+       int ret = 0;
+
+       if (ivpu_hw_40xx_ip_reset(vdev)) {
+               ivpu_err(vdev, "Failed to reset VPU IP\n");
+               ret = -EIO;
+       }
+
+       if (ivpu_pll_disable(vdev)) {
+               ivpu_err(vdev, "Failed to disable PLL\n");
+               ret = -EIO;
+       }
+
+       return ret;
+}
+
 static int ivpu_hw_40xx_d0i3_enable(struct ivpu_device *vdev)
 {
        int ret;
@@ -913,7 +930,7 @@ static int ivpu_hw_40xx_power_down(struct ivpu_device *vdev)
 
        ivpu_hw_40xx_save_d0i3_entry_timestamp(vdev);
 
-       if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_reset(vdev))
+       if (!ivpu_hw_40xx_is_idle(vdev) && ivpu_hw_40xx_ip_reset(vdev))
                ivpu_warn(vdev, "Failed to reset the VPU\n");
 
        if (ivpu_pll_disable(vdev)) {
@@ -1032,18 +1049,18 @@ static void ivpu_hw_40xx_irq_disable(struct ivpu_device *vdev)
 static void ivpu_hw_40xx_irq_wdt_nce_handler(struct ivpu_device *vdev)
 {
        /* TODO: For LNN hang consider engine reset instead of full recovery */
-       ivpu_pm_schedule_recovery(vdev);
+       ivpu_pm_trigger_recovery(vdev, "WDT NCE IRQ");
 }
 
 static void ivpu_hw_40xx_irq_wdt_mss_handler(struct ivpu_device *vdev)
 {
        ivpu_hw_wdt_disable(vdev);
-       ivpu_pm_schedule_recovery(vdev);
+       ivpu_pm_trigger_recovery(vdev, "WDT MSS IRQ");
 }
 
 static void ivpu_hw_40xx_irq_noc_firewall_handler(struct ivpu_device *vdev)
 {
-       ivpu_pm_schedule_recovery(vdev);
+       ivpu_pm_trigger_recovery(vdev, "NOC Firewall IRQ");
 }
 
 /* Handler for IRQs from VPU core (irqV) */
@@ -1137,7 +1154,7 @@ static bool ivpu_hw_40xx_irqb_handler(struct ivpu_device *vdev, int irq)
        REGB_WR32(VPU_40XX_BUTTRESS_INTERRUPT_STAT, status);
 
        if (schedule_recovery)
-               ivpu_pm_schedule_recovery(vdev);
+               ivpu_pm_trigger_recovery(vdev, "Buttress IRQ");
 
        return true;
 }
index e86621f16f85a8d5d41f0d5dcf1ef99d1e45541d..fa66c39b57ecaaecae036d17f79c3e7683fc5279 100644 (file)
@@ -343,10 +343,8 @@ int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *r
        hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
                                                &hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
                                                vdev->timeout.jsm);
-       if (hb_ret == -ETIMEDOUT) {
-               ivpu_hw_diagnose_failure(vdev);
-               ivpu_pm_schedule_recovery(vdev);
-       }
+       if (hb_ret == -ETIMEDOUT)
+               ivpu_pm_trigger_recovery(vdev, "IPC timeout");
 
        return ret;
 }
index 7206cf9cdb4a45335b220796621fcd2c55a8ddf0..0440bee3ecafd567da6cbf47584f3daa688b618d 100644 (file)
@@ -112,22 +112,20 @@ static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engin
        }
 }
 
-void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv)
+void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
 {
        int i;
 
-       mutex_lock(&file_priv->lock);
+       lockdep_assert_held(&file_priv->lock);
 
        for (i = 0; i < IVPU_NUM_ENGINES; i++)
                ivpu_cmdq_release_locked(file_priv, i);
-
-       mutex_unlock(&file_priv->lock);
 }
 
 /*
  * Mark the doorbell as unregistered and reset job queue pointers.
  * This function needs to be called when the VPU hardware is restarted
- * and FW looses job queue state. The next time job queue is used it
+ * and FW loses job queue state. The next time job queue is used it
  * will be registered again.
  */
 static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
@@ -161,15 +159,13 @@ void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
        struct ivpu_file_priv *file_priv;
        unsigned long ctx_id;
 
-       xa_for_each(&vdev->context_xa, ctx_id, file_priv) {
-               file_priv = ivpu_file_priv_get_by_ctx_id(vdev, ctx_id);
-               if (!file_priv)
-                       continue;
+       mutex_lock(&vdev->context_list_lock);
 
+       xa_for_each(&vdev->context_xa, ctx_id, file_priv)
                ivpu_cmdq_reset_all(file_priv);
 
-               ivpu_file_priv_put(&file_priv);
-       }
+       mutex_unlock(&vdev->context_list_lock);
+
 }
 
 static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
@@ -243,60 +239,32 @@ static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
        return &fence->base;
 }
 
-static void job_get(struct ivpu_job *job, struct ivpu_job **link)
+static void ivpu_job_destroy(struct ivpu_job *job)
 {
        struct ivpu_device *vdev = job->vdev;
-
-       kref_get(&job->ref);
-       *link = job;
-
-       ivpu_dbg(vdev, KREF, "Job get: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
-}
-
-static void job_release(struct kref *ref)
-{
-       struct ivpu_job *job = container_of(ref, struct ivpu_job, ref);
-       struct ivpu_device *vdev = job->vdev;
        u32 i;
 
+       ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d",
+                job->job_id, job->file_priv->ctx.id, job->engine_idx);
+
        for (i = 0; i < job->bo_count; i++)
                if (job->bos[i])
                        drm_gem_object_put(&job->bos[i]->base.base);
 
        dma_fence_put(job->done_fence);
        ivpu_file_priv_put(&job->file_priv);
-
-       ivpu_dbg(vdev, KREF, "Job released: id %u\n", job->job_id);
        kfree(job);
-
-       /* Allow the VPU to get suspended, must be called after ivpu_file_priv_put() */
-       ivpu_rpm_put(vdev);
-}
-
-static void job_put(struct ivpu_job *job)
-{
-       struct ivpu_device *vdev = job->vdev;
-
-       ivpu_dbg(vdev, KREF, "Job put: id %u refcount %u\n", job->job_id, kref_read(&job->ref));
-       kref_put(&job->ref, job_release);
 }
 
 static struct ivpu_job *
-ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
+ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
 {
        struct ivpu_device *vdev = file_priv->vdev;
        struct ivpu_job *job;
-       int ret;
-
-       ret = ivpu_rpm_get(vdev);
-       if (ret < 0)
-               return NULL;
 
        job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
        if (!job)
-               goto err_rpm_put;
-
-       kref_init(&job->ref);
+               return NULL;
 
        job->vdev = vdev;
        job->engine_idx = engine_idx;
@@ -310,17 +278,14 @@ ivpu_create_job(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
        job->file_priv = ivpu_file_priv_get(file_priv);
 
        ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
-
        return job;
 
 err_free_job:
        kfree(job);
-err_rpm_put:
-       ivpu_rpm_put(vdev);
        return NULL;
 }
 
-static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
+static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
 {
        struct ivpu_job *job;
 
@@ -337,9 +302,10 @@ static int ivpu_job_done(struct ivpu_device *vdev, u32 job_id, u32 job_status)
        ivpu_dbg(vdev, JOB, "Job complete:  id %3u ctx %2d engine %d status 0x%x\n",
                 job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);
 
+       ivpu_job_destroy(job);
        ivpu_stop_job_timeout_detection(vdev);
 
-       job_put(job);
+       ivpu_rpm_put(vdev);
        return 0;
 }
 
@@ -349,10 +315,10 @@ void ivpu_jobs_abort_all(struct ivpu_device *vdev)
        unsigned long id;
 
        xa_for_each(&vdev->submitted_jobs_xa, id, job)
-               ivpu_job_done(vdev, id, VPU_JSM_STATUS_ABORTED);
+               ivpu_job_signal_and_destroy(vdev, id, VPU_JSM_STATUS_ABORTED);
 }
 
-static int ivpu_direct_job_submission(struct ivpu_job *job)
+static int ivpu_job_submit(struct ivpu_job *job)
 {
        struct ivpu_file_priv *file_priv = job->file_priv;
        struct ivpu_device *vdev = job->vdev;
@@ -360,53 +326,65 @@ static int ivpu_direct_job_submission(struct ivpu_job *job)
        struct ivpu_cmdq *cmdq;
        int ret;
 
+       ret = ivpu_rpm_get(vdev);
+       if (ret < 0)
+               return ret;
+
        mutex_lock(&file_priv->lock);
 
        cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
        if (!cmdq) {
-               ivpu_warn(vdev, "Failed get job queue, ctx %d engine %d\n",
-                         file_priv->ctx.id, job->engine_idx);
+               ivpu_warn_ratelimited(vdev, "Failed get job queue, ctx %d engine %d\n",
+                                     file_priv->ctx.id, job->engine_idx);
                ret = -EINVAL;
-               goto err_unlock;
+               goto err_unlock_file_priv;
        }
 
        job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
        job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;
 
-       job_get(job, &job);
-       ret = xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
+       xa_lock(&vdev->submitted_jobs_xa);
+       ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
        if (ret) {
-               ivpu_warn_ratelimited(vdev, "Failed to allocate job id: %d\n", ret);
-               goto err_job_put;
+               ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
+                        file_priv->ctx.id);
+               ret = -EBUSY;
+               goto err_unlock_submitted_jobs_xa;
        }
 
        ret = ivpu_cmdq_push_job(cmdq, job);
        if (ret)
-               goto err_xa_erase;
+               goto err_erase_xa;
 
        ivpu_start_job_timeout_detection(vdev);
 
-       ivpu_dbg(vdev, JOB, "Job submitted: id %3u addr 0x%llx ctx %2d engine %d next %d\n",
-                job->job_id, job->cmd_buf_vpu_addr, file_priv->ctx.id,
-                job->engine_idx, cmdq->jobq->header.tail);
-
-       if (ivpu_test_mode & IVPU_TEST_MODE_NULL_HW) {
-               ivpu_job_done(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+       if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
                cmdq->jobq->header.head = cmdq->jobq->header.tail;
                wmb(); /* Flush WC buffer for jobq header */
        } else {
                ivpu_cmdq_ring_db(vdev, cmdq);
        }
 
+       ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d addr 0x%llx next %d\n",
+                job->job_id, file_priv->ctx.id, job->engine_idx,
+                job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);
+
+       xa_unlock(&vdev->submitted_jobs_xa);
+
        mutex_unlock(&file_priv->lock);
+
+       if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
+               ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);
+
        return 0;
 
-err_xa_erase:
-       xa_erase(&vdev->submitted_jobs_xa, job->job_id);
-err_job_put:
-       job_put(job);
-err_unlock:
+err_erase_xa:
+       __xa_erase(&vdev->submitted_jobs_xa, job->job_id);
+err_unlock_submitted_jobs_xa:
+       xa_unlock(&vdev->submitted_jobs_xa);
+err_unlock_file_priv:
        mutex_unlock(&file_priv->lock);
+       ivpu_rpm_put(vdev);
        return ret;
 }
 
@@ -488,6 +466,9 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        if (params->engine > DRM_IVPU_ENGINE_COPY)
                return -EINVAL;
 
+       if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
+               return -EINVAL;
+
        if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
                return -EINVAL;
 
@@ -509,44 +490,49 @@ int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                             params->buffer_count * sizeof(u32));
        if (ret) {
                ret = -EFAULT;
-               goto free_handles;
+               goto err_free_handles;
        }
 
        if (!drm_dev_enter(&vdev->drm, &idx)) {
                ret = -ENODEV;
-               goto free_handles;
+               goto err_free_handles;
        }
 
        ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
                 file_priv->ctx.id, params->buffer_count);
 
-       job = ivpu_create_job(file_priv, params->engine, params->buffer_count);
+       job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
        if (!job) {
                ivpu_err(vdev, "Failed to create job\n");
                ret = -ENOMEM;
-               goto dev_exit;
+               goto err_exit_dev;
        }
 
        ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
                                              params->commands_offset);
        if (ret) {
-               ivpu_err(vdev, "Failed to prepare job, ret %d\n", ret);
-               goto job_put;
+               ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
+               goto err_destroy_job;
        }
 
-       ret = ivpu_direct_job_submission(job);
-       if (ret) {
-               dma_fence_signal(job->done_fence);
-               ivpu_err(vdev, "Failed to submit job to the HW, ret %d\n", ret);
-       }
+       down_read(&vdev->pm->reset_lock);
+       ret = ivpu_job_submit(job);
+       up_read(&vdev->pm->reset_lock);
+       if (ret)
+               goto err_signal_fence;
 
-job_put:
-       job_put(job);
-dev_exit:
        drm_dev_exit(idx);
-free_handles:
        kfree(buf_handles);
+       return ret;
 
+err_signal_fence:
+       dma_fence_signal(job->done_fence);
+err_destroy_job:
+       ivpu_job_destroy(job);
+err_exit_dev:
+       drm_dev_exit(idx);
+err_free_handles:
+       kfree(buf_handles);
        return ret;
 }
 
@@ -568,7 +554,7 @@ ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
        }
 
        payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
-       ret = ivpu_job_done(vdev, payload->job_id, payload->job_status);
+       ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
        if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
                ivpu_start_job_timeout_detection(vdev);
 }
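
The submit rework above pairs xa_lock() with the __xa_alloc()/__xa_erase() variants so that allocating the job ID and publishing the job stay atomic with respect to the completion handler. A minimal sketch of that pattern, with illustrative names (submitted_jobs, my_job) rather than the driver's own:

    #include <linux/xarray.h>

    struct my_job { u32 id; };

    static DEFINE_XARRAY_ALLOC(submitted_jobs);

    static int my_submit(struct my_job *job)
    {
            int ret;

            xa_lock(&submitted_jobs);
            /* __xa_alloc() expects the xa_lock to be held by the caller;
             * it may drop and retake it to satisfy GFP_KERNEL. */
            ret = __xa_alloc(&submitted_jobs, &job->id, job,
                             xa_limit_32b, GFP_KERNEL);
            if (ret)
                    goto unlock;

            /* ...push to the HW queue; on failure, call
             * __xa_erase(&submitted_jobs, job->id) before unlocking,
             * as the error path in the diff does... */
    unlock:
            xa_unlock(&submitted_jobs);
            return ret;
    }
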
index 45a2f2ec82e5ba69110d737e154c38bf6765174a..ca4984071cc76b17d858ae86929a48a9cea39c88 100644 (file)
@@ -43,7 +43,6 @@ struct ivpu_cmdq {
                          will update the job status
  */
 struct ivpu_job {
-       struct kref ref;
        struct ivpu_device *vdev;
        struct ivpu_file_priv *file_priv;
        struct dma_fence *done_fence;
@@ -56,7 +55,7 @@ struct ivpu_job {
 
 int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file);
 
-void ivpu_cmdq_release_all(struct ivpu_file_priv *file_priv);
+void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv);
 void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev);
 
 void ivpu_job_done_consumer_init(struct ivpu_device *vdev);
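
The _locked suffix adopted here is the usual kernel convention: the function's name documents that the caller must hold the lock, and lockdep_assert_held() turns that contract into a runtime check on lockdep-enabled kernels. A hedged sketch with made-up names:

    #include <linux/lockdep.h>
    #include <linux/mutex.h>

    static DEFINE_MUTEX(obj_lock);

    static void obj_release_all_locked(void)
    {
            lockdep_assert_held(&obj_lock);  /* caller owns obj_lock */
            /* ...release per-engine resources... */
    }

    static void obj_teardown(void)
    {
            mutex_lock(&obj_lock);
            obj_release_all_locked();
            mutex_unlock(&obj_lock);
    }
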
index 2228c44b115fa0e4d48f36c115e2fdc7b434a8c0..9a3122ffce03c1dc11311ab36f31f775f4fdf6fe 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/highmem.h>
 
 #include "ivpu_drv.h"
+#include "ivpu_hw.h"
 #include "ivpu_hw_reg_io.h"
 #include "ivpu_mmu.h"
 #include "ivpu_mmu_context.h"
@@ -518,6 +519,7 @@ static int ivpu_mmu_cmdq_sync(struct ivpu_device *vdev)
 
                ivpu_err(vdev, "Timed out waiting for MMU consumer: %d, error: %s\n", ret,
                         ivpu_mmu_cmdq_err_to_str(err));
+               ivpu_hw_diagnose_failure(vdev);
        }
 
        return ret;
@@ -885,7 +887,6 @@ static u32 *ivpu_mmu_get_event(struct ivpu_device *vdev)
 
 void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
 {
-       bool schedule_recovery = false;
        u32 *event;
        u32 ssid;
 
@@ -895,14 +896,21 @@ void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev)
                ivpu_mmu_dump_event(vdev, event);
 
                ssid = FIELD_GET(IVPU_MMU_EVT_SSID_MASK, event[0]);
-               if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID)
-                       schedule_recovery = true;
-               else
-                       ivpu_mmu_user_context_mark_invalid(vdev, ssid);
+               if (ssid == IVPU_GLOBAL_CONTEXT_MMU_SSID) {
+                       ivpu_pm_trigger_recovery(vdev, "MMU event");
+                       return;
+               }
+
+               ivpu_mmu_user_context_mark_invalid(vdev, ssid);
        }
+}
 
-       if (schedule_recovery)
-               ivpu_pm_schedule_recovery(vdev);
+void ivpu_mmu_evtq_dump(struct ivpu_device *vdev)
+{
+       u32 *event;
+
+       while ((event = ivpu_mmu_get_event(vdev)) != NULL)
+               ivpu_mmu_dump_event(vdev, event);
 }
 
 void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev)
index cb551126806baa9bb47a967c7bff916b444c2427..6fa35c240710625670b6879098833c6cd680fb40 100644 (file)
@@ -46,5 +46,6 @@ int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid);
 
 void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
 void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev);
+void ivpu_mmu_evtq_dump(struct ivpu_device *vdev);
 
 #endif /* __IVPU_MMU_H__ */
index 12a8c09d4547d7d9b81cd91d93307e59e648e14f..fe61612992364c65d184eef9c3a3ad3ebb60ce6a 100644 (file)
@@ -355,6 +355,9 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
                dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;
 
+               ivpu_dbg(vdev, MMU_MAP, "Map ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+                        ctx->id, dma_addr, vpu_addr, size);
+
                ret = ivpu_mmu_context_map_pages(vdev, ctx, vpu_addr, dma_addr, size, prot);
                if (ret) {
                        ivpu_err(vdev, "Failed to map context pages\n");
@@ -366,6 +369,7 @@ ivpu_mmu_context_map_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ctx,
 
        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();
+
        mutex_unlock(&ctx->lock);
 
        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
@@ -388,14 +392,19 @@ ivpu_mmu_context_unmap_sgt(struct ivpu_device *vdev, struct ivpu_mmu_context *ct
        mutex_lock(&ctx->lock);
 
        for_each_sgtable_dma_sg(sgt, sg, i) {
+               dma_addr_t dma_addr = sg_dma_address(sg) - sg->offset;
                size_t size = sg_dma_len(sg) + sg->offset;
 
+               ivpu_dbg(vdev, MMU_MAP, "Unmap ctx: %u dma_addr: 0x%llx vpu_addr: 0x%llx size: %lu\n",
+                        ctx->id, dma_addr, vpu_addr, size);
+
                ivpu_mmu_context_unmap_pages(ctx, vpu_addr, size);
                vpu_addr += size;
        }
 
        /* Ensure page table modifications are flushed from wc buffers to memory */
        wmb();
+
        mutex_unlock(&ctx->lock);
 
        ret = ivpu_mmu_invalidate_tlb(vdev, ctx->id);
index 0af8864cb3b55f636bc7418a03dc3c07360e7ac8..f501f27ebafdf6687b5a46ca7e2387faa931af3e 100644 (file)
@@ -13,6 +13,7 @@
 #include "ivpu_drv.h"
 #include "ivpu_hw.h"
 #include "ivpu_fw.h"
+#include "ivpu_fw_log.h"
 #include "ivpu_ipc.h"
 #include "ivpu_job.h"
 #include "ivpu_jsm_msg.h"
@@ -111,6 +112,14 @@ static void ivpu_pm_recovery_work(struct work_struct *work)
        char *evt[2] = {"IVPU_PM_EVENT=IVPU_RECOVER", NULL};
        int ret;
 
+       ivpu_err(vdev, "Recovering the VPU (reset #%d)\n", atomic_read(&vdev->pm->reset_counter));
+
+       ret = pm_runtime_resume_and_get(vdev->drm.dev);
+       if (ret)
+               ivpu_err(vdev, "Failed to resume VPU: %d\n", ret);
+
+       ivpu_fw_log_dump(vdev);
+
 retry:
        ret = pci_try_reset_function(to_pci_dev(vdev->drm.dev));
        if (ret == -EAGAIN && !drm_dev_is_unplugged(&vdev->drm)) {
@@ -122,11 +131,13 @@ retry:
                ivpu_err(vdev, "Failed to reset VPU: %d\n", ret);
 
        kobject_uevent_env(&vdev->drm.dev->kobj, KOBJ_CHANGE, evt);
+       pm_runtime_mark_last_busy(vdev->drm.dev);
+       pm_runtime_put_autosuspend(vdev->drm.dev);
 }
 
-void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
+void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason)
 {
-       struct ivpu_pm_info *pm = vdev->pm;
+       ivpu_err(vdev, "Recovery triggered by %s\n", reason);
 
        if (ivpu_disable_recovery) {
                ivpu_err(vdev, "Recovery not available when disable_recovery param is set\n");
@@ -138,10 +149,11 @@ void ivpu_pm_schedule_recovery(struct ivpu_device *vdev)
                return;
        }
 
-       /* Schedule recovery if it's not in progress */
-       if (atomic_cmpxchg(&pm->in_reset, 0, 1) == 0) {
-               ivpu_hw_irq_disable(vdev);
-               queue_work(system_long_wq, &pm->recovery_work);
+       /* Trigger recovery if it's not in progress */
+       if (atomic_cmpxchg(&vdev->pm->reset_pending, 0, 1) == 0) {
+               ivpu_hw_diagnose_failure(vdev);
+               ivpu_hw_irq_disable(vdev); /* Disable IRQ early to protect from IRQ storm */
+               queue_work(system_long_wq, &vdev->pm->recovery_work);
        }
 }
 
@@ -149,12 +161,8 @@ static void ivpu_job_timeout_work(struct work_struct *work)
 {
        struct ivpu_pm_info *pm = container_of(work, struct ivpu_pm_info, job_timeout_work.work);
        struct ivpu_device *vdev = pm->vdev;
-       unsigned long timeout_ms = ivpu_tdr_timeout_ms ? ivpu_tdr_timeout_ms : vdev->timeout.tdr;
 
-       ivpu_err(vdev, "TDR detected, timeout %lu ms", timeout_ms);
-       ivpu_hw_diagnose_failure(vdev);
-
-       ivpu_pm_schedule_recovery(vdev);
+       ivpu_pm_trigger_recovery(vdev, "TDR");
 }
 
 void ivpu_start_job_timeout_detection(struct ivpu_device *vdev)
@@ -227,6 +235,9 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
        bool hw_is_idle = true;
        int ret;
 
+       drm_WARN_ON(&vdev->drm, !xa_empty(&vdev->submitted_jobs_xa));
+       drm_WARN_ON(&vdev->drm, work_pending(&vdev->pm->recovery_work));
+
        ivpu_dbg(vdev, PM, "Runtime suspend..\n");
 
        if (!ivpu_hw_is_idle(vdev) && vdev->pm->suspend_reschedule_counter) {
@@ -247,7 +258,8 @@ int ivpu_pm_runtime_suspend_cb(struct device *dev)
                ivpu_err(vdev, "Failed to set suspend VPU: %d\n", ret);
 
        if (!hw_is_idle) {
-               ivpu_warn(vdev, "VPU failed to enter idle, force suspended.\n");
+               ivpu_err(vdev, "VPU failed to enter idle, force suspended.\n");
+               ivpu_fw_log_dump(vdev);
                ivpu_pm_prepare_cold_boot(vdev);
        } else {
                ivpu_pm_prepare_warm_boot(vdev);
@@ -308,11 +320,12 @@ void ivpu_pm_reset_prepare_cb(struct pci_dev *pdev)
 {
        struct ivpu_device *vdev = pci_get_drvdata(pdev);
 
-       pm_runtime_get_sync(vdev->drm.dev);
-
        ivpu_dbg(vdev, PM, "Pre-reset..\n");
        atomic_inc(&vdev->pm->reset_counter);
-       atomic_set(&vdev->pm->in_reset, 1);
+       atomic_set(&vdev->pm->reset_pending, 1);
+
+       pm_runtime_get_sync(vdev->drm.dev);
+       down_write(&vdev->pm->reset_lock);
        ivpu_prepare_for_reset(vdev);
        ivpu_hw_reset(vdev);
        ivpu_pm_prepare_cold_boot(vdev);
@@ -329,9 +342,11 @@ void ivpu_pm_reset_done_cb(struct pci_dev *pdev)
        ret = ivpu_resume(vdev);
        if (ret)
                ivpu_err(vdev, "Failed to set RESUME state: %d\n", ret);
-       atomic_set(&vdev->pm->in_reset, 0);
+       up_write(&vdev->pm->reset_lock);
+       atomic_set(&vdev->pm->reset_pending, 0);
        ivpu_dbg(vdev, PM, "Post-reset done.\n");
 
+       pm_runtime_mark_last_busy(vdev->drm.dev);
        pm_runtime_put_autosuspend(vdev->drm.dev);
 }
 
@@ -344,7 +359,10 @@ void ivpu_pm_init(struct ivpu_device *vdev)
        pm->vdev = vdev;
        pm->suspend_reschedule_counter = PM_RESCHEDULE_LIMIT;
 
-       atomic_set(&pm->in_reset, 0);
+       init_rwsem(&pm->reset_lock);
+       atomic_set(&pm->reset_pending, 0);
+       atomic_set(&pm->reset_counter, 0);
+
        INIT_WORK(&pm->recovery_work, ivpu_pm_recovery_work);
        INIT_DELAYED_WORK(&pm->job_timeout_work, ivpu_job_timeout_work);
 
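
The reset_pending flag above implements a single-shot trigger: atomic_cmpxchg() guarantees that, out of many concurrent error paths, exactly one queues the recovery work until the reset completes and re-arms the flag. A minimal sketch of the idiom (recovery_work is assumed to have been set up with INIT_WORK() elsewhere):

    #include <linux/atomic.h>
    #include <linux/workqueue.h>

    static atomic_t reset_pending = ATOMIC_INIT(0);
    static struct work_struct recovery_work;  /* INIT_WORK()'d at probe */

    static void trigger_recovery(void)
    {
            /* Only the first caller sees 0 and queues the work. */
            if (atomic_cmpxchg(&reset_pending, 0, 1) == 0)
                    queue_work(system_long_wq, &recovery_work);
    }
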
index 97c6e0b0aa42d0a5a071940c5b54a052f99a748c..ec60fbeefefc65bbca4ed619d7265aabffd1bb61 100644 (file)
@@ -6,6 +6,7 @@
 #ifndef __IVPU_PM_H__
 #define __IVPU_PM_H__
 
+#include <linux/rwsem.h>
 #include <linux/types.h>
 
 struct ivpu_device;
@@ -14,8 +15,9 @@ struct ivpu_pm_info {
        struct ivpu_device *vdev;
        struct delayed_work job_timeout_work;
        struct work_struct recovery_work;
-       atomic_t in_reset;
+       struct rw_semaphore reset_lock;
        atomic_t reset_counter;
+       atomic_t reset_pending;
        bool is_warmboot;
        u32 suspend_reschedule_counter;
 };
@@ -37,7 +39,7 @@ int __must_check ivpu_rpm_get(struct ivpu_device *vdev);
 int __must_check ivpu_rpm_get_if_active(struct ivpu_device *vdev);
 void ivpu_rpm_put(struct ivpu_device *vdev);
 
-void ivpu_pm_schedule_recovery(struct ivpu_device *vdev);
+void ivpu_pm_trigger_recovery(struct ivpu_device *vdev, const char *reason);
 void ivpu_start_job_timeout_detection(struct ivpu_device *vdev);
 void ivpu_stop_job_timeout_detection(struct ivpu_device *vdev);
 
index 3a5f3255f51b39cc4a5b65554e7d55eed8ea2c57..d2460fa985b7e3b53cf71bc154d613c715ccf2fc 100644 (file)
@@ -48,6 +48,7 @@ enum {
 enum board_ids {
        /* board IDs by feature in alphabetical order */
        board_ahci,
+       board_ahci_43bit_dma,
        board_ahci_ign_iferr,
        board_ahci_low_power,
        board_ahci_no_debounce_delay,
@@ -128,6 +129,13 @@ static const struct ata_port_info ahci_port_info[] = {
                .udma_mask      = ATA_UDMA6,
                .port_ops       = &ahci_ops,
        },
+       [board_ahci_43bit_dma] = {
+               AHCI_HFLAGS     (AHCI_HFLAG_43BIT_ONLY),
+               .flags          = AHCI_FLAG_COMMON,
+               .pio_mask       = ATA_PIO4,
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &ahci_ops,
+       },
        [board_ahci_ign_iferr] = {
                AHCI_HFLAGS     (AHCI_HFLAG_IGN_IRQ_IF_ERR),
                .flags          = AHCI_FLAG_COMMON,
@@ -597,11 +605,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },   /* PDC42819 */
        { PCI_VDEVICE(PROMISE, 0x3781), board_ahci },   /* FastTrak TX8660 ahci-mode */
 
-       /* Asmedia */
+       /* ASMedia */
        { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },   /* ASM1060 */
        { PCI_VDEVICE(ASMEDIA, 0x0602), board_ahci },   /* ASM1060 */
-       { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci },   /* ASM1061 */
-       { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci },   /* ASM1062 */
+       { PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci_43bit_dma }, /* ASM1061 */
+       { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci_43bit_dma }, /* ASM1061/1062 */
        { PCI_VDEVICE(ASMEDIA, 0x0621), board_ahci },   /* ASM1061R */
        { PCI_VDEVICE(ASMEDIA, 0x0622), board_ahci },   /* ASM1062R */
        { PCI_VDEVICE(ASMEDIA, 0x0624), board_ahci },   /* ASM1062+JMB575 */
@@ -663,6 +671,11 @@ MODULE_PARM_DESC(mobile_lpm_policy, "Default LPM policy for mobile chipsets");
 static void ahci_pci_save_initial_config(struct pci_dev *pdev,
                                         struct ahci_host_priv *hpriv)
 {
+       if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA && pdev->device == 0x1166) {
+               dev_info(&pdev->dev, "ASM1166 has only six ports\n");
+               hpriv->saved_port_map = 0x3f;
+       }
+
        if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
                dev_info(&pdev->dev, "JMB361 has only one port\n");
                hpriv->saved_port_map = 1;
@@ -949,11 +962,20 @@ static int ahci_pci_device_resume(struct device *dev)
 
 #endif /* CONFIG_PM */
 
-static int ahci_configure_dma_masks(struct pci_dev *pdev, int using_dac)
+static int ahci_configure_dma_masks(struct pci_dev *pdev,
+                                   struct ahci_host_priv *hpriv)
 {
-       const int dma_bits = using_dac ? 64 : 32;
+       int dma_bits;
        int rc;
 
+       if (hpriv->cap & HOST_CAP_64) {
+               dma_bits = 64;
+               if (hpriv->flags & AHCI_HFLAG_43BIT_ONLY)
+                       dma_bits = 43;
+       } else {
+               dma_bits = 32;
+       }
+
        /*
         * If the device fixup already set the dma_mask to some non-standard
         * value, don't extend it here. This happens on STA2X11, for example.
@@ -1926,7 +1948,7 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        ahci_gtf_filter_workaround(host);
 
        /* initialize adapter */
-       rc = ahci_configure_dma_masks(pdev, hpriv->cap & HOST_CAP_64);
+       rc = ahci_configure_dma_masks(pdev, hpriv);
        if (rc)
                return rc;
 
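
The ahci rework derives the DMA mask width from host capabilities instead of a bool: 64-bit capable parts get 64, or 43 when AHCI_HFLAG_43BIT_ONLY marks the ASM1061's address-line limit, and everything else gets 32. A hedged sketch of that selection (configure_dma() and its parameters are illustrative, not the driver's signature):

    #include <linux/dma-mapping.h>
    #include <linux/pci.h>

    static int configure_dma(struct pci_dev *pdev, bool cap_64, bool only_43)
    {
            int bits = cap_64 ? (only_43 ? 43 : 64) : 32;

            /* Sets both streaming and coherent masks in one call. */
            return dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(bits));
    }
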
index 4bae95b06ae3c953de7a567c35f9b46dd9e3083f..df8f8a1a3a34c3ee26d0d2b899522a82d220b6c2 100644 (file)
@@ -247,6 +247,7 @@ enum {
        AHCI_HFLAG_SUSPEND_PHYS         = BIT(26), /* handle PHYs during
                                                      suspend/resume */
        AHCI_HFLAG_NO_SXS               = BIT(28), /* SXS not supported */
+       AHCI_HFLAG_43BIT_ONLY           = BIT(29), /* 43bit DMA addr limit */
 
        /* ap->flags bits */
 
index b6656c287175c7653324758ccb5422dc36168e3d..0fb1934875f2084a753216cf54ff443aa601361b 100644 (file)
@@ -784,7 +784,7 @@ bool sata_lpm_ignore_phy_events(struct ata_link *link)
 EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
 
 static const char *ata_lpm_policy_names[] = {
-       [ATA_LPM_UNKNOWN]               = "max_performance",
+       [ATA_LPM_UNKNOWN]               = "keep_firmware_settings",
        [ATA_LPM_MAX_POWER]             = "max_performance",
        [ATA_LPM_MED_POWER]             = "medium_power",
        [ATA_LPM_MED_POWER_WITH_DIPM]   = "med_power_with_dipm",
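
Giving ATA_LPM_UNKNOWN its own string works cleanly because the table uses designated initializers, so each enum slot is named explicitly and duplicates or gaps are obvious at a glance. A toy version of the pattern (enum values invented for illustration):

    enum lpm_policy { LPM_UNKNOWN, LPM_MAX_POWER, LPM_NR };

    static const char *lpm_names[LPM_NR] = {
            [LPM_UNKNOWN]   = "keep_firmware_settings",
            [LPM_MAX_POWER] = "max_performance",
    };
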
index d2dbf8aaccb5b1320909964b3092ea47df90cc4a..b1b47d88f5db44dd9dcb8fc7468718ac36869130 100644 (file)
@@ -333,6 +333,7 @@ aoeblk_gdalloc(void *vp)
        struct gendisk *gd;
        mempool_t *mp;
        struct blk_mq_tag_set *set;
+       sector_t ssize;
        ulong flags;
        int late = 0;
        int err;
@@ -396,7 +397,7 @@ aoeblk_gdalloc(void *vp)
        gd->minors = AOE_PARTITIONS;
        gd->fops = &aoe_bdops;
        gd->private_data = d;
-       set_capacity(gd, d->ssize);
+       ssize = d->ssize;
        snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d",
                d->aoemajor, d->aoeminor);
 
@@ -405,6 +406,8 @@ aoeblk_gdalloc(void *vp)
 
        spin_unlock_irqrestore(&d->lock, flags);
 
+       set_capacity(gd, ssize);
+
        err = device_add_disk(NULL, gd, aoe_attr_groups);
        if (err)
                goto out_disk_cleanup;
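
The aoe change snapshots d->ssize under the irq-disabled spinlock and calls set_capacity() only after dropping it, since set_capacity() takes its own locks and calling it with interrupts off risked an interrupt-unsafe locking scenario. A minimal sketch of the snapshot-then-call pattern (names invented):

    #include <linux/blkdev.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(dev_lock);

    static void publish_capacity(struct gendisk *gd, const sector_t *dev_ssize)
    {
            unsigned long flags;
            sector_t ssize;

            spin_lock_irqsave(&dev_lock, flags);
            ssize = *dev_ssize;          /* snapshot under the lock */
            spin_unlock_irqrestore(&dev_lock, flags);

            set_capacity(gd, ssize);     /* heavier call, lock dropped */
    }
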
index a999b698b131f7763916c3bd0de5c87478fd0df4..12b5d53ec85645fb22395d41adef81d13cdb7292 100644 (file)
@@ -3452,14 +3452,15 @@ static bool rbd_lock_add_request(struct rbd_img_request *img_req)
 static void rbd_lock_del_request(struct rbd_img_request *img_req)
 {
        struct rbd_device *rbd_dev = img_req->rbd_dev;
-       bool need_wakeup;
+       bool need_wakeup = false;
 
        lockdep_assert_held(&rbd_dev->lock_rwsem);
        spin_lock(&rbd_dev->lock_lists_lock);
-       rbd_assert(!list_empty(&img_req->lock_item));
-       list_del_init(&img_req->lock_item);
-       need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
-                      list_empty(&rbd_dev->running_list));
+       if (!list_empty(&img_req->lock_item)) {
+               list_del_init(&img_req->lock_item);
+               need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
+                              list_empty(&rbd_dev->running_list));
+       }
        spin_unlock(&rbd_dev->lock_lists_lock);
        if (need_wakeup)
                complete(&rbd_dev->releasing_wait);
@@ -3842,14 +3843,19 @@ static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
                return;
        }
 
-       list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
+       while (!list_empty(&rbd_dev->acquiring_list)) {
+               img_req = list_first_entry(&rbd_dev->acquiring_list,
+                                          struct rbd_img_request, lock_item);
                mutex_lock(&img_req->state_mutex);
                rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
+               if (!result)
+                       list_move_tail(&img_req->lock_item,
+                                      &rbd_dev->running_list);
+               else
+                       list_del_init(&img_req->lock_item);
                rbd_img_schedule(img_req, result);
                mutex_unlock(&img_req->state_mutex);
        }
-
-       list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
 }
 
 static bool locker_equal(const struct ceph_locker *lhs,
@@ -5326,7 +5332,7 @@ static void rbd_dev_release(struct device *dev)
 
        if (need_put) {
                destroy_workqueue(rbd_dev->task_wq);
-               ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+               ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
        }
 
        rbd_dev_free(rbd_dev);
@@ -5402,9 +5408,9 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
                return NULL;
 
        /* get an id and fill in device name */
-       rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
-                                        minor_to_rbd_dev_id(1 << MINORBITS),
-                                        GFP_KERNEL);
+       rbd_dev->dev_id = ida_alloc_max(&rbd_dev_id_ida,
+                                       minor_to_rbd_dev_id(1 << MINORBITS) - 1,
+                                       GFP_KERNEL);
        if (rbd_dev->dev_id < 0)
                goto fail_rbd_dev;
 
@@ -5425,7 +5431,7 @@ static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
        return rbd_dev;
 
 fail_dev_id:
-       ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
+       ida_free(&rbd_dev_id_ida, rbd_dev->dev_id);
 fail_rbd_dev:
        rbd_dev_free(rbd_dev);
        return NULL;
index 1f6186475715e0592df1028ade0a336703338b15..1791d37fbc53c57e0f13469934eee357c0de87cc 100644 (file)
@@ -1232,14 +1232,13 @@ static void amd_pstate_epp_update_limit(struct cpufreq_policy *policy)
        max_limit_perf = div_u64(policy->max * cpudata->highest_perf, cpudata->max_freq);
        min_limit_perf = div_u64(policy->min * cpudata->highest_perf, cpudata->max_freq);
 
+       WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
+       WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
+
        max_perf = clamp_t(unsigned long, max_perf, cpudata->min_limit_perf,
                        cpudata->max_limit_perf);
        min_perf = clamp_t(unsigned long, min_perf, cpudata->min_limit_perf,
                        cpudata->max_limit_perf);
-
-       WRITE_ONCE(cpudata->max_limit_perf, max_limit_perf);
-       WRITE_ONCE(cpudata->min_limit_perf, min_limit_perf);
-
        value = READ_ONCE(cpudata->cppc_req_cached);
 
        if (cpudata->policy == CPUFREQ_POLICY_PERFORMANCE)
index 2ca70b0b5fdc5d39990bb88ba4dcd1f7e7131d31..ca94e60e705a1df435b1dd75a13c0a50dc3f8c27 100644 (file)
@@ -529,6 +529,30 @@ static int intel_pstate_cppc_get_scaling(int cpu)
 }
 #endif /* CONFIG_ACPI_CPPC_LIB */
 
+static int intel_pstate_freq_to_hwp_rel(struct cpudata *cpu, int freq,
+                                       unsigned int relation)
+{
+       if (freq == cpu->pstate.turbo_freq)
+               return cpu->pstate.turbo_pstate;
+
+       if (freq == cpu->pstate.max_freq)
+               return cpu->pstate.max_pstate;
+
+       switch (relation) {
+       case CPUFREQ_RELATION_H:
+               return freq / cpu->pstate.scaling;
+       case CPUFREQ_RELATION_C:
+               return DIV_ROUND_CLOSEST(freq, cpu->pstate.scaling);
+       }
+
+       return DIV_ROUND_UP(freq, cpu->pstate.scaling);
+}
+
+static int intel_pstate_freq_to_hwp(struct cpudata *cpu, int freq)
+{
+       return intel_pstate_freq_to_hwp_rel(cpu, freq, CPUFREQ_RELATION_L);
+}
+
 /**
  * intel_pstate_hybrid_hwp_adjust - Calibrate HWP performance levels.
  * @cpu: Target CPU.
@@ -546,6 +570,7 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
        int perf_ctl_scaling = cpu->pstate.perf_ctl_scaling;
        int perf_ctl_turbo = pstate_funcs.get_turbo(cpu->cpu);
        int scaling = cpu->pstate.scaling;
+       int freq;
 
        pr_debug("CPU%d: perf_ctl_max_phys = %d\n", cpu->cpu, perf_ctl_max_phys);
        pr_debug("CPU%d: perf_ctl_turbo = %d\n", cpu->cpu, perf_ctl_turbo);
@@ -559,16 +584,16 @@ static void intel_pstate_hybrid_hwp_adjust(struct cpudata *cpu)
        cpu->pstate.max_freq = rounddown(cpu->pstate.max_pstate * scaling,
                                         perf_ctl_scaling);
 
-       cpu->pstate.max_pstate_physical =
-                       DIV_ROUND_UP(perf_ctl_max_phys * perf_ctl_scaling,
-                                    scaling);
+       freq = perf_ctl_max_phys * perf_ctl_scaling;
+       cpu->pstate.max_pstate_physical = intel_pstate_freq_to_hwp(cpu, freq);
 
-       cpu->pstate.min_freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+       freq = cpu->pstate.min_pstate * perf_ctl_scaling;
+       cpu->pstate.min_freq = freq;
        /*
         * Cast the min P-state value retrieved via pstate_funcs.get_min() to
         * the effective range of HWP performance levels.
         */
-       cpu->pstate.min_pstate = DIV_ROUND_UP(cpu->pstate.min_freq, scaling);
+       cpu->pstate.min_pstate = intel_pstate_freq_to_hwp(cpu, freq);
 }
 
 static inline void update_turbo_state(void)
@@ -2528,13 +2553,12 @@ static void intel_pstate_update_perf_limits(struct cpudata *cpu,
         * abstract values to represent performance rather than pure ratios.
         */
        if (hwp_active && cpu->pstate.scaling != perf_ctl_scaling) {
-               int scaling = cpu->pstate.scaling;
                int freq;
 
                freq = max_policy_perf * perf_ctl_scaling;
-               max_policy_perf = DIV_ROUND_UP(freq, scaling);
+               max_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
                freq = min_policy_perf * perf_ctl_scaling;
-               min_policy_perf = DIV_ROUND_UP(freq, scaling);
+               min_policy_perf = intel_pstate_freq_to_hwp(cpu, freq);
        }
 
        pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
@@ -2908,18 +2932,7 @@ static int intel_cpufreq_target(struct cpufreq_policy *policy,
 
        cpufreq_freq_transition_begin(policy, &freqs);
 
-       switch (relation) {
-       case CPUFREQ_RELATION_L:
-               target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
-               break;
-       case CPUFREQ_RELATION_H:
-               target_pstate = freqs.new / cpu->pstate.scaling;
-               break;
-       default:
-               target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
-               break;
-       }
-
+       target_pstate = intel_pstate_freq_to_hwp_rel(cpu, freqs.new, relation);
        target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
 
        freqs.new = target_pstate * cpu->pstate.scaling;
@@ -2937,7 +2950,7 @@ static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
 
        update_turbo_state();
 
-       target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
+       target_pstate = intel_pstate_freq_to_hwp(cpu, target_freq);
 
        target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
 
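
The three open-coded rounding sites collapse into one helper: RELATION_L rounds the frequency up to a P-state, RELATION_H rounds down, RELATION_C picks the closest. A hedged standalone sketch, where the RELATION_* constants stand in for cpufreq's CPUFREQ_RELATION_* values:

    #include <linux/math.h>

    #define RELATION_L 0    /* lowest pstate at or above target */
    #define RELATION_H 1    /* highest pstate at or below target */
    #define RELATION_C 2    /* closest pstate */

    static int freq_to_pstate(int freq, int scaling, unsigned int relation)
    {
            switch (relation) {
            case RELATION_H:
                    return freq / scaling;               /* round down */
            case RELATION_C:
                    return DIV_ROUND_CLOSEST(freq, scaling);
            default:
                    return DIV_ROUND_UP(freq, scaling);  /* round up */
            }
    }
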
index 0f05692bfec3946841a766c8583bc1a3b526073e..ce0e2d82bb2b4cfdc61761d5e32a8c91cc121d82 100644 (file)
@@ -525,7 +525,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
        struct cxl_root_decoder *cxlrd = to_cxl_root_decoder(cxlr->dev.parent);
        struct cxl_region_params *p = &cxlr->params;
        struct resource *res;
-       u32 remainder = 0;
+       u64 remainder = 0;
 
        lockdep_assert_held_write(&cxl_region_rwsem);
 
@@ -545,7 +545,7 @@ static int alloc_hpa(struct cxl_region *cxlr, resource_size_t size)
            (cxlr->mode == CXL_DECODER_PMEM && uuid_is_null(&p->uuid)))
                return -ENXIO;
 
-       div_u64_rem(size, SZ_256M * p->interleave_ways, &remainder);
+       div64_u64_rem(size, (u64)SZ_256M * p->interleave_ways, &remainder);
        if (remainder)
                return -EINVAL;
 
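
The cxl fix matters because a u32 remainder silently truncates once the region size crosses 4 GiB multiples of the interleave granularity; div64_u64_rem() keeps both the divisor and the remainder at 64 bits. A sketch of the alignment check, with an invented helper name:

    #include <linux/math64.h>
    #include <linux/sizes.h>

    static bool hpa_size_is_aligned(u64 size, int interleave_ways)
    {
            u64 remainder;

            /* 64-bit divisor and remainder; no silent truncation. */
            div64_u64_rem(size, (u64)SZ_256M * interleave_ways, &remainder);
            return remainder == 0;
    }
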
index 4fd1f207c84ee53a857e417a5fc2260fb43b9733..233e7c42c161d8e0b64424776d121f5d08176010 100644 (file)
@@ -382,7 +382,7 @@ static int cxl_pci_mbox_send(struct cxl_memdev_state *mds,
        return rc;
 }
 
-static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
+static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds, bool irq_avail)
 {
        struct cxl_dev_state *cxlds = &mds->cxlds;
        const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
@@ -441,7 +441,7 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
        INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
 
        /* background command interrupts are optional */
-       if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
+       if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) || !irq_avail)
                return 0;
 
        msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
@@ -588,7 +588,7 @@ static int cxl_mem_alloc_event_buf(struct cxl_memdev_state *mds)
        return devm_add_action_or_reset(mds->cxlds.dev, free_event_buf, buf);
 }
 
-static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
+static bool cxl_alloc_irq_vectors(struct pci_dev *pdev)
 {
        int nvecs;
 
@@ -605,9 +605,9 @@ static int cxl_alloc_irq_vectors(struct pci_dev *pdev)
                                      PCI_IRQ_MSIX | PCI_IRQ_MSI);
        if (nvecs < 1) {
                dev_dbg(&pdev->dev, "Failed to alloc irq vectors: %d\n", nvecs);
-               return -ENXIO;
+               return false;
        }
-       return 0;
+       return true;
 }
 
 static irqreturn_t cxl_event_thread(int irq, void *id)
@@ -743,7 +743,7 @@ static bool cxl_event_int_is_fw(u8 setting)
 }
 
 static int cxl_event_config(struct pci_host_bridge *host_bridge,
-                           struct cxl_memdev_state *mds)
+                           struct cxl_memdev_state *mds, bool irq_avail)
 {
        struct cxl_event_interrupt_policy policy;
        int rc;
@@ -755,6 +755,11 @@ static int cxl_event_config(struct pci_host_bridge *host_bridge,
        if (!host_bridge->native_cxl_error)
                return 0;
 
+       if (!irq_avail) {
+               dev_info(mds->cxlds.dev, "No interrupt support, disable event processing.\n");
+               return 0;
+       }
+
        rc = cxl_mem_alloc_event_buf(mds);
        if (rc)
                return rc;
@@ -789,6 +794,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        struct cxl_register_map map;
        struct cxl_memdev *cxlmd;
        int i, rc, pmu_count;
+       bool irq_avail;
 
        /*
         * Double check the anonymous union trickery in struct cxl_regs
@@ -846,11 +852,9 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        else
                dev_warn(&pdev->dev, "Media not active (%d)\n", rc);
 
-       rc = cxl_alloc_irq_vectors(pdev);
-       if (rc)
-               return rc;
+       irq_avail = cxl_alloc_irq_vectors(pdev);
 
-       rc = cxl_pci_setup_mailbox(mds);
+       rc = cxl_pci_setup_mailbox(mds, irq_avail);
        if (rc)
                return rc;
 
@@ -909,7 +913,7 @@ static int cxl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                }
        }
 
-       rc = cxl_event_config(host_bridge, mds);
+       rc = cxl_event_config(host_bridge, mds, irq_avail);
        if (rc)
                return rc;
 
index 1eca8cc271f841e7b15967b2c33394169065b4ab..5152bd1b0daf599869195e81805fbb2709dbe6b4 100644 (file)
@@ -29,8 +29,6 @@ static u32 dpll_pin_xa_id;
        WARN_ON_ONCE(!xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
 #define ASSERT_DPLL_NOT_REGISTERED(d)  \
        WARN_ON_ONCE(xa_get_mark(&dpll_device_xa, (d)->id, DPLL_REGISTERED))
-#define ASSERT_PIN_REGISTERED(p)       \
-       WARN_ON_ONCE(!xa_get_mark(&dpll_pin_xa, (p)->id, DPLL_REGISTERED))
 
 struct dpll_device_registration {
        struct list_head list;
@@ -425,6 +423,53 @@ void dpll_device_unregister(struct dpll_device *dpll,
 }
 EXPORT_SYMBOL_GPL(dpll_device_unregister);
 
+static void dpll_pin_prop_free(struct dpll_pin_properties *prop)
+{
+       kfree(prop->package_label);
+       kfree(prop->panel_label);
+       kfree(prop->board_label);
+       kfree(prop->freq_supported);
+}
+
+static int dpll_pin_prop_dup(const struct dpll_pin_properties *src,
+                            struct dpll_pin_properties *dst)
+{
+       memcpy(dst, src, sizeof(*dst));
+       if (src->freq_supported && src->freq_supported_num) {
+               size_t freq_size = src->freq_supported_num *
+                                  sizeof(*src->freq_supported);
+               dst->freq_supported = kmemdup(src->freq_supported,
+                                             freq_size, GFP_KERNEL);
+               if (!dst->freq_supported)
+                       return -ENOMEM;
+       }
+       if (src->board_label) {
+               dst->board_label = kstrdup(src->board_label, GFP_KERNEL);
+               if (!dst->board_label)
+                       goto err_board_label;
+       }
+       if (src->panel_label) {
+               dst->panel_label = kstrdup(src->panel_label, GFP_KERNEL);
+               if (!dst->panel_label)
+                       goto err_panel_label;
+       }
+       if (src->package_label) {
+               dst->package_label = kstrdup(src->package_label, GFP_KERNEL);
+               if (!dst->package_label)
+                       goto err_package_label;
+       }
+
+       return 0;
+
+err_package_label:
+       kfree(dst->panel_label);
+err_panel_label:
+       kfree(dst->board_label);
+err_board_label:
+       kfree(dst->freq_supported);
+       return -ENOMEM;
+}
+
 static struct dpll_pin *
 dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
               const struct dpll_pin_properties *prop)
@@ -441,20 +486,24 @@ dpll_pin_alloc(u64 clock_id, u32 pin_idx, struct module *module,
        if (WARN_ON(prop->type < DPLL_PIN_TYPE_MUX ||
                    prop->type > DPLL_PIN_TYPE_MAX)) {
                ret = -EINVAL;
-               goto err;
+               goto err_pin_prop;
        }
-       pin->prop = prop;
+       ret = dpll_pin_prop_dup(prop, &pin->prop);
+       if (ret)
+               goto err_pin_prop;
        refcount_set(&pin->refcount, 1);
        xa_init_flags(&pin->dpll_refs, XA_FLAGS_ALLOC);
        xa_init_flags(&pin->parent_refs, XA_FLAGS_ALLOC);
        ret = xa_alloc_cyclic(&dpll_pin_xa, &pin->id, pin, xa_limit_32b,
                              &dpll_pin_xa_id, GFP_KERNEL);
        if (ret)
-               goto err;
+               goto err_xa_alloc;
        return pin;
-err:
+err_xa_alloc:
        xa_destroy(&pin->dpll_refs);
        xa_destroy(&pin->parent_refs);
+       dpll_pin_prop_free(&pin->prop);
+err_pin_prop:
        kfree(pin);
        return ERR_PTR(ret);
 }
@@ -514,6 +563,7 @@ void dpll_pin_put(struct dpll_pin *pin)
                xa_destroy(&pin->dpll_refs);
                xa_destroy(&pin->parent_refs);
                xa_erase(&dpll_pin_xa, pin->id);
+               dpll_pin_prop_free(&pin->prop);
                kfree(pin);
        }
        mutex_unlock(&dpll_lock);
@@ -564,8 +614,6 @@ dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
            WARN_ON(!ops->state_on_dpll_get) ||
            WARN_ON(!ops->direction_get))
                return -EINVAL;
-       if (ASSERT_DPLL_REGISTERED(dpll))
-               return -EINVAL;
 
        mutex_lock(&dpll_lock);
        if (WARN_ON(!(dpll->module == pin->module &&
@@ -636,15 +684,13 @@ int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
        unsigned long i, stop;
        int ret;
 
-       if (WARN_ON(parent->prop->type != DPLL_PIN_TYPE_MUX))
+       if (WARN_ON(parent->prop.type != DPLL_PIN_TYPE_MUX))
                return -EINVAL;
 
        if (WARN_ON(!ops) ||
            WARN_ON(!ops->state_on_pin_get) ||
            WARN_ON(!ops->direction_get))
                return -EINVAL;
-       if (ASSERT_PIN_REGISTERED(parent))
-               return -EINVAL;
 
        mutex_lock(&dpll_lock);
        ret = dpll_xa_ref_pin_add(&pin->parent_refs, parent, ops, priv);
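
dpll_pin_prop_dup() above is the standard deep-copy-with-unwind shape: duplicate each member in turn, and on failure free everything duplicated so far in reverse order through cascading goto labels. A reduced sketch assuming two non-NULL strings:

    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    struct props { char *a; char *b; };

    static int props_dup(const struct props *src, struct props *dst)
    {
            dst->a = kstrdup(src->a, GFP_KERNEL);
            if (!dst->a)
                    return -ENOMEM;

            dst->b = kstrdup(src->b, GFP_KERNEL);
            if (!dst->b)
                    goto err_free_a;

            return 0;

    err_free_a:
            kfree(dst->a);
            return -ENOMEM;
    }
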
index 5585873c5c1b020e5618896aaa43ff1656ff53dd..717f715015c742238d5585fddc5cd267fbb0db9f 100644 (file)
@@ -44,7 +44,7 @@ struct dpll_device {
  * @module:            module of creator
  * @dpll_refs:         hold referencees to dplls pin was registered with
  * @parent_refs:       hold references to parent pins pin was registered with
- * @prop:              pointer to pin properties given by registerer
+ * @prop:              pin properties copied from the registerer
  * @rclk_dev_name:     holds name of device when pin can recover clock from it
  * @refcount:          refcount
  **/
@@ -55,7 +55,7 @@ struct dpll_pin {
        struct module *module;
        struct xarray dpll_refs;
        struct xarray parent_refs;
-       const struct dpll_pin_properties *prop;
+       struct dpll_pin_properties prop;
        refcount_t refcount;
 };
 
index 3370dbddb86bdeb6b627fdf741357eeb15ee3676..314bb377546519ef25987b2e6f77827f590fe5fe 100644 (file)
@@ -303,17 +303,17 @@ dpll_msg_add_pin_freq(struct sk_buff *msg, struct dpll_pin *pin,
        if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY, sizeof(freq), &freq,
                          DPLL_A_PIN_PAD))
                return -EMSGSIZE;
-       for (fs = 0; fs < pin->prop->freq_supported_num; fs++) {
+       for (fs = 0; fs < pin->prop.freq_supported_num; fs++) {
                nest = nla_nest_start(msg, DPLL_A_PIN_FREQUENCY_SUPPORTED);
                if (!nest)
                        return -EMSGSIZE;
-               freq = pin->prop->freq_supported[fs].min;
+               freq = pin->prop.freq_supported[fs].min;
                if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MIN, sizeof(freq),
                                  &freq, DPLL_A_PIN_PAD)) {
                        nla_nest_cancel(msg, nest);
                        return -EMSGSIZE;
                }
-               freq = pin->prop->freq_supported[fs].max;
+               freq = pin->prop.freq_supported[fs].max;
                if (nla_put_64bit(msg, DPLL_A_PIN_FREQUENCY_MAX, sizeof(freq),
                                  &freq, DPLL_A_PIN_PAD)) {
                        nla_nest_cancel(msg, nest);
@@ -329,9 +329,9 @@ static bool dpll_pin_is_freq_supported(struct dpll_pin *pin, u32 freq)
 {
        int fs;
 
-       for (fs = 0; fs < pin->prop->freq_supported_num; fs++)
-               if (freq >= pin->prop->freq_supported[fs].min &&
-                   freq <= pin->prop->freq_supported[fs].max)
+       for (fs = 0; fs < pin->prop.freq_supported_num; fs++)
+               if (freq >= pin->prop.freq_supported[fs].min &&
+                   freq <= pin->prop.freq_supported[fs].max)
                        return true;
        return false;
 }
@@ -421,7 +421,7 @@ static int
 dpll_cmd_pin_get_one(struct sk_buff *msg, struct dpll_pin *pin,
                     struct netlink_ext_ack *extack)
 {
-       const struct dpll_pin_properties *prop = pin->prop;
+       const struct dpll_pin_properties *prop = &pin->prop;
        struct dpll_pin_ref *ref;
        int ret;
 
@@ -553,6 +553,24 @@ __dpll_device_change_ntf(struct dpll_device *dpll)
        return dpll_device_event_send(DPLL_CMD_DEVICE_CHANGE_NTF, dpll);
 }
 
+static bool dpll_pin_available(struct dpll_pin *pin)
+{
+       struct dpll_pin_ref *par_ref;
+       unsigned long i;
+
+       if (!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED))
+               return false;
+       xa_for_each(&pin->parent_refs, i, par_ref)
+               if (xa_get_mark(&dpll_pin_xa, par_ref->pin->id,
+                               DPLL_REGISTERED))
+                       return true;
+       xa_for_each(&pin->dpll_refs, i, par_ref)
+               if (xa_get_mark(&dpll_device_xa, par_ref->dpll->id,
+                               DPLL_REGISTERED))
+                       return true;
+       return false;
+}
+
 /**
  * dpll_device_change_ntf - notify that the dpll device has been changed
  * @dpll: registered dpll pointer
@@ -579,7 +597,7 @@ dpll_pin_event_send(enum dpll_cmd event, struct dpll_pin *pin)
        int ret = -ENOMEM;
        void *hdr;
 
-       if (WARN_ON(!xa_get_mark(&dpll_pin_xa, pin->id, DPLL_REGISTERED)))
+       if (!dpll_pin_available(pin))
                return -ENODEV;
 
        msg = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
@@ -717,7 +735,7 @@ dpll_pin_on_pin_state_set(struct dpll_pin *pin, u32 parent_idx,
        int ret;
 
        if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
-             pin->prop->capabilities)) {
+             pin->prop.capabilities)) {
                NL_SET_ERR_MSG(extack, "state changing is not allowed");
                return -EOPNOTSUPP;
        }
@@ -753,7 +771,7 @@ dpll_pin_state_set(struct dpll_device *dpll, struct dpll_pin *pin,
        int ret;
 
        if (!(DPLL_PIN_CAPABILITIES_STATE_CAN_CHANGE &
-             pin->prop->capabilities)) {
+             pin->prop.capabilities)) {
                NL_SET_ERR_MSG(extack, "state changing is not allowed");
                return -EOPNOTSUPP;
        }
@@ -780,7 +798,7 @@ dpll_pin_prio_set(struct dpll_device *dpll, struct dpll_pin *pin,
        int ret;
 
        if (!(DPLL_PIN_CAPABILITIES_PRIORITY_CAN_CHANGE &
-             pin->prop->capabilities)) {
+             pin->prop.capabilities)) {
                NL_SET_ERR_MSG(extack, "prio changing is not allowed");
                return -EOPNOTSUPP;
        }
@@ -808,7 +826,7 @@ dpll_pin_direction_set(struct dpll_pin *pin, struct dpll_device *dpll,
        int ret;
 
        if (!(DPLL_PIN_CAPABILITIES_DIRECTION_CAN_CHANGE &
-             pin->prop->capabilities)) {
+             pin->prop.capabilities)) {
                NL_SET_ERR_MSG(extack, "direction changing is not allowed");
                return -EOPNOTSUPP;
        }
@@ -838,8 +856,8 @@ dpll_pin_phase_adj_set(struct dpll_pin *pin, struct nlattr *phase_adj_attr,
        int ret;
 
        phase_adj = nla_get_s32(phase_adj_attr);
-       if (phase_adj > pin->prop->phase_range.max ||
-           phase_adj < pin->prop->phase_range.min) {
+       if (phase_adj > pin->prop.phase_range.max ||
+           phase_adj < pin->prop.phase_range.min) {
                NL_SET_ERR_MSG_ATTR(extack, phase_adj_attr,
                                    "phase adjust value not supported");
                return -EINVAL;
@@ -1023,7 +1041,7 @@ dpll_pin_find(u64 clock_id, struct nlattr *mod_name_attr,
        unsigned long i;
 
        xa_for_each_marked(&dpll_pin_xa, i, pin, DPLL_REGISTERED) {
-               prop = pin->prop;
+               prop = &pin->prop;
                cid_match = clock_id ? pin->clock_id == clock_id : true;
                mod_match = mod_name_attr && module_name(pin->module) ?
                        !nla_strcmp(mod_name_attr,
@@ -1130,6 +1148,10 @@ int dpll_nl_pin_id_get_doit(struct sk_buff *skb, struct genl_info *info)
        }
        pin = dpll_pin_find_from_nlattr(info);
        if (!IS_ERR(pin)) {
+               if (!dpll_pin_available(pin)) {
+                       nlmsg_free(msg);
+                       return -ENODEV;
+               }
                ret = dpll_msg_add_pin_handle(msg, pin);
                if (ret) {
                        nlmsg_free(msg);
@@ -1179,6 +1201,8 @@ int dpll_nl_pin_get_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
 
        xa_for_each_marked_start(&dpll_pin_xa, i, pin, DPLL_REGISTERED,
                                 ctx->idx) {
+               if (!dpll_pin_available(pin))
+                       continue;
                hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
                                  cb->nlh->nlmsg_seq,
                                  &dpll_nl_family, NLM_F_MULTI,
@@ -1441,7 +1465,8 @@ int dpll_pin_pre_doit(const struct genl_split_ops *ops, struct sk_buff *skb,
        }
        info->user_ptr[0] = xa_load(&dpll_pin_xa,
                                    nla_get_u32(info->attrs[DPLL_A_PIN_ID]));
-       if (!info->user_ptr[0]) {
+       if (!info->user_ptr[0] ||
+           !dpll_pin_available(info->user_ptr[0])) {
                NL_SET_ERR_MSG(info->extack, "pin not found");
                ret = -ENODEV;
                goto unlock_dev;
index 6146b2927d5c56af6bc3b9722c1789f29a4498fe..f2556a8e940156bc4f9d34ae5dc92aac837b688a 100644 (file)
@@ -107,12 +107,12 @@ struct ffa_drv_info {
        struct work_struct notif_pcpu_work;
        struct work_struct irq_work;
        struct xarray partition_info;
-       unsigned int partition_count;
        DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
        struct mutex notify_lock; /* lock to protect notifier hashtable  */
 };
 
 static struct ffa_drv_info *drv_info;
+static void ffa_partitions_cleanup(void);
 
 /*
  * The driver must be able to support all the versions from the earliest
@@ -733,6 +733,11 @@ static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
        void *cb_data;
 
        partition = xa_load(&drv_info->partition_info, part_id);
+       if (!partition) {
+               pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
+               return;
+       }
+
        read_lock(&partition->rw_lock);
        callback = partition->callback;
        cb_data = partition->cb_data;
@@ -915,6 +920,11 @@ static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
                return -EOPNOTSUPP;
 
        partition = xa_load(&drv_info->partition_info, part_id);
+       if (!partition) {
+               pr_err("%s: Invalid partition ID 0x%x\n", __func__, part_id);
+               return -EINVAL;
+       }
+
        write_lock(&partition->rw_lock);
 
        cb_valid = !!partition->callback;
@@ -1186,9 +1196,9 @@ void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
        kfree(pbuf);
 }
 
-static void ffa_setup_partitions(void)
+static int ffa_setup_partitions(void)
 {
-       int count, idx;
+       int count, idx, ret;
        uuid_t uuid;
        struct ffa_device *ffa_dev;
        struct ffa_dev_part_info *info;
@@ -1197,7 +1207,7 @@ static void ffa_setup_partitions(void)
        count = ffa_partition_probe(&uuid_null, &pbuf);
        if (count <= 0) {
                pr_info("%s: No partitions found, error %d\n", __func__, count);
-               return;
+               return -EINVAL;
        }
 
        xa_init(&drv_info->partition_info);
@@ -1226,40 +1236,53 @@ static void ffa_setup_partitions(void)
                        ffa_device_unregister(ffa_dev);
                        continue;
                }
-               xa_store(&drv_info->partition_info, tpbuf->id, info, GFP_KERNEL);
+               rwlock_init(&info->rw_lock);
+               ret = xa_insert(&drv_info->partition_info, tpbuf->id,
+                               info, GFP_KERNEL);
+               if (ret) {
+                       pr_err("%s: failed to save partition ID 0x%x - ret:%d\n",
+                              __func__, tpbuf->id, ret);
+                       ffa_device_unregister(ffa_dev);
+                       kfree(info);
+               }
        }
-       drv_info->partition_count = count;
 
        kfree(pbuf);
 
        /* Allocate for the host */
        info = kzalloc(sizeof(*info), GFP_KERNEL);
-       if (!info)
-               return;
-       xa_store(&drv_info->partition_info, drv_info->vm_id, info, GFP_KERNEL);
-       drv_info->partition_count++;
+       if (!info) {
+               pr_err("%s: failed to alloc Host partition ID 0x%x. Abort.\n",
+                      __func__, drv_info->vm_id);
+               /* Already registered devices are freed on bus_exit */
+               ffa_partitions_cleanup();
+               return -ENOMEM;
+       }
+
+       rwlock_init(&info->rw_lock);
+       ret = xa_insert(&drv_info->partition_info, drv_info->vm_id,
+                       info, GFP_KERNEL);
+       if (ret) {
+               pr_err("%s: failed to save Host partition ID 0x%x - ret:%d. Abort.\n",
+                      __func__, drv_info->vm_id, ret);
+               kfree(info);
+               /* Already registered devices are freed on bus_exit */
+               ffa_partitions_cleanup();
+       }
+
+       return ret;
 }
 
 static void ffa_partitions_cleanup(void)
 {
-       struct ffa_dev_part_info **info;
-       int idx, count = drv_info->partition_count;
-
-       if (!count)
-               return;
-
-       info = kcalloc(count, sizeof(*info), GFP_KERNEL);
-       if (!info)
-               return;
-
-       xa_extract(&drv_info->partition_info, (void **)info, 0, VM_ID_MASK,
-                  count, XA_PRESENT);
+       struct ffa_dev_part_info *info;
+       unsigned long idx;
 
-       for (idx = 0; idx < count; idx++)
-               kfree(info[idx]);
-       kfree(info);
+       xa_for_each(&drv_info->partition_info, idx, info) {
+               xa_erase(&drv_info->partition_info, idx);
+               kfree(info);
+       }
 
-       drv_info->partition_count = 0;
        xa_destroy(&drv_info->partition_info);
 }
 
@@ -1508,7 +1531,11 @@ static int __init ffa_init(void)
 
        ffa_notifications_setup();
 
-       ffa_setup_partitions();
+       ret = ffa_setup_partitions();
+       if (ret) {
+               pr_err("failed to setup partitions\n");
+               goto cleanup_notifs;
+       }
 
        ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
                                       drv_info, true);
@@ -1516,6 +1543,9 @@ static int __init ffa_init(void)
                pr_info("Failed to register driver sched callback %d\n", ret);
 
        return 0;
+
+cleanup_notifs:
+       ffa_notifications_cleanup();
 free_pages:
        if (drv_info->tx_buffer)
                free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
@@ -1535,7 +1565,6 @@ static void __exit ffa_exit(void)
        ffa_rxtx_unmap(drv_info->vm_id);
        free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
        free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
-       xa_destroy(&drv_info->partition_info);
        kfree(drv_info);
        arm_ffa_bus_exit();
 }
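
Annotation: a minimal sketch (not the driver code itself) of why the rework above moves from xa_store() to xa_insert(). xa_store() silently replaces an existing entry, leaking the old one, while xa_insert() fails with -EBUSY on a duplicate index; the teardown mirrors the new xa_for_each()-based ffa_partitions_cleanup().

#include <linux/slab.h>
#include <linux/xarray.h>

static DEFINE_XARRAY(example_xa);

static int example_add(unsigned long id, size_t size)
{
	void *entry = kzalloc(size, GFP_KERNEL);
	int ret;

	if (!entry)
		return -ENOMEM;

	ret = xa_insert(&example_xa, id, entry, GFP_KERNEL);
	if (ret)		/* -EBUSY if id is already populated */
		kfree(entry);

	return ret;
}

static void example_cleanup(void)
{
	unsigned long idx;
	void *entry;

	/* erasing while iterating is safe with xa_for_each() */
	xa_for_each(&example_xa, idx, entry) {
		xa_erase(&example_xa, idx);
		kfree(entry);
	}
	xa_destroy(&example_xa);
}
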
index c0644558042a06dc1f0c087af4f22264abeee52a..e2050adbf85c6a125fc5ba241fb0c6b133466bfe 100644 (file)
@@ -13,7 +13,7 @@
 #include "notify.h"
 
 /* Updated only after ALL the mandatory features for that version are merged */
-#define SCMI_PROTOCOL_SUPPORTED_VERSION                0x20001
+#define SCMI_PROTOCOL_SUPPORTED_VERSION                0x20000
 
 enum scmi_clock_protocol_cmd {
        CLOCK_ATTRIBUTES = 0x3,
@@ -954,8 +954,7 @@ static int scmi_clock_protocol_init(const struct scmi_protocol_handle *ph)
                        scmi_clock_describe_rates_get(ph, clkid, clk);
        }
 
-       if (PROTOCOL_REV_MAJOR(version) >= 0x2 &&
-           PROTOCOL_REV_MINOR(version) >= 0x1) {
+       if (PROTOCOL_REV_MAJOR(version) >= 0x3) {
                cinfo->clock_config_set = scmi_clock_config_set_v2;
                cinfo->clock_config_get = scmi_clock_config_get_v2;
        } else {
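
Annotation: besides reverting the advertised version to 2.0 above, this hunk drops a fragile compound check. A predicate of the form "major >= 2 && minor >= 1" does not mean "version 2.1 or later": it wrongly rejects any later major with a .0 minor. A hedged illustration, assuming the usual 16:16 major:minor packing:

#define REV_MAJOR(v)	(((v) >> 16) & 0xffff)
#define REV_MINOR(v)	((v) & 0xffff)

/* old-style check: intended as "v2.1+", but 0x30000 (v3.0) fails it */
static bool has_v2_config_old(u32 v)
{
	return REV_MAJOR(v) >= 0x2 && REV_MINOR(v) >= 0x1;
}

/* the replacement keys off the major version alone */
static bool has_v2_config_new(u32 v)
{
	return REV_MAJOR(v) >= 0x3;
}
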
index c46dc5215af7a7c8a78e0fe26c12fac51c8080b7..00b165d1f502df7816527298996f196585d10f5a 100644 (file)
@@ -314,6 +314,7 @@ void shmem_fetch_notification(struct scmi_shared_mem __iomem *shmem,
 void shmem_clear_channel(struct scmi_shared_mem __iomem *shmem);
 bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
                     struct scmi_xfer *xfer);
+bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem);
 
 /* declarations for message passing transports */
 struct scmi_msg_payld;
index 19246ed1f01ff7cc3ea7346402c32e02b57b336a..b8d470417e8f99bb6408aba541bc4b89541ddf7c 100644 (file)
@@ -45,6 +45,20 @@ static void rx_callback(struct mbox_client *cl, void *m)
 {
        struct scmi_mailbox *smbox = client_to_scmi_mailbox(cl);
 
+       /*
+        * An A2P IRQ is NOT valid when received while the platform still has
+        * the ownership of the channel, because the platform at first releases
+        * the SMT channel and then sends the completion interrupt.
+        *
+        * This addresses a possible race condition in which a spurious IRQ from
+        * a previous timed-out reply which arrived late could be wrongly
+        * associated with the next pending transaction.
+        */
+       if (cl->knows_txdone && !shmem_channel_free(smbox->shmem)) {
+               dev_warn(smbox->cinfo->dev, "Ignoring spurious A2P IRQ!\n");
+               return;
+       }
+
        scmi_rx_callback(smbox->cinfo, shmem_read_header(smbox->shmem), NULL);
 }
 
index 8ea2a7b3d35d2029f9731ef3031d575609093d6e..211e8e0aef2c2b4fade048990249c2444afb946a 100644 (file)
@@ -350,8 +350,8 @@ process_response_opp(struct scmi_opp *opp, unsigned int loop_idx,
 }
 
 static inline void
-process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
-                       unsigned int loop_idx,
+process_response_opp_v4(struct device *dev, struct perf_dom_info *dom,
+                       struct scmi_opp *opp, unsigned int loop_idx,
                        const struct scmi_msg_resp_perf_describe_levels_v4 *r)
 {
        opp->perf = le32_to_cpu(r->opp[loop_idx].perf_val);
@@ -362,10 +362,23 @@ process_response_opp_v4(struct perf_dom_info *dom, struct scmi_opp *opp,
        /* Note that PERF v4 always reports five 32-bit words */
        opp->indicative_freq = le32_to_cpu(r->opp[loop_idx].indicative_freq);
        if (dom->level_indexing_mode) {
+               int ret;
+
                opp->level_index = le32_to_cpu(r->opp[loop_idx].level_index);
 
-               xa_store(&dom->opps_by_idx, opp->level_index, opp, GFP_KERNEL);
-               xa_store(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+               ret = xa_insert(&dom->opps_by_idx, opp->level_index, opp,
+                               GFP_KERNEL);
+               if (ret)
+                       dev_warn(dev,
+                                "Failed to add opps_by_idx at %d - ret:%d\n",
+                                opp->level_index, ret);
+
+               ret = xa_insert(&dom->opps_by_lvl, opp->perf, opp, GFP_KERNEL);
+               if (ret)
+                       dev_warn(dev,
+                                "Failed to add opps_by_lvl at %d - ret:%d\n",
+                                opp->perf, ret);
+
                hash_add(dom->opps_by_freq, &opp->hash, opp->indicative_freq);
        }
 }
@@ -382,7 +395,7 @@ iter_perf_levels_process_response(const struct scmi_protocol_handle *ph,
        if (PROTOCOL_REV_MAJOR(p->version) <= 0x3)
                process_response_opp(opp, st->loop_idx, response);
        else
-               process_response_opp_v4(p->perf_dom, opp, st->loop_idx,
+               process_response_opp_v4(ph->dev, p->perf_dom, opp, st->loop_idx,
                                        response);
        p->perf_dom->opp_count++;
 
index 0493aa3c12bf5363e02c1ecc9b2520d1bd8b3d67..350573518503355f6abaa4d24cbcac6368e8930c 100644 (file)
@@ -1111,7 +1111,6 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
                int i;
 
                for (i = 0; i < num_chans; i++) {
-                       void *xret;
                        struct scmi_raw_queue *q;
 
                        q = scmi_raw_queue_init(raw);
@@ -1120,13 +1119,12 @@ static int scmi_raw_mode_setup(struct scmi_raw_mode_info *raw,
                                goto err_xa;
                        }
 
-                       xret = xa_store(&raw->chans_q, channels[i], q,
+                       ret = xa_insert(&raw->chans_q, channels[i], q,
                                        GFP_KERNEL);
-                       if (xa_err(xret)) {
+                       if (ret) {
                                dev_err(dev,
                                        "Fail to allocate Raw queue 0x%02X\n",
                                        channels[i]);
-                               ret = xa_err(xret);
                                goto err_xa;
                        }
                }
@@ -1322,6 +1320,12 @@ void scmi_raw_message_report(void *r, struct scmi_xfer *xfer,
        dev = raw->handle->dev;
        q = scmi_raw_queue_select(raw, idx,
                                  SCMI_XFER_IS_CHAN_SET(xfer) ? chan_id : 0);
+       if (!q) {
+               dev_warn(dev,
+                        "RAW[%d] - NO queue for chan 0x%X. Dropping report.\n",
+                        idx, chan_id);
+               return;
+       }
 
        /*
         * Grab the msg_q_lock upfront to avoid a possible race between
index 87b4f4d35f06230bc161fc4205c7b199e03c0015..8bf495bcad09b7ba8246c05b4e76086fa1bdaf90 100644 (file)
@@ -10,7 +10,7 @@
 #include <linux/processor.h>
 #include <linux/types.h>
 
-#include <asm-generic/bug.h>
+#include <linux/bug.h>
 
 #include "common.h"
 
@@ -122,3 +122,9 @@ bool shmem_poll_done(struct scmi_shared_mem __iomem *shmem,
                (SCMI_SHMEM_CHAN_STAT_CHANNEL_ERROR |
                 SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
 }
+
+bool shmem_channel_free(struct scmi_shared_mem __iomem *shmem)
+{
+       return (ioread32(&shmem->channel_status) &
+                       SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE);
+}
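
Annotation: the guard added to the mailbox rx_callback() earlier in this diff relies on the SMT ordering this helper samples: the platform releases the channel (sets the FREE bit) strictly before raising the completion IRQ. A hedged sketch of that producer-side ordering (the real platform firmware is out of tree, and the struct layout/bit define are assumed visible here):

static void smt_complete_sketch(struct scmi_shared_mem __iomem *shmem,
				void (*ring_doorbell)(void))
{
	/* reply payload has already been written to the shared area */
	iowrite32(ioread32(&shmem->channel_status) |
		  SCMI_SHMEM_CHAN_STAT_CHANNEL_FREE,
		  &shmem->channel_status);	/* 1: release ownership */
	ring_doorbell();			/* 2: only then interrupt */
}

Because step 1 strictly precedes step 2, an A2P IRQ observed while the channel is still busy cannot complete the current transaction and must be a leftover from a timed-out one.
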
index 82fcfd29bc4d29116b051c946edb9b6535fd78ac..3c197db42c9d936866f9ff68cf7561e4735cfe1e 100644 (file)
@@ -128,4 +128,4 @@ unlock_mutex:
 }
 
 /* must execute after PCI subsystem for EFI quirks */
-subsys_initcall_sync(sysfb_init);
+device_initcall(sysfb_init);
index be7f2fa5aa7b600a2605084d832e23f24d501c84..806b88d8dfb7bda7d23cae021eb08c3bbf383ab1 100644 (file)
@@ -330,20 +330,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
                switch (flow_type) {
                case IRQ_TYPE_LEVEL_HIGH:
                        sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
                        break;
                case IRQ_TYPE_LEVEL_LOW:
                        sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 0);
+                       sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);
                        break;
                case IRQ_TYPE_EDGE_RISING:
                case IRQ_TYPE_EDGE_FALLING:
                case IRQ_TYPE_EDGE_BOTH:
                        state = sprd_eic_get(chip, offset);
-                       if (state)
+                       if (state) {
                                sprd_eic_update(chip, offset,
                                                SPRD_EIC_DBNC_IEV, 0);
-                       else
+                               sprd_eic_update(chip, offset,
+                                               SPRD_EIC_DBNC_IC, 1);
+                       } else {
                                sprd_eic_update(chip, offset,
                                                SPRD_EIC_DBNC_IEV, 1);
+                               sprd_eic_update(chip, offset,
+                                               SPRD_EIC_DBNC_IC, 1);
+                       }
                        break;
                default:
                        return -ENOTSUPP;
@@ -355,20 +362,27 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
                switch (flow_type) {
                case IRQ_TYPE_LEVEL_HIGH:
                        sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 0);
+                       sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
                        break;
                case IRQ_TYPE_LEVEL_LOW:
                        sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTPOL, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_LATCH_INTCLR, 1);
                        break;
                case IRQ_TYPE_EDGE_RISING:
                case IRQ_TYPE_EDGE_FALLING:
                case IRQ_TYPE_EDGE_BOTH:
                        state = sprd_eic_get(chip, offset);
-                       if (state)
+                       if (state) {
                                sprd_eic_update(chip, offset,
                                                SPRD_EIC_LATCH_INTPOL, 0);
-                       else
+                               sprd_eic_update(chip, offset,
+                                               SPRD_EIC_LATCH_INTCLR, 1);
+                       } else {
                                sprd_eic_update(chip, offset,
                                                SPRD_EIC_LATCH_INTPOL, 1);
+                               sprd_eic_update(chip, offset,
+                                               SPRD_EIC_LATCH_INTCLR, 1);
+                       }
                        break;
                default:
                        return -ENOTSUPP;
@@ -382,29 +396,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_EDGE_FALLING:
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
+                       sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_EDGE_BOTH:
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_LEVEL_HIGH:
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_level_irq);
                        break;
                case IRQ_TYPE_LEVEL_LOW:
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTMODE, 1);
                        sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTPOL, 0);
+                       sprd_eic_update(chip, offset, SPRD_EIC_ASYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_level_irq);
                        break;
                default:
@@ -417,29 +436,34 @@ static int sprd_eic_irq_set_type(struct irq_data *data, unsigned int flow_type)
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_EDGE_FALLING:
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
+                       sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_EDGE_BOTH:
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_edge_irq);
                        break;
                case IRQ_TYPE_LEVEL_HIGH:
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 1);
+                       sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_level_irq);
                        break;
                case IRQ_TYPE_LEVEL_LOW:
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTBOTH, 0);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTMODE, 1);
                        sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTPOL, 0);
+                       sprd_eic_update(chip, offset, SPRD_EIC_SYNC_INTCLR, 1);
                        irq_set_handler_locked(data, handle_level_irq);
                        break;
                default:
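
Annotation: every trigger-type branch above gains a write to the matching interrupt-clear field (DBNC_IC / LATCH_INTCLR / ASYNC_INTCLR / SYNC_INTCLR), so an event latched under the previous trigger configuration cannot fire spuriously once the new type takes effect. The idiom in isolation, as a sketch reusing the driver's own helper:

/* sketch only: the configure-then-clear pattern used above */
static void eic_dbnc_set_rising_sketch(struct gpio_chip *chip,
				       unsigned int offset)
{
	sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IEV, 1);	/* new polarity */
	sprd_eic_update(chip, offset, SPRD_EIC_DBNC_IC, 1);	/* W1C stale event */
}
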
index 88066826d8e5b629697136b8bb2431b543544977..cd3e9657cc36df59123a571a0ed2ed5332272a5d 100644 (file)
@@ -1651,6 +1651,20 @@ static const struct dmi_system_id gpiolib_acpi_quirks[] __initconst = {
                        .ignore_interrupt = "INT33FC:00@3",
                },
        },
+       {
+               /*
+                * Spurious wakeups from TP_ATTN# pin
+                * Found in BIOS 0.35
+                * https://gitlab.freedesktop.org/drm/amd/-/issues/3073
+                */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "GPD"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "G1619-04"),
+               },
+               .driver_data = &(struct acpi_gpiolib_dmi_quirk) {
+                       .ignore_wake = "PNP0C50:00@8",
+               },
+       },
        {} /* Terminating entry */
 };
 
index 73b8cca35bab8780d1938a45d035d19648bdd081..c623e23049d1d4bde50991fcddc8b542df0099b7 100644 (file)
@@ -121,6 +121,7 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
        struct amdgpu_bo_param bp;
        dma_addr_t dma_addr;
        struct page *p;
+       unsigned long x;
        int ret;
 
        if (adev->gart.bo != NULL)
@@ -130,6 +131,10 @@ int amdgpu_gart_table_ram_alloc(struct amdgpu_device *adev)
        if (!p)
                return -ENOMEM;
 
+       /* assign pages to this device */
+       for (x = 0; x < (1UL << order); x++)
+               p[x].mapping = adev->mman.bdev.dev_mapping;
+
        /* If the hardware does not support UTCL2 snooping of the CPU caches
         * then set_memory_wc() could be used as a workaround to mark the pages
         * as write combine memory.
@@ -223,6 +228,7 @@ void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
        unsigned int order = get_order(adev->gart.table_size);
        struct sg_table *sg = adev->gart.bo->tbo.sg;
        struct page *p;
+       unsigned long x;
        int ret;
 
        ret = amdgpu_bo_reserve(adev->gart.bo, false);
@@ -234,6 +240,8 @@ void amdgpu_gart_table_ram_free(struct amdgpu_device *adev)
        sg_free_table(sg);
        kfree(sg);
        p = virt_to_page(adev->gart.ptr);
+       for (x = 0; x < (1UL << order); x++)
+               p[x].mapping = NULL;
        __free_pages(p, order);
 
        adev->gart.ptr = NULL;
index 08916538a615ff3d072eb5241a97495795c7e32a..8db880244324ff1077ff3d87c20b7387ecd8b74b 100644 (file)
@@ -221,8 +221,23 @@ static struct attribute *amdgpu_vram_mgr_attributes[] = {
        NULL
 };
 
+static umode_t amdgpu_vram_attrs_is_visible(struct kobject *kobj,
+                                           struct attribute *attr, int i)
+{
+       struct device *dev = kobj_to_dev(kobj);
+       struct drm_device *ddev = dev_get_drvdata(dev);
+       struct amdgpu_device *adev = drm_to_adev(ddev);
+
+       if (attr == &dev_attr_mem_info_vram_vendor.attr &&
+           !adev->gmc.vram_vendor)
+               return 0;
+
+       return attr->mode;
+}
+
 const struct attribute_group amdgpu_vram_mgr_attr_group = {
-       .attrs = amdgpu_vram_mgr_attributes
+       .attrs = amdgpu_vram_mgr_attributes,
+       .is_visible = amdgpu_vram_attrs_is_visible
 };
 
 /**
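
Annotation: the new .is_visible hook lets the attribute group stay statically registered while hiding mem_info_vram_vendor on devices where the vendor field was never populated: returning 0 suppresses an attribute, returning attr->mode keeps its declared permissions. The generic shape of such a callback, sketched (dev_attr_example and example_data_present() are hypothetical):

static umode_t example_attrs_is_visible(struct kobject *kobj,
					struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);

	/* hide exactly this attribute when its backing data is absent */
	if (attr == &dev_attr_example.attr && !example_data_present(dev))
		return 0;

	return attr->mode;	/* everything else keeps its declared mode */
}
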
index d63cab294883b8b44caa908d5bafaeaf19750ef6..ecb622b7f9709c1a0f9e5307b3dad1da7f0f8f05 100644 (file)
@@ -6589,7 +6589,7 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
 #ifdef __BIG_ENDIAN
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1);
 #endif
-       tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+       tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
                            prop->allow_tunneling);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
index 0ea0866c261f84e24e8494755387b3d22482a0a2..d9cf9fd03d30010df0827033c4ceb6bbd21d3afa 100644 (file)
@@ -3846,7 +3846,7 @@ static int gfx_v11_0_compute_mqd_init(struct amdgpu_device *adev, void *m,
                            (order_base_2(prop->queue_size / 4) - 1));
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE,
                            (order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1));
-       tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0);
+       tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 1);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, TUNNEL_DISPATCH,
                            prop->allow_tunneling);
        tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1);
index f9039d64ff2d72804556daa16b8ed9632b08b307..17b7a25121b00e48637fffff324531890301819a 100644 (file)
@@ -1950,7 +1950,8 @@ static void gmc_v9_4_3_init_vram_info(struct amdgpu_device *adev)
        static const u32 regBIF_BIOS_SCRATCH_4 = 0x50;
        u32 vram_info;
 
-       if (!amdgpu_sriov_vf(adev)) {
+       /* Only for dGPU, vendor information is reliable */
+       if (!amdgpu_sriov_vf(adev) && !(adev->flags & AMD_IS_APU)) {
                vram_info = RREG32(regBIF_BIOS_SCRATCH_4);
                adev->gmc.vram_vendor = vram_info & 0xF;
        }
index 8b7fed91352696cf2b5cafab0680ad0737fa95ee..22cbfa1bdaddb9a764053421b16159391c1ba56d 100644 (file)
@@ -170,6 +170,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
        m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
        m->cp_hqd_pq_control |=
                        ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+       m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
        pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
 
        m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
index 15277f1d5cf0a9d9eb694ccaeec540e467ab774a..d722cbd317834a8a893a0ed5a847feb3a51d6961 100644 (file)
@@ -224,6 +224,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,
        m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
        m->cp_hqd_pq_control |=
                        ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
+       m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
        pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
 
        m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
index d4f525b66a09055909e163b815beee357f28d19d..6cda5b536362655bce2b3bab59f4cbd6c7452c8a 100644 (file)
@@ -272,6 +272,7 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
 {
        u32 v_blank_start, v_blank_end, h_position, v_position;
        struct amdgpu_crtc *acrtc = NULL;
+       struct dc *dc = adev->dm.dc;
 
        if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc))
                return -EINVAL;
@@ -284,6 +285,9 @@ static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc,
                return 0;
        }
 
+       if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
+               dc_allow_idle_optimizations(dc, false);
+
        /*
         * TODO rework base driver to use values directly.
         * for now parse it back into reg-format
@@ -1715,7 +1719,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
        init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0];
        init_data.clk_reg_offsets = adev->reg_offset[CLK_HWIP][0];
 
-       init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+       if (amdgpu_dc_debug_mask & DC_DISABLE_IPS)
+               init_data.flags.disable_ips = DMUB_IPS_DISABLE_ALL;
+
+       init_data.flags.disable_ips_in_vpb = 1;
 
        /* Enable DWB for tested platforms only */
        if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
@@ -8976,16 +8983,8 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
 
        trace_amdgpu_dm_atomic_commit_tail_begin(state);
 
-       if (dm->dc->caps.ips_support) {
-               for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
-                       if (new_con_state->crtc &&
-                               new_con_state->crtc->state->active &&
-                               drm_atomic_crtc_needs_modeset(new_con_state->crtc->state)) {
-                               dc_dmub_srv_apply_idle_power_optimizations(dm->dc, false);
-                               break;
-                       }
-               }
-       }
+       if (dm->dc->caps.ips_support && dm->dc->idle_optimizations_allowed)
+               dc_allow_idle_optimizations(dm->dc, false);
 
        drm_atomic_helper_update_legacy_modeset_state(dev, state);
        drm_dp_mst_atomic_wait_for_dependencies(state);
index 58b880acb087ae73352e9ac487d5ccd033f03f4d..3390f0d8420a05dc6d9daae1f3d8c4f53de57aed 100644 (file)
@@ -711,7 +711,7 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
 {
        bool st;
        enum dc_irq_source irq_source;
-
+       struct dc *dc = adev->dm.dc;
        struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id];
 
        if (!acrtc) {
@@ -729,6 +729,9 @@ static inline int dm_irq_state(struct amdgpu_device *adev,
 
        st = (state == AMDGPU_IRQ_STATE_ENABLE);
 
+       if (dc && dc->caps.ips_support && dc->idle_optimizations_allowed)
+               dc_allow_idle_optimizations(dc, false);
+
        dc_interrupt_set(adev->dm.dc, irq_source, st);
        return 0;
 }
index 5d7aa882416b3435a5dcfbaf502a9f326981bc81..c9317ea0258ea1cb2f686830fddc7469158966cc 100644 (file)
@@ -434,6 +434,7 @@ struct dc_config {
        bool EnableMinDispClkODM;
        bool enable_auto_dpm_test_logs;
        unsigned int disable_ips;
+       unsigned int disable_ips_in_vpb;
 };
 
 enum visual_confirm {
index b08ccb8c68bc366386e82a566c452459da0aabdc..9900dda2eef5cd2e44e6dbd008cd411194d107af 100644 (file)
@@ -1034,6 +1034,7 @@ enum replay_FW_Message_type {
        Replay_Msg_Not_Support = -1,
        Replay_Set_Timing_Sync_Supported,
        Replay_Set_Residency_Frameupdate_Timer,
+       Replay_Set_Pseudo_VTotal,
 };
 
 union replay_error_status {
@@ -1089,6 +1090,10 @@ struct replay_settings {
        uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
        /* Maximum link off frame count */
        enum replay_link_off_frame_count_level link_off_frame_count_level;
+       /* Replay pseudo vtotal used with ABM + IPS on full screen video, which can improve IPS residency */
+       uint16_t abm_with_ips_on_full_screen_video_pseudo_vtotal;
+       /* Replay last pseudo vtotal set to DMUB */
+       uint16_t last_pseudo_vtotal;
 };
 
 /* To split out "global" and "per-panel" config settings.
index 9c806385ecbdcce6c0d14f949ea41879758969f7..8b6c49622f3b63c8e6dae68c507e1e45c5a736a2 100644 (file)
@@ -680,7 +680,7 @@ void dcn35_power_down_on_boot(struct dc *dc)
 bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
 {
        struct dc_link *edp_links[MAX_NUM_EDP];
-       int edp_num;
+       int i, edp_num;
        if (dc->debug.dmcub_emulation)
                return true;
 
@@ -688,6 +688,13 @@ bool dcn35_apply_idle_power_optimizations(struct dc *dc, bool enable)
                dc_get_edp_links(dc, edp_links, &edp_num);
                if (edp_num == 0 || edp_num > 1)
                        return false;
+
+               for (i = 0; i < dc->current_state->stream_count; ++i) {
+                       struct dc_stream_state *stream = dc->current_state->streams[i];
+
+                       if (!stream->dpms_off && !dc_is_embedded_signal(stream->signal))
+                               return false;
+               }
        }
 
        // TODO: review other cases when idle optimization is allowed
index 5c9a30211c109f749ab7e1cceb402bd7a0dcb786..fc50931c2aecbb53d74a2d48913e608134510940 100644 (file)
@@ -205,7 +205,7 @@ enum dc_status core_link_read_dpcd(
        uint32_t extended_size;
        /* size of the remaining partitioned address space */
        uint32_t size_left_to_read;
-       enum dc_status status;
+       enum dc_status status = DC_ERROR_UNEXPECTED;
        /* size of the next partition to be read from */
        uint32_t partition_size;
        uint32_t data_index = 0;
@@ -234,7 +234,7 @@ enum dc_status core_link_write_dpcd(
 {
        uint32_t partition_size;
        uint32_t data_index = 0;
-       enum dc_status status;
+       enum dc_status status = DC_ERROR_UNEXPECTED;
 
        while (size) {
                partition_size = dpcd_get_next_partition_size(address, size);
index c64b6c848ef7219e3ddc44da8d4e56763a9bf7f4..e699731ee68e96388c52ed55c17b34cc8710aaab 100644 (file)
@@ -2832,6 +2832,7 @@ struct dmub_rb_cmd_psr_set_power_opt {
 #define REPLAY_RESIDENCY_MODE_MASK             (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
 # define REPLAY_RESIDENCY_MODE_PHY             (0x0 << REPLAY_RESIDENCY_MODE_SHIFT)
 # define REPLAY_RESIDENCY_MODE_ALPM            (0x1 << REPLAY_RESIDENCY_MODE_SHIFT)
+# define REPLAY_RESIDENCY_MODE_IPS             0x10
 
 #define REPLAY_RESIDENCY_ENABLE_MASK           (0x1 << REPLAY_RESIDENCY_ENABLE_SHIFT)
 # define REPLAY_RESIDENCY_DISABLE              (0x0 << REPLAY_RESIDENCY_ENABLE_SHIFT)
@@ -2894,6 +2895,10 @@ enum dmub_cmd_replay_type {
         * Set Residency Frameupdate Timer.
         */
        DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER = 6,
+       /**
+        * Set pseudo vtotal
+        */
+       DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL = 7,
 };
 
 /**
@@ -3076,6 +3081,26 @@ struct dmub_cmd_replay_set_timing_sync_data {
        uint8_t pad[2];
 };
 
+/**
+ * Data passed from driver to FW in a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+struct dmub_cmd_replay_set_pseudo_vtotal {
+       /**
+        * Panel Instance.
+        * Panel instance to identify which replay_state to use.
+        * Currently only 0 or 1 is supported.
+        */
+       uint8_t panel_inst;
+       /**
+        * Source vtotal used by Replay + IPS + ABM for full screen video
+        */
+       uint16_t vtotal;
+       /**
+        * Explicit padding to 4 byte boundary.
+        */
+       uint8_t pad;
+};
+
 /**
  * Definition of a DMUB_CMD__SET_REPLAY_POWER_OPT command.
  */
@@ -3156,6 +3181,20 @@ struct dmub_rb_cmd_replay_set_timing_sync {
        struct dmub_cmd_replay_set_timing_sync_data replay_set_timing_sync_data;
 };
 
+/**
+ * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+ */
+struct dmub_rb_cmd_replay_set_pseudo_vtotal {
+       /**
+        * Command header.
+        */
+       struct dmub_cmd_header header;
+       /**
+        * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+        */
+       struct dmub_cmd_replay_set_pseudo_vtotal data;
+};
+
 /**
  * Data passed from driver to FW in DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
  */
@@ -3207,6 +3246,10 @@ union dmub_replay_cmd_set {
         * Definition of DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command data.
         */
        struct dmub_cmd_replay_frameupdate_timer_data timer_data;
+       /**
+        * Definition of DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command data.
+        */
+       struct dmub_cmd_replay_set_pseudo_vtotal pseudo_vtotal_data;
 };
 
 /**
@@ -4358,6 +4401,10 @@ union dmub_rb_cmd {
         * Definition of a DMUB_CMD__REPLAY_SET_RESIDENCY_FRAMEUPDATE_TIMER command.
         */
        struct dmub_rb_cmd_replay_set_frameupdate_timer replay_set_frameupdate_timer;
+       /**
+        * Definition of a DMUB_CMD__REPLAY_SET_PSEUDO_VTOTAL command.
+        */
+       struct dmub_rb_cmd_replay_set_pseudo_vtotal replay_set_pseudo_vtotal;
 };
 
 /**
index ad98e504c00de5908ca94a38392ef818e91b2152..e304e8435fb8f1c5e29428f72c20a6097fb57697 100644 (file)
@@ -980,6 +980,11 @@ void set_replay_coasting_vtotal(struct dc_link *link,
        link->replay_settings.coasting_vtotal_table[type] = vtotal;
 }
 
+void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal)
+{
+       link->replay_settings.abm_with_ips_on_full_screen_video_pseudo_vtotal = vtotal;
+}
+
 void calculate_replay_link_off_frame_count(struct dc_link *link,
        uint16_t vtotal, uint16_t htotal)
 {
index c17bbc6fb38cafb518777b16c96a99b2116c36eb..bef4815e1703d78cdebc6f49bc160932d08c5272 100644 (file)
@@ -57,6 +57,7 @@ void init_replay_config(struct dc_link *link, struct replay_config *pr_config);
 void set_replay_coasting_vtotal(struct dc_link *link,
        enum replay_coasting_vtotal_type type,
        uint16_t vtotal);
+void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal);
 void calculate_replay_link_off_frame_count(struct dc_link *link,
        uint16_t vtotal, uint16_t htotal);
 
index 1dc5dd9b7bf70b10641a76e4c731e3e735aeaeef..df2c7ffe190f4db36050901dce5af89180646f3b 100644 (file)
@@ -258,6 +258,7 @@ enum DC_DEBUG_MASK {
        DC_ENABLE_DML2 = 0x100,
        DC_DISABLE_PSR_SU = 0x200,
        DC_DISABLE_REPLAY = 0x400,
+       DC_DISABLE_IPS = 0x800,
 };
 
 enum amd_dpm_forced_level;
index be519c8edf496fda93f393a077f174454e635a05..335980e2afbfb8e6eae89e7f28fdcc3391d39cde 100644 (file)
@@ -138,7 +138,7 @@ static inline size_t amdgpu_reginst_size(uint16_t num_inst, size_t inst_size,
 }
 
 #define amdgpu_asic_get_reg_state_supported(adev) \
-       ((adev)->asic_funcs->get_reg_state ? 1 : 0)
+       (((adev)->asic_funcs && (adev)->asic_funcs->get_reg_state) ? 1 : 0)
 
 #define amdgpu_asic_get_reg_state(adev, state, buf, size)                  \
        ((adev)->asic_funcs->get_reg_state ?                               \
index c16703868e5ca2a3f0a7f6c7e3757b8e6ba036d0..7ffad3eb0a01500fedf1be4348b17561bc744a38 100644 (file)
@@ -24,6 +24,7 @@
 
 #include <linux/firmware.h>
 #include <linux/pci.h>
+#include <linux/power_supply.h>
 #include <linux/reboot.h>
 
 #include "amdgpu.h"
@@ -817,16 +818,8 @@ static int smu_late_init(void *handle)
         * handle the switch automatically. Driver involvement
         * is unnecessary.
         */
-       if (!smu->dc_controlled_by_gpio) {
-               ret = smu_set_power_source(smu,
-                                          adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
-                                          SMU_POWER_SOURCE_DC);
-               if (ret) {
-                       dev_err(adev->dev, "Failed to switch to %s mode!\n",
-                               adev->pm.ac_power ? "AC" : "DC");
-                       return ret;
-               }
-       }
+       adev->pm.ac_power = power_supply_is_system_supplied() > 0;
+       smu_set_ac_dc(smu);
 
        if ((amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 1)) ||
            (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 3)))
@@ -2710,6 +2703,7 @@ int smu_get_power_limit(void *handle,
                case SMU_PPT_LIMIT_CURRENT:
                        switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
                        case IP_VERSION(13, 0, 2):
+                       case IP_VERSION(13, 0, 6):
                        case IP_VERSION(11, 0, 7):
                        case IP_VERSION(11, 0, 11):
                        case IP_VERSION(11, 0, 12):
index 5a314d0316c1c8410d1f44281e5cc487e4947e81..c7bfa68bf00f400f3396c9853d2c08c6bf971659 100644 (file)
@@ -1442,10 +1442,12 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
                        case 0x3:
                                dev_dbg(adev->dev, "Switched to AC mode!\n");
                                schedule_work(&smu->interrupt_work);
+                               adev->pm.ac_power = true;
                                break;
                        case 0x4:
                                dev_dbg(adev->dev, "Switched to DC mode!\n");
                                schedule_work(&smu->interrupt_work);
+                               adev->pm.ac_power = false;
                                break;
                        case 0x7:
                                /*
index 771a3d457c335e2cc08582a3e4e3a3ba853d2928..c486182ff275222fedfaa1e27c417f9be80d19d0 100644 (file)
@@ -1379,10 +1379,12 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
                        case 0x3:
                                dev_dbg(adev->dev, "Switched to AC mode!\n");
                                smu_v13_0_ack_ac_dc_interrupt(smu);
+                               adev->pm.ac_power = true;
                                break;
                        case 0x4:
                                dev_dbg(adev->dev, "Switched to DC mode!\n");
                                smu_v13_0_ack_ac_dc_interrupt(smu);
+                               adev->pm.ac_power = false;
                                break;
                        case 0x7:
                                /*
index a9b25faa63e468d0069ea08acfd7b90b1b36f056..4fdf34fffa9a57f53ad55418e957e36282ac98b9 100644 (file)
@@ -2357,6 +2357,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
        PPTable_t *pptable = table_context->driver_pptable;
        SkuTable_t *skutable = &pptable->SkuTable;
        uint32_t power_limit, od_percent_upper, od_percent_lower;
+       uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
 
        if (smu_v13_0_get_current_power_limit(smu, &power_limit))
                power_limit = smu->adev->pm.ac_power ?
@@ -2380,7 +2381,7 @@ static int smu_v13_0_0_get_power_limit(struct smu_context *smu,
                                        od_percent_upper, od_percent_lower, power_limit);
 
        if (max_power_limit) {
-               *max_power_limit = power_limit * (100 + od_percent_upper);
+               *max_power_limit = msg_limit * (100 + od_percent_upper);
                *max_power_limit /= 100;
        }
 
@@ -2959,6 +2960,55 @@ static bool smu_v13_0_0_wbrf_support_check(struct smu_context *smu)
        }
 }
 
+static int smu_v13_0_0_set_power_limit(struct smu_context *smu,
+                                      enum smu_ppt_limit_type limit_type,
+                                      uint32_t limit)
+{
+       PPTable_t *pptable = smu->smu_table.driver_pptable;
+       SkuTable_t *skutable = &pptable->SkuTable;
+       uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+       struct smu_table_context *table_context = &smu->smu_table;
+       OverDriveTableExternal_t *od_table =
+               (OverDriveTableExternal_t *)table_context->overdrive_table;
+       int ret = 0;
+
+       if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+               return -EINVAL;
+
+       if (limit <= msg_limit) {
+               if (smu->current_power_limit > msg_limit) {
+                       od_table->OverDriveTable.Ppt = 0;
+                       od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+                       ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
+                       if (ret) {
+                               dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+                               return ret;
+                       }
+               }
+               return smu_v13_0_set_power_limit(smu, limit_type, limit);
+       } else if (smu->od_enabled) {
+               ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
+               if (ret)
+                       return ret;
+
+               od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
+               od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+               ret = smu_v13_0_0_upload_overdrive_table(smu, od_table);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+                       return ret;
+               }
+
+               smu->current_power_limit = limit;
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
        .get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
        .set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
@@ -3013,7 +3063,7 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
        .set_fan_control_mode = smu_v13_0_set_fan_control_mode,
        .enable_mgpu_fan_boost = smu_v13_0_0_enable_mgpu_fan_boost,
        .get_power_limit = smu_v13_0_0_get_power_limit,
-       .set_power_limit = smu_v13_0_set_power_limit,
+       .set_power_limit = smu_v13_0_0_set_power_limit,
        .set_power_source = smu_v13_0_set_power_source,
        .get_power_profile_mode = smu_v13_0_0_get_power_profile_mode,
        .set_power_profile_mode = smu_v13_0_0_set_power_profile_mode,
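
Annotation: when the requested limit exceeds the firmware message cap (msg_limit), the new handler routes the excess through the overdrive table as a percentage offset: Ppt = limit * 100 / msg_limit - 100. For instance, asking for 330 W against a 300 W cap yields Ppt = 10, i.e. +10%; requests at or below the cap clear the offset and take the plain smu_v13_0_set_power_limit() path. The arithmetic in isolation:

/* sketch: integer percent offset above the stock cap, as used above */
static u32 od_ppt_offset(u32 limit, u32 msg_limit)
{
	return (limit * 100) / msg_limit - 100;	/* (330*100)/300 - 100 = 10 */
}
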
index 3c98a8a0386a2612d0470dd6dbd767a6ecc308b0..7e1941cf17964c594dc8821c3fcda75f64e9f145 100644 (file)
@@ -160,8 +160,8 @@ static const struct cmn2asic_msg_mapping smu_v13_0_6_message_map[SMU_MSG_MAX_COU
        MSG_MAP(GfxDriverResetRecovery,              PPSMC_MSG_GfxDriverResetRecovery,          0),
        MSG_MAP(GetMinGfxclkFrequency,               PPSMC_MSG_GetMinGfxDpmFreq,                1),
        MSG_MAP(GetMaxGfxclkFrequency,               PPSMC_MSG_GetMaxGfxDpmFreq,                1),
-       MSG_MAP(SetSoftMinGfxclk,                    PPSMC_MSG_SetSoftMinGfxClk,                0),
-       MSG_MAP(SetSoftMaxGfxClk,                    PPSMC_MSG_SetSoftMaxGfxClk,                0),
+       MSG_MAP(SetSoftMinGfxclk,                    PPSMC_MSG_SetSoftMinGfxClk,                1),
+       MSG_MAP(SetSoftMaxGfxClk,                    PPSMC_MSG_SetSoftMaxGfxClk,                1),
        MSG_MAP(PrepareMp1ForUnload,                 PPSMC_MSG_PrepareForDriverUnload,          0),
        MSG_MAP(GetCTFLimit,                         PPSMC_MSG_GetCTFLimit,                     0),
        MSG_MAP(GetThermalLimit,                     PPSMC_MSG_ReadThrottlerLimit,              0),
index 59606a19e3d2b4494885b72f36f6524ec25b5139..7c3e162e2d818fa4083c62373990b2c7e9a69e26 100644 (file)
@@ -2321,6 +2321,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
        PPTable_t *pptable = table_context->driver_pptable;
        SkuTable_t *skutable = &pptable->SkuTable;
        uint32_t power_limit, od_percent_upper, od_percent_lower;
+       uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
 
        if (smu_v13_0_get_current_power_limit(smu, &power_limit))
                power_limit = smu->adev->pm.ac_power ?
@@ -2344,7 +2345,7 @@ static int smu_v13_0_7_get_power_limit(struct smu_context *smu,
                                        od_percent_upper, od_percent_lower, power_limit);
 
        if (max_power_limit) {
-               *max_power_limit = power_limit * (100 + od_percent_upper);
+               *max_power_limit = msg_limit * (100 + od_percent_upper);
                *max_power_limit /= 100;
        }
 
@@ -2545,6 +2546,55 @@ static bool smu_v13_0_7_wbrf_support_check(struct smu_context *smu)
        return smu->smc_fw_version > 0x00524600;
 }
 
+static int smu_v13_0_7_set_power_limit(struct smu_context *smu,
+                                      enum smu_ppt_limit_type limit_type,
+                                      uint32_t limit)
+{
+       PPTable_t *pptable = smu->smu_table.driver_pptable;
+       SkuTable_t *skutable = &pptable->SkuTable;
+       uint32_t msg_limit = skutable->MsgLimits.Power[PPT_THROTTLER_PPT0][POWER_SOURCE_AC];
+       struct smu_table_context *table_context = &smu->smu_table;
+       OverDriveTableExternal_t *od_table =
+               (OverDriveTableExternal_t *)table_context->overdrive_table;
+       int ret = 0;
+
+       if (limit_type != SMU_DEFAULT_PPT_LIMIT)
+               return -EINVAL;
+
+       if (limit <= msg_limit) {
+               if (smu->current_power_limit > msg_limit) {
+                       od_table->OverDriveTable.Ppt = 0;
+                       od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+                       ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
+                       if (ret) {
+                               dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+                               return ret;
+                       }
+               }
+               return smu_v13_0_set_power_limit(smu, limit_type, limit);
+       } else if (smu->od_enabled) {
+               ret = smu_v13_0_set_power_limit(smu, limit_type, msg_limit);
+               if (ret)
+                       return ret;
+
+               od_table->OverDriveTable.Ppt = (limit * 100) / msg_limit - 100;
+               od_table->OverDriveTable.FeatureCtrlMask |= 1U << PP_OD_FEATURE_PPT_BIT;
+
+               ret = smu_v13_0_7_upload_overdrive_table(smu, od_table);
+               if (ret) {
+                       dev_err(smu->adev->dev, "Failed to upload overdrive table!\n");
+                       return ret;
+               }
+
+               smu->current_power_limit = limit;
+       } else {
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
 static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
        .get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
        .set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -2596,7 +2646,7 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
        .set_fan_control_mode = smu_v13_0_set_fan_control_mode,
        .enable_mgpu_fan_boost = smu_v13_0_7_enable_mgpu_fan_boost,
        .get_power_limit = smu_v13_0_7_get_power_limit,
-       .set_power_limit = smu_v13_0_set_power_limit,
+       .set_power_limit = smu_v13_0_7_set_power_limit,
        .set_power_source = smu_v13_0_set_power_source,
        .get_power_profile_mode = smu_v13_0_7_get_power_profile_mode,
        .set_power_profile_mode = smu_v13_0_7_set_power_profile_mode,
index ef31033439bc15a896ed8748b7a62a8b46336c13..29d91493b101acb5234c9a2fe76441925b346f55 100644 (file)
@@ -1762,6 +1762,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
        u8 request = msg->request & ~DP_AUX_I2C_MOT;
        int ret = 0;
 
+       mutex_lock(&ctx->aux_lock);
        pm_runtime_get_sync(dev);
        msg->reply = 0;
        switch (request) {
@@ -1778,6 +1779,7 @@ static ssize_t anx7625_aux_transfer(struct drm_dp_aux *aux,
                                        msg->size, msg->buffer);
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
+       mutex_unlock(&ctx->aux_lock);
 
        return ret;
 }
@@ -2474,7 +2476,9 @@ static void anx7625_bridge_atomic_disable(struct drm_bridge *bridge,
        ctx->connector = NULL;
        anx7625_dp_stop(ctx);
 
-       pm_runtime_put_sync(dev);
+       mutex_lock(&ctx->aux_lock);
+       pm_runtime_put_sync_suspend(dev);
+       mutex_unlock(&ctx->aux_lock);
 }
 
 static enum drm_connector_status
@@ -2668,6 +2672,7 @@ static int anx7625_i2c_probe(struct i2c_client *client)
 
        mutex_init(&platform->lock);
        mutex_init(&platform->hdcp_wq_lock);
+       mutex_init(&platform->aux_lock);
 
        INIT_DELAYED_WORK(&platform->hdcp_work, hdcp_check_work_func);
        platform->hdcp_workqueue = create_workqueue("hdcp workqueue");
index 66ebee7f3d832534ec64b780bdfa985bbfcfc896..39ed35d338363390d2fe37b765d4e0e48dc0118e 100644 (file)
@@ -475,6 +475,8 @@ struct anx7625_data {
        struct workqueue_struct *hdcp_workqueue;
        /* Lock for hdcp work queue */
        struct mutex hdcp_wq_lock;
+       /* Lock for aux transfer and disable */
+       struct mutex aux_lock;
        char edid_block;
        struct display_timing dt;
        u8 display_timing_valid;
index 541e4f5afc4c86a4e87b74a016885d6231afb892..14d4dcf239da835955f1d594579dd165288bd63f 100644 (file)
@@ -107,6 +107,7 @@ struct ps8640 {
        struct device_link *link;
        bool pre_enabled;
        bool need_post_hpd_delay;
+       struct mutex aux_lock;
 };
 
 static const struct regmap_config ps8640_regmap_config[] = {
@@ -345,11 +346,20 @@ static ssize_t ps8640_aux_transfer(struct drm_dp_aux *aux,
        struct device *dev = &ps_bridge->page[PAGE0_DP_CNTL]->dev;
        int ret;
 
+       mutex_lock(&ps_bridge->aux_lock);
        pm_runtime_get_sync(dev);
+       ret = _ps8640_wait_hpd_asserted(ps_bridge, 200 * 1000);
+       if (ret) {
+               pm_runtime_put_sync_suspend(dev);
+               goto exit;
+       }
        ret = ps8640_aux_transfer_msg(aux, msg);
        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
 
+exit:
+       mutex_unlock(&ps_bridge->aux_lock);
+
        return ret;
 }
 
@@ -470,7 +480,18 @@ static void ps8640_atomic_post_disable(struct drm_bridge *bridge,
        ps_bridge->pre_enabled = false;
 
        ps8640_bridge_vdo_control(ps_bridge, DISABLE);
+
+       /*
+        * The bridge seems to expect everything to be power cycled during
+        * the disable process, so grab a lock here to make sure
+        * ps8640_aux_transfer() is not holding a runtime PM reference and
+        * preventing the bridge from suspending.
+        */
+       mutex_lock(&ps_bridge->aux_lock);
+
        pm_runtime_put_sync_suspend(&ps_bridge->page[PAGE0_DP_CNTL]->dev);
+
+       mutex_unlock(&ps_bridge->aux_lock);
 }
 
 static int ps8640_bridge_attach(struct drm_bridge *bridge,
@@ -619,6 +640,8 @@ static int ps8640_probe(struct i2c_client *client)
        if (!ps_bridge)
                return -ENOMEM;
 
+       mutex_init(&ps_bridge->aux_lock);
+
        ps_bridge->supplies[0].supply = "vdd12";
        ps_bridge->supplies[1].supply = "vdd33";
        ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
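
Annotation: ps8640 and anx7625 get the same shape of fix. AUX transfers and the bridge disable path now serialize on a dedicated aux_lock, so the put_sync_suspend in disable can never race a transfer that still holds a runtime PM reference (which would keep the bridge from the power-down it expects). Condensed, with bridge_ctx and do_aux_transfer() as hypothetical stand-ins:

static ssize_t aux_transfer_sketch(struct bridge_ctx *ctx, struct device *dev)
{
	ssize_t ret;

	mutex_lock(&ctx->aux_lock);
	pm_runtime_get_sync(dev);
	ret = do_aux_transfer(ctx);
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	mutex_unlock(&ctx->aux_lock);

	return ret;
}

static void bridge_disable_sketch(struct bridge_ctx *ctx, struct device *dev)
{
	/* same lock: guarantees no AUX transfer is mid-flight here */
	mutex_lock(&ctx->aux_lock);
	pm_runtime_put_sync_suspend(dev);
	mutex_unlock(&ctx->aux_lock);
}
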
index be5914caa17d546601d11719976161624c1a420f..63a1a0c88be4d98d169996d341de5d0d1b6cae91 100644 (file)
@@ -969,10 +969,6 @@ static int samsung_dsim_init_link(struct samsung_dsim *dsi)
        reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
        reg &= ~DSIM_STOP_STATE_CNT_MASK;
        reg |= DSIM_STOP_STATE_CNT(driver_data->reg_values[STOP_STATE_CNT]);
-
-       if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
-               reg |= DSIM_FORCE_STOP_STATE;
-
        samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
 
        reg = DSIM_BTA_TIMEOUT(0xff) | DSIM_LPDR_TIMEOUT(0xffff);
@@ -1431,18 +1427,6 @@ static void samsung_dsim_disable_irq(struct samsung_dsim *dsi)
        disable_irq(dsi->irq);
 }
 
-static void samsung_dsim_set_stop_state(struct samsung_dsim *dsi, bool enable)
-{
-       u32 reg = samsung_dsim_read(dsi, DSIM_ESCMODE_REG);
-
-       if (enable)
-               reg |= DSIM_FORCE_STOP_STATE;
-       else
-               reg &= ~DSIM_FORCE_STOP_STATE;
-
-       samsung_dsim_write(dsi, DSIM_ESCMODE_REG, reg);
-}
-
 static int samsung_dsim_init(struct samsung_dsim *dsi)
 {
        const struct samsung_dsim_driver_data *driver_data = dsi->driver_data;
@@ -1492,9 +1476,6 @@ static void samsung_dsim_atomic_pre_enable(struct drm_bridge *bridge,
                ret = samsung_dsim_init(dsi);
                if (ret)
                        return;
-
-               samsung_dsim_set_display_mode(dsi);
-               samsung_dsim_set_display_enable(dsi, true);
        }
 }
 
@@ -1503,12 +1484,8 @@ static void samsung_dsim_atomic_enable(struct drm_bridge *bridge,
 {
        struct samsung_dsim *dsi = bridge_to_dsi(bridge);
 
-       if (samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type)) {
-               samsung_dsim_set_display_mode(dsi);
-               samsung_dsim_set_display_enable(dsi, true);
-       } else {
-               samsung_dsim_set_stop_state(dsi, false);
-       }
+       samsung_dsim_set_display_mode(dsi);
+       samsung_dsim_set_display_enable(dsi, true);
 
        dsi->state |= DSIM_STATE_VIDOUT_AVAILABLE;
 }
@@ -1521,9 +1498,6 @@ static void samsung_dsim_atomic_disable(struct drm_bridge *bridge,
        if (!(dsi->state & DSIM_STATE_ENABLED))
                return;
 
-       if (!samsung_dsim_hw_is_exynos(dsi->plat_data->hw_type))
-               samsung_dsim_set_stop_state(dsi, true);
-
        dsi->state &= ~DSIM_STATE_VIDOUT_AVAILABLE;
 }
 
@@ -1828,8 +1802,6 @@ static ssize_t samsung_dsim_host_transfer(struct mipi_dsi_host *host,
        if (ret)
                return ret;
 
-       samsung_dsim_set_stop_state(dsi, false);
-
        ret = mipi_dsi_create_packet(&xfer.packet, msg);
        if (ret < 0)
                return ret;
index 2bdc5b439bebd56407af3b5b04892b3ac90678d4..4560ae9cbce15095eddaf6296396960a7887ab06 100644 (file)
@@ -1080,6 +1080,26 @@ static int sii902x_init(struct sii902x *sii902x)
                        return ret;
        }
 
+       ret = sii902x_audio_codec_init(sii902x, dev);
+       if (ret)
+               return ret;
+
+       i2c_set_clientdata(sii902x->i2c, sii902x);
+
+       sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
+                                       1, 0, I2C_MUX_GATE,
+                                       sii902x_i2c_bypass_select,
+                                       sii902x_i2c_bypass_deselect);
+       if (!sii902x->i2cmux) {
+               ret = -ENOMEM;
+               goto err_unreg_audio;
+       }
+
+       sii902x->i2cmux->priv = sii902x;
+       ret = i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
+       if (ret)
+               goto err_unreg_audio;
+
        sii902x->bridge.funcs = &sii902x_bridge_funcs;
        sii902x->bridge.of_node = dev->of_node;
        sii902x->bridge.timings = &default_sii902x_timings;
@@ -1090,19 +1110,13 @@ static int sii902x_init(struct sii902x *sii902x)
 
        drm_bridge_add(&sii902x->bridge);
 
-       sii902x_audio_codec_init(sii902x, dev);
-
-       i2c_set_clientdata(sii902x->i2c, sii902x);
+       return 0;
 
-       sii902x->i2cmux = i2c_mux_alloc(sii902x->i2c->adapter, dev,
-                                       1, 0, I2C_MUX_GATE,
-                                       sii902x_i2c_bypass_select,
-                                       sii902x_i2c_bypass_deselect);
-       if (!sii902x->i2cmux)
-               return -ENOMEM;
+err_unreg_audio:
+       if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
+               platform_device_unregister(sii902x->audio.pdev);
 
-       sii902x->i2cmux->priv = sii902x;
-       return i2c_mux_add_adapter(sii902x->i2cmux, 0, 0, 0);
+       return ret;
 }
 
 static int sii902x_probe(struct i2c_client *client)
@@ -1170,12 +1184,14 @@ static int sii902x_probe(struct i2c_client *client)
 }
 
 static void sii902x_remove(struct i2c_client *client)
-
 {
        struct sii902x *sii902x = i2c_get_clientdata(client);
 
-       i2c_mux_del_adapters(sii902x->i2cmux);
        drm_bridge_remove(&sii902x->bridge);
+       i2c_mux_del_adapters(sii902x->i2cmux);
+
+       if (!PTR_ERR_OR_ZERO(sii902x->audio.pdev))
+               platform_device_unregister(sii902x->audio.pdev);
 }
 
 static const struct of_device_id sii902x_dt_ids[] = {
index bd6c24d4213cdf2f6bcb132848330f43e4546efd..f7c6b60629c2ba5b178145977d8490a6e094ce71 100644 (file)
@@ -5491,6 +5491,7 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
  *   - 0 if the new state is valid
  *   - %-ENOSPC, if the new state is invalid, because of BW limitation
  *         @failing_port is set to:
+ *
  *         - The non-root port where a BW limit check failed
  *           with all the ports downstream of @failing_port passing
  *           the BW limit check.
@@ -5499,6 +5500,7 @@ EXPORT_SYMBOL(drm_dp_mst_atomic_enable_dsc);
  *         - %NULL if the BW limit check failed at the root port
  *           with all the ports downstream of the root port passing
  *           the BW limit check.
+ *
  *   - %-EINVAL, if the new state is invalid, because the root port has
  *     too many payloads.
  */
index 776f2f0b602debb88a6c820add8d737332f2938e..0ef7bc8848b0798b125f7a65ff04cf4586f13d71 100644
@@ -319,9 +319,9 @@ static void decon_win_set_bldmod(struct decon_context *ctx, unsigned int win,
 static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
                                 struct drm_framebuffer *fb)
 {
-       struct exynos_drm_plane plane = ctx->planes[win];
+       struct exynos_drm_plane *plane = &ctx->planes[win];
        struct exynos_drm_plane_state *state =
-               to_exynos_plane_state(plane.base.state);
+               to_exynos_plane_state(plane->base.state);
        unsigned int alpha = state->base.alpha;
        unsigned int pixel_alpha;
        unsigned long val;
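
The decon fix above is subtle: `struct exynos_drm_plane plane = ctx->planes[win];` copied the whole plane onto the stack, so the function then read state through a detached snapshot rather than the live object (and paid for the copy on every call). Taking a pointer makes reads track current state. A standalone illustration of the difference:

    #include <assert.h>
    #include <string.h>

    struct plane { int zpos; };
    struct ctx { struct plane planes[4]; };

    int main(void)
    {
            struct ctx c;

            memset(&c, 0, sizeof(c));

            struct plane copy = c.planes[0];        /* by-value snapshot */
            struct plane *ref = &c.planes[0];       /* live reference */

            c.planes[0].zpos = 5;                   /* state changes later */

            assert(ref->zpos == 5);                 /* pointer tracks it */
            assert(copy.zpos == 0);                 /* snapshot went stale */
            return 0;
    }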
index a9f1c5c058940178c8318484fcea422f1428de69..f2145227a1e0ce889d2ce0a3926a79e64c832fc9 100644
@@ -480,7 +480,7 @@ static void fimd_commit(struct exynos_drm_crtc *crtc)
        struct fimd_context *ctx = crtc->ctx;
        struct drm_display_mode *mode = &crtc->base.state->adjusted_mode;
        const struct fimd_driver_data *driver_data = ctx->driver_data;
-       void *timing_base = ctx->regs + driver_data->timing_base;
+       void __iomem *timing_base = ctx->regs + driver_data->timing_base;
        u32 val;
 
        if (ctx->suspended)
@@ -661,9 +661,9 @@ static void fimd_win_set_bldmod(struct fimd_context *ctx, unsigned int win,
 static void fimd_win_set_pixfmt(struct fimd_context *ctx, unsigned int win,
                                struct drm_framebuffer *fb, int width)
 {
-       struct exynos_drm_plane plane = ctx->planes[win];
+       struct exynos_drm_plane *plane = &ctx->planes[win];
        struct exynos_drm_plane_state *state =
-               to_exynos_plane_state(plane.base.state);
+               to_exynos_plane_state(plane->base.state);
        uint32_t pixel_format = fb->format->format;
        unsigned int alpha = state->base.alpha;
        u32 val = WINCONx_ENWIN;
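
Besides the same plane-copy fix as in the decon driver, the fimd hunk adds the missing `__iomem` annotation: ctx->regs is a mapped register window, and pointer arithmetic on it still yields an I/O pointer, which sparse tracks as a separate address space. A kernel-style sketch of the convention (the function name is made up for illustration):

    #include <linux/io.h>
    #include <linux/types.h>

    /* __iomem pointers are never dereferenced directly; all accesses go
     * through the MMIO accessors so sparse can flag mixed-up pointers. */
    static u32 read_timing_reg(void __iomem *regs, unsigned long timing_base)
    {
            void __iomem *timing = regs + timing_base;      /* still __iomem */

            return readl(timing);
    }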
index e9a769590415dcd0d7899df16254d7c20cdea8b1..180507a477009d6e424cc5aede8e18255127b3f1 100644
@@ -1341,7 +1341,7 @@ static int __maybe_unused gsc_runtime_resume(struct device *dev)
        for (i = 0; i < ctx->num_clocks; i++) {
                ret = clk_prepare_enable(ctx->clocks[i]);
                if (ret) {
-                       while (--i > 0)
+                       while (--i >= 0)
                                clk_disable_unprepare(ctx->clocks[i]);
                        return ret;
                }
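
The gsc change is a one-character off-by-one: with `while (--i > 0)` the error path never reaches index 0, leaking one enabled clock on every partial failure, while `>=` unwinds every clock that was successfully enabled. A standalone demonstration:

    #include <stdio.h>

    /* Enable n clocks; clock 'fail_at' fails.  The unwind must disable
     * exactly the clocks that were enabled, including index 0. */
    static int enable_all(int n, int fail_at)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (i == fail_at) {             /* enable failed */
                            while (--i >= 0)        /* '> 0' would skip clk 0 */
                                    printf("disable clk %d\n", i);
                            return -1;
                    }
                    printf("enable clk %d\n", i);
            }
            return 0;
    }

    int main(void)
    {
            enable_all(4, 2);   /* enables 0,1; fails at 2; disables 1 then 0 */
            return 0;
    }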
index e777686190ca241f0ed288b2ad32645d41f1a288..c13f14edb50889baa604b044d2324a371e444ed5 100644
@@ -17,7 +17,6 @@ subdir-ccflags-y += $(call cc-option, -Wunused-const-variable)
 subdir-ccflags-y += $(call cc-option, -Wpacked-not-aligned)
 subdir-ccflags-y += $(call cc-option, -Wformat-overflow)
 subdir-ccflags-y += $(call cc-option, -Wformat-truncation)
-subdir-ccflags-y += $(call cc-option, -Wstringop-overflow)
 subdir-ccflags-y += $(call cc-option, -Wstringop-truncation)
 # The following turn off the warnings enabled by -Wextra
 ifeq ($(findstring 2, $(KBUILD_EXTRA_WARN)),)
index ac456a2275dbad62cb9a4ac7f706333c73dd03aa..eda4a8b885904de71bb6e3bb1998fa1242a1b9a7 100644
@@ -1155,6 +1155,7 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
        }
 
        intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_INIT_OTP);
+       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
 
        /* ensure all panel commands dispatched before enabling transcoder */
        wait_for_cmds_dispatched_to_panel(encoder);
@@ -1255,8 +1256,6 @@ static void gen11_dsi_enable(struct intel_atomic_state *state,
        /* step6d: enable dsi transcoder */
        gen11_dsi_enable_transcoder(encoder);
 
-       intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
-
        /* step7: enable backlight */
        intel_backlight_enable(crtc_state, conn_state);
        intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
index 8f702c3fc62d483e6ba92d4d02537576975441ae..57bbf3e3af92fbb0325d0c41765f7a0f0d0ac806 100644
@@ -1525,8 +1525,18 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
         * can rely on frontbuffer tracking.
         */
        mask = EDP_PSR_DEBUG_MASK_MEMUP |
-              EDP_PSR_DEBUG_MASK_HPD |
-              EDP_PSR_DEBUG_MASK_LPSP;
+              EDP_PSR_DEBUG_MASK_HPD;
+
+       /*
+        * For some unknown reason on HSW non-ULT (or at least on
+        * Dell Latitude E6540) external displays start to flicker
+        * when PSR is enabled on the eDP. SR/PC6 residency is much
+        * higher than should be possible with an external display.
+        * As a workaround leave LPSP unmasked to prevent PSR entry
+        * when external displays are active.
+        */
+       if (DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL_ULT(dev_priv))
+               mask |= EDP_PSR_DEBUG_MASK_LPSP;
 
        if (DISPLAY_VER(dev_priv) < 20)
                mask |= EDP_PSR_DEBUG_MASK_MAX_SLEEP;
index 5057d976fa578cebe2e9e847c6e78634d2b08968..ca762ea5541361bb023e8b0288470502797f1a15 100644
@@ -62,7 +62,7 @@ nouveau_fence_signal(struct nouveau_fence *fence)
        if (test_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags)) {
                struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
 
-               if (atomic_dec_and_test(&fctx->notify_ref))
+               if (!--fctx->notify_ref)
                        drop = 1;
        }
 
@@ -103,7 +103,6 @@ nouveau_fence_context_kill(struct nouveau_fence_chan *fctx, int error)
 void
 nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
 {
-       cancel_work_sync(&fctx->allow_block_work);
        nouveau_fence_context_kill(fctx, 0);
        nvif_event_dtor(&fctx->event);
        fctx->dead = 1;
@@ -168,18 +167,6 @@ nouveau_fence_wait_uevent_handler(struct nvif_event *event, void *repv, u32 repc
        return ret;
 }
 
-static void
-nouveau_fence_work_allow_block(struct work_struct *work)
-{
-       struct nouveau_fence_chan *fctx = container_of(work, struct nouveau_fence_chan,
-                                                      allow_block_work);
-
-       if (atomic_read(&fctx->notify_ref) == 0)
-               nvif_event_block(&fctx->event);
-       else
-               nvif_event_allow(&fctx->event);
-}
-
 void
 nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
 {
@@ -191,7 +178,6 @@ nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_cha
        } args;
        int ret;
 
-       INIT_WORK(&fctx->allow_block_work, nouveau_fence_work_allow_block);
        INIT_LIST_HEAD(&fctx->flip);
        INIT_LIST_HEAD(&fctx->pending);
        spin_lock_init(&fctx->lock);
@@ -535,19 +521,15 @@ static bool nouveau_fence_enable_signaling(struct dma_fence *f)
        struct nouveau_fence *fence = from_fence(f);
        struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
        bool ret;
-       bool do_work;
 
-       if (atomic_inc_return(&fctx->notify_ref) == 0)
-               do_work = true;
+       if (!fctx->notify_ref++)
+               nvif_event_allow(&fctx->event);
 
        ret = nouveau_fence_no_signaling(f);
        if (ret)
                set_bit(DMA_FENCE_FLAG_USER_BITS, &fence->base.flags);
-       else if (atomic_dec_and_test(&fctx->notify_ref))
-               do_work = true;
-
-       if (do_work)
-               schedule_work(&fctx->allow_block_work);
+       else if (!--fctx->notify_ref)
+               nvif_event_block(&fctx->event);
 
        return ret;
 }
index 28f5cf013b8983240204d028c8367249a63912e0..64d33ae7f35610fe763cd34f717335ab7b24ea4e 100644
@@ -3,7 +3,6 @@
 #define __NOUVEAU_FENCE_H__
 
 #include <linux/dma-fence.h>
-#include <linux/workqueue.h>
 #include <nvif/event.h>
 
 struct nouveau_drm;
@@ -46,9 +45,7 @@ struct nouveau_fence_chan {
        char name[32];
 
        struct nvif_event event;
-       struct work_struct allow_block_work;
-       atomic_t notify_ref;
-       int dead, killed;
+       int notify_ref, dead, killed;
 };
 
 struct nouveau_fence_priv {
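
The nouveau hunks revert notify_ref from an atomic_t plus a deferred work item back to a plain integer: since every update site runs with fctx->lock held (which is what makes the revert safe in the first place), the 0-to-1 and 1-to-0 transitions can unmask or mask the event synchronously, and the workqueue indirection, including the cancel_work_sync() in teardown, disappears. A hedged sketch of the gating counter; the helper names are invented, and holding fctx->lock is assumed from the surrounding driver:

    /* Callers hold fctx->lock, so a plain int is race-free and the event
     * can be (un)masked inline at the 0 <-> 1 edges. */
    static void notify_get(struct nouveau_fence_chan *fctx)
    {
            if (!fctx->notify_ref++)        /* 0 -> 1: first listener */
                    nvif_event_allow(&fctx->event);
    }

    static void notify_put(struct nouveau_fence_chan *fctx)
    {
            if (!--fctx->notify_ref)        /* 1 -> 0: nobody listening */
                    nvif_event_block(&fctx->event);
    }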
index dad938cf6decfb0658a73439a6ca602c78fce2fb..8f3783742208b60d8b5b9ad7c6e2ceab4e9fc9e4 100644
@@ -539,6 +539,8 @@ config DRM_PANEL_RAYDIUM_RM692E5
        depends on OF
        depends on DRM_MIPI_DSI
        depends on BACKLIGHT_CLASS_DEVICE
+       select DRM_DISPLAY_DP_HELPER
+       select DRM_DISPLAY_HELPER
        help
          Say Y here if you want to enable support for Raydium RM692E5-based
          display panels, such as the one found in the Fairphone 5 smartphone.
index ea5a857793827af1a0bfe90d88bf2a3a71065f11..f23d8832a1ad055483b1f513557cb3d2807e3692 100644
@@ -309,7 +309,7 @@ static const struct s6d7aa0_panel_desc s6d7aa0_lsl080al02_desc = {
        .off_func = s6d7aa0_lsl080al02_off,
        .drm_mode = &s6d7aa0_lsl080al02_mode,
        .mode_flags = MIPI_DSI_MODE_VSYNC_FLUSH | MIPI_DSI_MODE_VIDEO_NO_HFP,
-       .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+       .bus_flags = 0,
 
        .has_backlight = false,
        .use_passwd3 = false,
index 2214cb09678cd6a234359c2cb7972c9beb3f5851..d493ee735c7349b2ae1a21abff870859c0ea2af4 100644
@@ -3948,6 +3948,7 @@ static const struct panel_desc tianma_tm070jdhg30 = {
        },
        .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
        .connector_type = DRM_MODE_CONNECTOR_LVDS,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH,
 };
 
 static const struct panel_desc tianma_tm070jvhg33 = {
@@ -3960,6 +3961,7 @@ static const struct panel_desc tianma_tm070jvhg33 = {
        },
        .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
        .connector_type = DRM_MODE_CONNECTOR_LVDS,
+       .bus_flags = DRM_BUS_FLAG_DE_HIGH,
 };
 
 static const struct display_timing tianma_tm070rvhg71_timing = {
index 550492a7a031d7827b2e167098c495908bee82aa..85f082396d42da982589fe69183c71743c69a89b 100644
@@ -1178,21 +1178,20 @@ static void drm_sched_run_job_work(struct work_struct *w)
        struct drm_sched_entity *entity;
        struct dma_fence *fence;
        struct drm_sched_fence *s_fence;
-       struct drm_sched_job *sched_job;
+       struct drm_sched_job *sched_job = NULL;
        int r;
 
        if (READ_ONCE(sched->pause_submit))
                return;
 
-       entity = drm_sched_select_entity(sched);
+       /* Find entity with a ready job */
+       while (!sched_job && (entity = drm_sched_select_entity(sched))) {
+               sched_job = drm_sched_entity_pop_job(entity);
+               if (!sched_job)
+                       complete_all(&entity->entity_idle);
+       }
        if (!entity)
-               return;
-
-       sched_job = drm_sched_entity_pop_job(entity);
-       if (!sched_job) {
-               complete_all(&entity->entity_idle);
                return; /* No more work */
-       }
 
        s_fence = sched_job->s_fence;
 
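The scheduler hunk folds entity selection and job popping into one loop: a selected entity that turns out to have nothing ready is marked idle and the search moves on, instead of the work item giving up after the first empty candidate. The generic shape of the pattern, as a standalone sketch (select and mark_idle stand in for the drm_sched_* calls):

    #include <stddef.h>

    struct job { struct job *next; };
    struct entity { struct job *jobs; };

    static struct job *pop_job(struct entity *e)
    {
            struct job *j = e->jobs;

            if (j)
                    e->jobs = j->next;
            return j;
    }

    /* Keep selecting candidates until one actually yields a job; empty
     * candidates are released (marked idle) so they can be picked again
     * once they have work. */
    static struct job *select_ready_job(struct entity *(*select)(void),
                                        void (*mark_idle)(struct entity *))
    {
            struct entity *e;
            struct job *j = NULL;

            while (!j && (e = select())) {
                    j = pop_job(e);
                    if (!j)
                            mark_idle(e);
            }
            return j;       /* NULL: no entity had work */
    }
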
index 4e9247cf9977f5677126ffbdcf56c97446c769b4..1eb0c304f9607f6ae4034638a2cf8e3ee8da06ca 100644
@@ -188,13 +188,13 @@ out:
 
 static void drm_test_mm_debug(struct kunit *test)
 {
+       struct drm_printer p = drm_debug_printer(test->name);
        struct drm_mm mm;
        struct drm_mm_node nodes[2];
 
        /* Create a small drm_mm with a couple of nodes and a few holes, and
         * check that the debug iterator doesn't explode over a trivial drm_mm.
         */
-
        drm_mm_init(&mm, 0, 4096);
 
        memset(nodes, 0, sizeof(nodes));
@@ -209,6 +209,9 @@ static void drm_test_mm_debug(struct kunit *test)
        KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[1]),
                               "failed to reserve node[0] {start=%lld, size=%lld)\n",
                               nodes[0].start, nodes[0].size);
+
+       drm_mm_print(&mm, &p);
+       KUNIT_SUCCEED(test);
 }
 
 static bool expect_insert(struct kunit *test, struct drm_mm *mm,
index f5187b384ae9ac8eedede8e6a0d4d56eb8af1670..76027960054f1140e768ae21b30e5a3015437d02 100644
@@ -95,11 +95,17 @@ static int ttm_global_init(void)
        ttm_pool_mgr_init(num_pages);
        ttm_tt_mgr_init(num_pages, num_dma32);
 
-       glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
+       glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32 |
+                                          __GFP_NOWARN);
 
+       /* Retry without GFP_DMA32 for platforms DMA32 is not available */
        if (unlikely(glob->dummy_read_page == NULL)) {
-               ret = -ENOMEM;
-               goto out;
+               glob->dummy_read_page = alloc_page(__GFP_ZERO);
+               if (unlikely(glob->dummy_read_page == NULL)) {
+                       ret = -ENOMEM;
+                       goto out;
+               }
+               pr_warn("Using GFP_DMA32 fallback for dummy_read_page\n");
        }
 
        INIT_LIST_HEAD(&glob->device_list);
@@ -195,7 +201,7 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
                    bool use_dma_alloc, bool use_dma32)
 {
        struct ttm_global *glob = &ttm_glob;
-       int ret;
+       int ret, nid;
 
        if (WARN_ON(vma_manager == NULL))
                return -EINVAL;
@@ -215,7 +221,12 @@ int ttm_device_init(struct ttm_device *bdev, const struct ttm_device_funcs *func
 
        ttm_sys_man_init(bdev);
 
-       ttm_pool_init(&bdev->pool, dev, dev_to_node(dev), use_dma_alloc, use_dma32);
+       if (dev)
+               nid = dev_to_node(dev);
+       else
+               nid = NUMA_NO_NODE;
+
+       ttm_pool_init(&bdev->pool, dev, nid, use_dma_alloc, use_dma32);
 
        bdev->vma_manager = vma_manager;
        spin_lock_init(&bdev->lru_lock);
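
Two defensive changes in the TTM file above: the dummy read page is first requested from the DMA32 zone with __GFP_NOWARN and, when that zone does not exist on the platform, retried without the restriction; and ttm_device_init() now passes NUMA_NO_NODE to ttm_pool_init() instead of calling dev_to_node() on a possibly NULL dev. A standalone sketch of the try-strict-then-relax allocation pattern (the alloc_* functions are userspace stand-ins for alloc_page() with and without GFP_DMA32):

    #include <stdio.h>
    #include <stdlib.h>

    static void *alloc_dma32_quiet(void) { return NULL; /* no DMA32 zone */ }
    static void *alloc_normal(void)      { return calloc(1, 4096); }

    int main(void)
    {
            void *page = alloc_dma32_quiet();   /* preferred pool, silently */

            if (!page) {
                    page = alloc_normal();      /* relaxed fallback */
                    if (!page)
                            return 1;           /* only now a hard error */
                    fprintf(stderr, "warn: falling back past DMA32\n");
            }
            free(page);
            return 0;
    }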
index fcff41dd2315b710dc9de6ccdb361922c61d2602..88f63d526b22365b42b90e90d5b451a56e3fda52 100644
@@ -147,6 +147,13 @@ v3d_job_allocate(void **container, size_t size)
        return 0;
 }
 
+static void
+v3d_job_deallocate(void **container)
+{
+       kfree(*container);
+       *container = NULL;
+}
+
 static int
 v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv,
             struct v3d_job *job, void (*free)(struct kref *ref),
@@ -273,8 +280,10 @@ v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
 
        ret = v3d_job_init(v3d, file_priv, &(*job)->base,
                           v3d_job_free, args->in_sync, se, V3D_CSD);
-       if (ret)
+       if (ret) {
+               v3d_job_deallocate((void *)job);
                return ret;
+       }
 
        ret = v3d_job_allocate((void *)clean_job, sizeof(**clean_job));
        if (ret)
@@ -282,8 +291,10 @@ v3d_setup_csd_jobs_and_bos(struct drm_file *file_priv,
 
        ret = v3d_job_init(v3d, file_priv, *clean_job,
                           v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
-       if (ret)
+       if (ret) {
+               v3d_job_deallocate((void *)clean_job);
                return ret;
+       }
 
        (*job)->args = *args;
 
@@ -860,8 +871,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 
        ret = v3d_job_init(v3d, file_priv, &render->base,
                           v3d_render_job_free, args->in_sync_rcl, &se, V3D_RENDER);
-       if (ret)
+       if (ret) {
+               v3d_job_deallocate((void *)&render);
                goto fail;
+       }
 
        render->start = args->rcl_start;
        render->end = args->rcl_end;
@@ -874,8 +887,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 
                ret = v3d_job_init(v3d, file_priv, &bin->base,
                                   v3d_job_free, args->in_sync_bcl, &se, V3D_BIN);
-               if (ret)
+               if (ret) {
+                       v3d_job_deallocate((void *)&bin);
                        goto fail;
+               }
 
                bin->start = args->bcl_start;
                bin->end = args->bcl_end;
@@ -892,8 +907,10 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
 
                ret = v3d_job_init(v3d, file_priv, clean_job,
                                   v3d_job_free, 0, NULL, V3D_CACHE_CLEAN);
-               if (ret)
+               if (ret) {
+                       v3d_job_deallocate((void *)&clean_job);
                        goto fail;
+               }
 
                last_job = clean_job;
        } else {
@@ -1015,8 +1032,10 @@ v3d_submit_tfu_ioctl(struct drm_device *dev, void *data,
 
        ret = v3d_job_init(v3d, file_priv, &job->base,
                           v3d_job_free, args->in_sync, &se, V3D_TFU);
-       if (ret)
+       if (ret) {
+               v3d_job_deallocate((void *)&job);
                goto fail;
+       }
 
        job->base.bo = kcalloc(ARRAY_SIZE(args->bo_handles),
                               sizeof(*job->base.bo), GFP_KERNEL);
@@ -1233,8 +1252,10 @@ v3d_submit_cpu_ioctl(struct drm_device *dev, void *data,
 
        ret = v3d_job_init(v3d, file_priv, &cpu_job->base,
                           v3d_job_free, 0, &se, V3D_CPU);
-       if (ret)
+       if (ret) {
+               v3d_job_deallocate((void *)&cpu_job);
                goto fail;
+       }
 
        clean_job = cpu_job->indirect_csd.clean_job;
        csd_job = cpu_job->indirect_csd.job;
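
Every v3d hunk above plugs the same leak: v3d_job_allocate() hands back a container that a subsequent v3d_job_init() failure previously left allocated. The new v3d_job_deallocate() frees the container and NULLs the caller's pointer so later unwind code cannot touch or double-free it. A standalone sketch of the contract (names are illustrative, not the v3d API):

    #include <stdlib.h>

    static int job_allocate(void **container, size_t size)
    {
            *container = calloc(1, size);
            return *container ? 0 : -1;
    }

    static void job_deallocate(void **container)
    {
            free(*container);
            *container = NULL;      /* stale pointer can't be double-freed */
    }

    static int submit(void)
    {
            void *job;
            int ret = job_allocate(&job, 64);

            if (ret)
                    return ret;

            ret = -1;               /* pretend the init step failed here */
            if (ret) {
                    job_deallocate(&job);
                    return ret;     /* container no longer leaks */
            }
            return 0;
    }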
index 5f19550cc845360ada430477180405fe66bf84b9..68d9f6116bdfc3522ee5d6d94ef2bb763ec81090 100644
@@ -35,12 +35,10 @@ static inline int i915_gem_object_read_from_page(struct xe_bo *bo,
                                          u32 ofs, u64 *ptr, u32 size)
 {
        struct ttm_bo_kmap_obj map;
-       void *virtual;
+       void *src;
        bool is_iomem;
        int ret;
 
-       XE_WARN_ON(size != 8);
-
        ret = xe_bo_lock(bo, true);
        if (ret)
                return ret;
@@ -50,11 +48,12 @@ static inline int i915_gem_object_read_from_page(struct xe_bo *bo,
                goto out_unlock;
 
        ofs &= ~PAGE_MASK;
-       virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
+       src = ttm_kmap_obj_virtual(&map, &is_iomem);
+       src += ofs;
        if (is_iomem)
-               *ptr = readq((void __iomem *)(virtual + ofs));
+               memcpy_fromio(ptr, (void __iomem *)src, size);
        else
-               *ptr = *(u64 *)(virtual + ofs);
+               memcpy(ptr, src, size);
 
        ttm_bo_kunmap(&map);
 out_unlock:
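
The xe helper above had two problems: it hard-coded an 8-byte read (warning on anything else) and used readq()/plain dereference on a mapping that may be I/O memory. memcpy_fromio() handles arbitrary sizes through the proper accessors. A kernel-style sketch of the branch, following the diff:

    #include <linux/io.h>
    #include <linux/string.h>
    #include <linux/types.h>

    /* Copy 'size' bytes out of a mapping that may be RAM or MMIO. */
    static void copy_from_map(void *dst, void *src, bool is_iomem, size_t size)
    {
            if (is_iomem)
                    memcpy_fromio(dst, (const void __iomem *)src, size);
            else
                    memcpy(dst, src, size);
    }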
index a53c22a1958247cbd703264aeb49195620b6754a..b4715b78ef3bf952bacd5ed7e1739c2fe0cfa813 100644
@@ -74,9 +74,6 @@ static const struct platform_test_case cases[] = {
        SUBPLATFORM_CASE(DG2, G11, B1),
        SUBPLATFORM_CASE(DG2, G12, A0),
        SUBPLATFORM_CASE(DG2, G12, A1),
-       PLATFORM_CASE(PVC, B0),
-       PLATFORM_CASE(PVC, B1),
-       PLATFORM_CASE(PVC, C0),
        GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
        GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
        GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
index b8d8da5466708c6903ccb3ade3852a7ac9911235..1f0b4b9ce84f585ea599ccaf7f4641c3d139121f 100644
@@ -613,7 +613,7 @@ void xe_device_wmb(struct xe_device *xe)
 u32 xe_device_ccs_bytes(struct xe_device *xe, u64 size)
 {
        return xe_device_has_flat_ccs(xe) ?
-               DIV_ROUND_UP(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
+               DIV_ROUND_UP_ULL(size, NUM_BYTES_PER_CCS_BYTE(xe)) : 0;
 }
 
 bool xe_device_mem_access_ongoing(struct xe_device *xe)
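
DIV_ROUND_UP() expands to plain C division; with a u64 dividend on a 32-bit target that emits an out-of-line libgcc helper the kernel does not link, which is the usual reason for switching to DIV_ROUND_UP_ULL() and its do_div()-based arithmetic. A sketch of the distinction (illustrative, not the exact linux/math.h definitions):

    /* Illustrative only; the in-tree macros differ in detail. */
    #define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))     /* plain '/' */

    static inline u64 div_round_up_ull(u64 n, u32 d)
    {
            u64 tmp = n + d - 1;

            do_div(tmp, d);     /* kernel's 32-bit-safe 64/32 division */
            return tmp;
    }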
index 64ed303728fda98d2c4edb2a4a0e1f0b810812d2..da2627ed6ae7a94114ec4e1d0aa6f04495103540 100644
@@ -175,7 +175,7 @@ static int xe_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
        return 0;
 }
 
-const struct dma_buf_ops xe_dmabuf_ops = {
+static const struct dma_buf_ops xe_dmabuf_ops = {
        .attach = xe_dma_buf_attach,
        .detach = xe_dma_buf_detach,
        .pin = xe_dma_buf_pin,
index 6ef2aa1eae8b095e958e74fda4b5c42e205436bf..174ed2185481e32d568551e62f1839181a1771d4 100644
@@ -419,7 +419,7 @@ static int xe_hwmon_pcode_read_i1(struct xe_gt *gt, u32 *uval)
 
        return xe_pcode_read(gt, PCODE_MBOX(PCODE_POWER_SETUP,
                             POWER_SETUP_SUBCOMMAND_READ_I1, 0),
-                            uval, 0);
+                            uval, NULL);
 }
 
 static int xe_hwmon_pcode_write_i1(struct xe_gt *gt, u32 uval)
index e05e9e7282b68abdcab839a9134efd09e60750f2..5c6c5462425217c9301560a626d67ec7386bd418 100644
@@ -472,7 +472,7 @@ static void emit_pte(struct xe_migrate *m,
        /* Indirect access needs compression enabled uncached PAT index */
        if (GRAPHICS_VERx100(xe) >= 2000)
                pat_index = is_comp_pte ? xe->pat.idx[XE_CACHE_NONE_COMPRESSION] :
-                                         xe->pat.idx[XE_CACHE_NONE];
+                                         xe->pat.idx[XE_CACHE_WB];
        else
                pat_index = xe->pat.idx[XE_CACHE_WB];
 
@@ -760,14 +760,14 @@ struct dma_fence *xe_migrate_copy(struct xe_migrate *m,
                if (src_is_vram && xe_migrate_allow_identity(src_L0, &src_it))
                        xe_res_next(&src_it, src_L0);
                else
-                       emit_pte(m, bb, src_L0_pt, src_is_vram, true, &src_it, src_L0,
-                                src);
+                       emit_pte(m, bb, src_L0_pt, src_is_vram, copy_system_ccs,
+                                &src_it, src_L0, src);
 
                if (dst_is_vram && xe_migrate_allow_identity(src_L0, &dst_it))
                        xe_res_next(&dst_it, src_L0);
                else
-                       emit_pte(m, bb, dst_L0_pt, dst_is_vram, true, &dst_it, src_L0,
-                                dst);
+                       emit_pte(m, bb, dst_L0_pt, dst_is_vram, copy_system_ccs,
+                                &dst_it, src_L0, dst);
 
                if (copy_system_ccs)
                        emit_pte(m, bb, ccs_pt, false, false, &ccs_it, ccs_size, src);
@@ -1009,8 +1009,8 @@ struct dma_fence *xe_migrate_clear(struct xe_migrate *m,
                if (clear_vram && xe_migrate_allow_identity(clear_L0, &src_it))
                        xe_res_next(&src_it, clear_L0);
                else
-                       emit_pte(m, bb, clear_L0_pt, clear_vram, true, &src_it, clear_L0,
-                                dst);
+                       emit_pte(m, bb, clear_L0_pt, clear_vram, clear_system_ccs,
+                                &src_it, clear_L0, dst);
 
                bb->cs[bb->len++] = MI_BATCH_BUFFER_END;
                update_idx = bb->len;
index c8c5d74b6e9041ec53c38ba81d184b83037427ab..5f6b53ea5528b2c904ce0c4ee30e39c4a16139b7 100644
@@ -272,8 +272,8 @@ int xe_mmio_probe_vram(struct xe_device *xe)
                drm_info(&xe->drm, "VRAM[%u, %u]: Actual physical size %pa, usable size exclude stolen %pa, CPU accessible size %pa\n", id,
                         tile->id, &tile->mem.vram.actual_physical_size, &tile->mem.vram.usable_size, &tile->mem.vram.io_size);
                drm_info(&xe->drm, "VRAM[%u, %u]: DPA range: [%pa-%llx], io range: [%pa-%llx]\n", id, tile->id,
-                        &tile->mem.vram.dpa_base, tile->mem.vram.dpa_base + tile->mem.vram.actual_physical_size,
-                        &tile->mem.vram.io_start, tile->mem.vram.io_start + tile->mem.vram.io_size);
+                        &tile->mem.vram.dpa_base, tile->mem.vram.dpa_base + (u64)tile->mem.vram.actual_physical_size,
+                        &tile->mem.vram.io_start, tile->mem.vram.io_start + (u64)tile->mem.vram.io_size);
 
                /* calculate total size using tile size to get the correct HW sizing */
                total_size += tile_size;
index 10b6995fbf294690a36234dc3c36bf741245b7a9..53833ab81424ceeca8edd95e15a2c6a34e681f6c 100644
@@ -1855,10 +1855,8 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
        mutex_lock(&xef->vm.lock);
        err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL);
        mutex_unlock(&xef->vm.lock);
-       if (err) {
-               xe_vm_close_and_put(vm);
-               return err;
-       }
+       if (err)
+               goto err_close_and_put;
 
        if (xe->info.has_asid) {
                mutex_lock(&xe->usm.lock);
@@ -1866,11 +1864,9 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
                                      XA_LIMIT(1, XE_MAX_ASID - 1),
                                      &xe->usm.next_asid, GFP_KERNEL);
                mutex_unlock(&xe->usm.lock);
-               if (err < 0) {
-                       xe_vm_close_and_put(vm);
-                       return err;
-               }
-               err = 0;
+               if (err < 0)
+                       goto err_free_id;
+
                vm->usm.asid = asid;
        }
 
@@ -1888,6 +1884,15 @@ int xe_vm_create_ioctl(struct drm_device *dev, void *data,
 #endif
 
        return 0;
+
+err_free_id:
+       mutex_lock(&xef->vm.lock);
+       xa_erase(&xef->vm.xa, id);
+       mutex_unlock(&xef->vm.lock);
+err_close_and_put:
+       xe_vm_close_and_put(vm);
+
+       return err;
 }
 
 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data,
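
The xe_vm_create_ioctl() rework replaces two copy-pasted cleanup blocks with ordered labels and fixes the underlying bug: once the VM id has been published through xa_alloc(), any later failure must also xa_erase() it, or a stale xarray entry keeps pointing at the destroyed VM. A sketch of the two-stage unwind (all names hypothetical):

    static int create_ioctl(struct ctx *c)
    {
            struct vm *vm;
            u32 id;
            int err;

            vm = vm_create(c);
            if (IS_ERR(vm))
                    return PTR_ERR(vm);

            err = publish_id(c, &id, vm);       /* like xa_alloc() */
            if (err)
                    goto err_close_and_put;

            err = assign_asid(vm);              /* later setup step */
            if (err)
                    goto err_free_id;           /* retract the visible id */

            return 0;

    err_free_id:
            unpublish_id(c, id);                /* like xa_erase() */
    err_close_and_put:
            vm_destroy(vm);
            return err;
    }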
index 24f0d799fd98ed318f2f1d2fc7b682d5ebf77e4c..286f8b16c7bde7fbc0bca0705d470748d9f5eeb5 100644
@@ -2262,7 +2262,7 @@ static void fix_read_error(struct r1conf *conf, struct r1bio *r1_bio)
        int sectors = r1_bio->sectors;
        int read_disk = r1_bio->read_disk;
        struct mddev *mddev = conf->mddev;
-       struct md_rdev *rdev = rcu_dereference(conf->mirrors[read_disk].rdev);
+       struct md_rdev *rdev = conf->mirrors[read_disk].rdev;
 
        if (exceed_read_errors(mddev, rdev)) {
                r1_bio->bios[r1_bio->read_disk] = IO_BLOCKED;
index 41a832dd1426bae695dc90b385976b2bf4b7a304..b6bf8f232f4880ffcd1f7ff0bd00ddb0bbebccb9 100644
@@ -989,7 +989,7 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory,
        bool no_previous_buffers = !q_num_bufs;
        int ret = 0;
 
-       if (q->num_buffers == q->max_num_buffers) {
+       if (q_num_bufs == q->max_num_buffers) {
                dprintk(q, 1, "maximum number of buffers already allocated\n");
                return -ENOBUFS;
        }
index 54d572c3b515d67722c4dbe7490437bc83c30b96..c575198e83547ab99719eca7e81dc6e7c0e601d4 100644
@@ -671,8 +671,20 @@ int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
 }
 EXPORT_SYMBOL(vb2_querybuf);
 
-static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
+static void vb2_set_flags_and_caps(struct vb2_queue *q, u32 memory,
+                                  u32 *flags, u32 *caps, u32 *max_num_bufs)
 {
+       if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
+               /*
+                * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
+                * but in order to avoid bugs we zero out all bits.
+                */
+               *flags = 0;
+       } else {
+               /* Clear all unknown flags. */
+               *flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
+       }
+
        *caps = V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS;
        if (q->io_modes & VB2_MMAP)
                *caps |= V4L2_BUF_CAP_SUPPORTS_MMAP;
@@ -686,21 +698,9 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps)
                *caps |= V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS;
        if (q->supports_requests)
                *caps |= V4L2_BUF_CAP_SUPPORTS_REQUESTS;
-}
-
-static void validate_memory_flags(struct vb2_queue *q,
-                                 int memory,
-                                 u32 *flags)
-{
-       if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) {
-               /*
-                * This needs to clear V4L2_MEMORY_FLAG_NON_COHERENT only,
-                * but in order to avoid bugs we zero out all bits.
-                */
-               *flags = 0;
-       } else {
-               /* Clear all unknown flags. */
-               *flags &= V4L2_MEMORY_FLAG_NON_COHERENT;
+       if (max_num_bufs) {
+               *max_num_bufs = q->max_num_buffers;
+               *caps |= V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS;
        }
 }
 
@@ -709,8 +709,8 @@ int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
        int ret = vb2_verify_memory_type(q, req->memory, req->type);
        u32 flags = req->flags;
 
-       fill_buf_caps(q, &req->capabilities);
-       validate_memory_flags(q, req->memory, &flags);
+       vb2_set_flags_and_caps(q, req->memory, &flags,
+                              &req->capabilities, NULL);
        req->flags = flags;
        return ret ? ret : vb2_core_reqbufs(q, req->memory,
                                            req->flags, &req->count);
@@ -751,11 +751,9 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
        int ret = vb2_verify_memory_type(q, create->memory, f->type);
        unsigned i;
 
-       fill_buf_caps(q, &create->capabilities);
-       validate_memory_flags(q, create->memory, &create->flags);
        create->index = vb2_get_num_buffers(q);
-       create->max_num_buffers = q->max_num_buffers;
-       create->capabilities |= V4L2_BUF_CAP_SUPPORTS_MAX_NUM_BUFFERS;
+       vb2_set_flags_and_caps(q, create->memory, &create->flags,
+                              &create->capabilities, &create->max_num_buffers);
        if (create->count == 0)
                return ret != -EBUSY ? ret : 0;
 
@@ -1006,8 +1004,8 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv,
        int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type);
        u32 flags = p->flags;
 
-       fill_buf_caps(vdev->queue, &p->capabilities);
-       validate_memory_flags(vdev->queue, p->memory, &flags);
+       vb2_set_flags_and_caps(vdev->queue, p->memory, &flags,
+                              &p->capabilities, NULL);
        p->flags = flags;
        if (res)
                return res;
@@ -1026,12 +1024,11 @@ int vb2_ioctl_create_bufs(struct file *file, void *priv,
                          struct v4l2_create_buffers *p)
 {
        struct video_device *vdev = video_devdata(file);
-       int res = vb2_verify_memory_type(vdev->queue, p->memory,
-                       p->format.type);
+       int res = vb2_verify_memory_type(vdev->queue, p->memory, p->format.type);
 
-       p->index = vdev->queue->num_buffers;
-       fill_buf_caps(vdev->queue, &p->capabilities);
-       validate_memory_flags(vdev->queue, p->memory, &p->flags);
+       p->index = vb2_get_num_buffers(vdev->queue);
+       vb2_set_flags_and_caps(vdev->queue, p->memory, &p->flags,
+                              &p->capabilities, &p->max_num_buffers);
        /*
         * If count == 0, then just check if memory and type are valid.
         * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0.
index bfe4caa79cc9800f7b01839fbbb768c73010a72e..0d90b5820bef7286694129ec0c4ed4f436d399b2 100644
@@ -272,7 +272,7 @@ static const struct wave5_match_data ti_wave521c_data = {
 };
 
 static const struct of_device_id wave5_dt_ids[] = {
-       { .compatible = "ti,k3-j721s2-wave521c", .data = &ti_wave521c_data },
+       { .compatible = "ti,j721s2-wave521c", .data = &ti_wave521c_data },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, wave5_dt_ids);
index 0e0aa40168588fff69ff76fc9fa0b3b442319f58..c5636245f1cad225bbd1a638a0bea855db566235 100644
@@ -100,4 +100,5 @@ static void __exit ns8390_module_exit(void)
 module_init(ns8390_module_init);
 module_exit(ns8390_module_exit);
 #endif /* MODULE */
+MODULE_DESCRIPTION("National Semiconductor 8390 core driver");
 MODULE_LICENSE("GPL");
index 6834742057b3eb041065793057b86d31aba9d2c1..6d429b11e9c6aa5ce0a1ea3a6c3925a3672dd2bc 100644
@@ -102,4 +102,5 @@ static void __exit NS8390p_cleanup_module(void)
 
 module_init(NS8390p_init_module);
 module_exit(NS8390p_cleanup_module);
+MODULE_DESCRIPTION("National Semiconductor 8390 core for ISA driver");
 MODULE_LICENSE("GPL");
index a09f383dd249f1e1782d20de475bdac76a90c54d..828edca8d30c59dec13c8a764fe0f2ed39ceb8ea 100644
@@ -610,4 +610,5 @@ static int init_pcmcia(void)
        return 1;
 }
 
+MODULE_DESCRIPTION("National Semiconductor 8390 Amiga PCMCIA ethernet driver");
 MODULE_LICENSE("GPL");
index 24f49a8ff903ff3ae8496074c19df0235b7965cf..fd9dcdc356e681b4eba0698d7f61bbd0196fcb8f 100644
@@ -270,4 +270,5 @@ static void __exit hydra_cleanup_module(void)
 module_init(hydra_init_module);
 module_exit(hydra_cleanup_module);
 
+MODULE_DESCRIPTION("Zorro-II Hydra 8390 ethernet driver");
 MODULE_LICENSE("GPL");
index 265976e3b64ab227c55924bd8e0581b24b506d42..6cc0e190aa79c129ca65becb4699a5c55e034340 100644
@@ -296,4 +296,5 @@ static void __exit stnic_cleanup(void)
 
 module_init(stnic_probe);
 module_exit(stnic_cleanup);
+MODULE_DESCRIPTION("National Semiconductor DP83902AV ethernet driver");
 MODULE_LICENSE("GPL");
index d70390e9d03d9bfe421554ef9e50ce78123bddfe..c24dd4fe7a10666a25b53fc5246280c9891b10ac 100644
@@ -443,4 +443,5 @@ static void __exit zorro8390_cleanup_module(void)
 module_init(zorro8390_init_module);
 module_exit(zorro8390_cleanup_module);
 
+MODULE_DESCRIPTION("Zorro NS8390-based ethernet driver");
 MODULE_LICENSE("GPL");
index 3e7c8671cd116485252414e3dc3235d80054495d..72df1bb101728872fb8ac4b4905657ce0a67096b 100644
@@ -793,5 +793,6 @@ static struct platform_driver bcm4908_enet_driver = {
 };
 module_platform_driver(bcm4908_enet_driver);
 
+MODULE_DESCRIPTION("Broadcom BCM4908 Gigabit Ethernet driver");
 MODULE_LICENSE("GPL v2");
 MODULE_DEVICE_TABLE(of, bcm4908_enet_of_match);
index 9b83d536169940ea7f2b861c44fe256faed305aa..50b8e97a811d205fb9ed57c37a74dad701607552 100644
@@ -260,4 +260,5 @@ void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
 EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
 
 MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA MDIO helpers");
 MODULE_LICENSE("GPL");
index 6e4f36aaf5db6a6f869141e3c87bac4891cc7e50..36f9bad28e6a90da7456a8ec3e40a38038351c84 100644
@@ -362,4 +362,5 @@ module_init(bgmac_init)
 module_exit(bgmac_exit)
 
 MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit BCMA interface driver");
 MODULE_LICENSE("GPL");
index 0b21fd5bd4575e7a76d52e704051d3e6d30c0f72..77425c7a32dbf882672c9e664b2b006d9bc1e5f9 100644
@@ -298,4 +298,5 @@ static struct platform_driver bgmac_enet_driver = {
 };
 
 module_platform_driver(bgmac_enet_driver);
+MODULE_DESCRIPTION("Broadcom iProc GBit platform interface driver");
 MODULE_LICENSE("GPL");
index 448a1b90de5ebcf6de79a6b749f5bf279875d0e3..6ffdc42294074f86b08e92accb705e7e58409967 100644
@@ -1626,4 +1626,5 @@ int bgmac_enet_resume(struct bgmac *bgmac)
 EXPORT_SYMBOL_GPL(bgmac_enet_resume);
 
 MODULE_AUTHOR("Rafał Miłecki");
+MODULE_DESCRIPTION("Broadcom iProc GBit driver");
 MODULE_LICENSE("GPL");
index 0aacd3c6ed5c0bbf2e02f1ddddc5dc292ae84a07..39845d556bafc949cdf4e599007db26c11b414ac 100644
@@ -3817,7 +3817,7 @@ static int bnxt_alloc_cp_rings(struct bnxt *bp)
 {
        bool sh = !!(bp->flags & BNXT_FLAG_SHARED_RINGS);
        int i, j, rc, ulp_base_vec, ulp_msix;
-       int tcs = netdev_get_num_tc(bp->dev);
+       int tcs = bp->num_tc;
 
        if (!tcs)
                tcs = 1;
@@ -5935,8 +5935,12 @@ static u16 bnxt_get_max_rss_ring(struct bnxt *bp)
 
 int bnxt_get_nr_rss_ctxs(struct bnxt *bp, int rx_rings)
 {
-       if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
-               return DIV_ROUND_UP(rx_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+       if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+               if (!rx_rings)
+                       return 0;
+               return bnxt_calc_nr_ring_pages(rx_rings - 1,
+                                              BNXT_RSS_TABLE_ENTRIES_P5);
+       }
        if (BNXT_CHIP_TYPE_NITRO_A0(bp))
                return 2;
        return 1;
@@ -6926,7 +6930,7 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
                        if (cp < (rx + tx)) {
                                rc = __bnxt_trim_rings(bp, &rx, &tx, cp, false);
                                if (rc)
-                                       return rc;
+                                       goto get_rings_exit;
                                if (bp->flags & BNXT_FLAG_AGG_RINGS)
                                        rx <<= 1;
                                hw_resc->resv_rx_rings = rx;
@@ -6938,8 +6942,9 @@ static int bnxt_hwrm_get_rings(struct bnxt *bp)
                hw_resc->resv_cp_rings = cp;
                hw_resc->resv_stat_ctxs = stats;
        }
+get_rings_exit:
        hwrm_req_drop(bp, req);
-       return 0;
+       return rc;
 }
 
 int __bnxt_hwrm_get_tx_rings(struct bnxt *bp, u16 fid, int *tx_rings)
@@ -7000,10 +7005,11 @@ __bnxt_hwrm_reserve_pf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
 
                req->num_rx_rings = cpu_to_le16(rx_rings);
                if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+                       u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+
                        req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
                        req->num_msix = cpu_to_le16(cp_rings);
-                       req->num_rsscos_ctxs =
-                               cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+                       req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
                } else {
                        req->num_cmpl_rings = cpu_to_le16(cp_rings);
                        req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -7050,8 +7056,10 @@ __bnxt_hwrm_reserve_vf_rings(struct bnxt *bp, int tx_rings, int rx_rings,
        req->num_tx_rings = cpu_to_le16(tx_rings);
        req->num_rx_rings = cpu_to_le16(rx_rings);
        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS) {
+               u16 rss_ctx = bnxt_get_nr_rss_ctxs(bp, ring_grps);
+
                req->num_cmpl_rings = cpu_to_le16(tx_rings + ring_grps);
-               req->num_rsscos_ctxs = cpu_to_le16(DIV_ROUND_UP(ring_grps, 64));
+               req->num_rsscos_ctxs = cpu_to_le16(rss_ctx);
        } else {
                req->num_cmpl_rings = cpu_to_le16(cp_rings);
                req->num_hw_ring_grps = cpu_to_le16(ring_grps);
@@ -9938,7 +9946,7 @@ static int __bnxt_num_tx_to_cp(struct bnxt *bp, int tx, int tx_sets, int tx_xdp)
 
 int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
 {
-       int tcs = netdev_get_num_tc(bp->dev);
+       int tcs = bp->num_tc;
 
        if (!tcs)
                tcs = 1;
@@ -9947,7 +9955,7 @@ int bnxt_num_tx_to_cp(struct bnxt *bp, int tx)
 
 static int bnxt_num_cp_to_tx(struct bnxt *bp, int tx_cp)
 {
-       int tcs = netdev_get_num_tc(bp->dev);
+       int tcs = bp->num_tc;
 
        return (tx_cp - bp->tx_nr_rings_xdp) * tcs +
               bp->tx_nr_rings_xdp;
@@ -9977,7 +9985,7 @@ static void bnxt_setup_msix(struct bnxt *bp)
        struct net_device *dev = bp->dev;
        int tcs, i;
 
-       tcs = netdev_get_num_tc(dev);
+       tcs = bp->num_tc;
        if (tcs) {
                int i, off, count;
 
@@ -10009,8 +10017,10 @@ static void bnxt_setup_inta(struct bnxt *bp)
 {
        const int len = sizeof(bp->irq_tbl[0].name);
 
-       if (netdev_get_num_tc(bp->dev))
+       if (bp->num_tc) {
                netdev_reset_tc(bp->dev);
+               bp->num_tc = 0;
+       }
 
        snprintf(bp->irq_tbl[0].name, len, "%s-%s-%d", bp->dev->name, "TxRx",
                 0);
@@ -10236,8 +10246,8 @@ static void bnxt_clear_int_mode(struct bnxt *bp)
 
 int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
 {
-       int tcs = netdev_get_num_tc(bp->dev);
        bool irq_cleared = false;
+       int tcs = bp->num_tc;
        int rc;
 
        if (!bnxt_need_reserve_rings(bp))
@@ -10263,6 +10273,7 @@ int bnxt_reserve_rings(struct bnxt *bp, bool irq_re_init)
                    bp->tx_nr_rings - bp->tx_nr_rings_xdp)) {
                netdev_err(bp->dev, "tx ring reservation failure\n");
                netdev_reset_tc(bp->dev);
+               bp->num_tc = 0;
                if (bp->tx_nr_rings_xdp)
                        bp->tx_nr_rings_per_tc = bp->tx_nr_rings_xdp;
                else
@@ -11564,10 +11575,12 @@ int bnxt_half_open_nic(struct bnxt *bp)
                netdev_err(bp->dev, "bnxt_alloc_mem err: %x\n", rc);
                goto half_open_err;
        }
+       bnxt_init_napi(bp);
        set_bit(BNXT_STATE_HALF_OPEN, &bp->state);
        rc = bnxt_init_nic(bp, true);
        if (rc) {
                clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
+               bnxt_del_napi(bp);
                netdev_err(bp->dev, "bnxt_init_nic err: %x\n", rc);
                goto half_open_err;
        }
@@ -11586,6 +11599,7 @@ half_open_err:
 void bnxt_half_close_nic(struct bnxt *bp)
 {
        bnxt_hwrm_resource_free(bp, false, true);
+       bnxt_del_napi(bp);
        bnxt_free_skbs(bp);
        bnxt_free_mem(bp, true);
        clear_bit(BNXT_STATE_HALF_OPEN, &bp->state);
@@ -13232,6 +13246,11 @@ static int bnxt_fw_init_one_p1(struct bnxt *bp)
 
        bp->fw_cap = 0;
        rc = bnxt_hwrm_ver_get(bp);
+       /* FW may be unresponsive after FLR. FLR must complete within 100 msec
+        * so wait before continuing with recovery.
+        */
+       if (rc)
+               msleep(100);
        bnxt_try_map_fw_health_reg(bp);
        if (rc) {
                rc = bnxt_try_recover_fw(bp);
@@ -13784,7 +13803,7 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
                return -EINVAL;
        }
 
-       if (netdev_get_num_tc(dev) == tc)
+       if (bp->num_tc == tc)
                return 0;
 
        if (bp->flags & BNXT_FLAG_SHARED_RINGS)
@@ -13802,9 +13821,11 @@ int bnxt_setup_mq_tc(struct net_device *dev, u8 tc)
        if (tc) {
                bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tc;
                netdev_set_num_tc(dev, tc);
+               bp->num_tc = tc;
        } else {
                bp->tx_nr_rings = bp->tx_nr_rings_per_tc;
                netdev_reset_tc(dev);
+               bp->num_tc = 0;
        }
        bp->tx_nr_rings += bp->tx_nr_rings_xdp;
        tx_cp = bnxt_num_tx_to_cp(bp, bp->tx_nr_rings);
index b8ef1717cb65fb128b6a60d8148829f3c2b6af36..47338b48ca203d2ebea4c72b433690a0afde3c58 100644
@@ -2225,6 +2225,7 @@ struct bnxt {
        u8                      tc_to_qidx[BNXT_MAX_QUEUE];
        u8                      q_ids[BNXT_MAX_QUEUE];
        u8                      max_q;
+       u8                      num_tc;
 
        unsigned int            current_interval;
 #define BNXT_TIMER_INTERVAL    HZ
index 63e0670383852af5b6ab4a7c28c9d25e0ea64a1b..0dbb880a7aa0e721f98e42d17f845596a702abc4 100644
@@ -228,7 +228,7 @@ static int bnxt_queue_remap(struct bnxt *bp, unsigned int lltc_mask)
                }
        }
        if (bp->ieee_ets) {
-               int tc = netdev_get_num_tc(bp->dev);
+               int tc = bp->num_tc;
 
                if (!tc)
                        tc = 1;
index 27b983c0a8a9cdfb3f928fdc026ce7480307ff31..dc4ca706b0e299d9df7da1b2edba240becd68878 100644
@@ -884,7 +884,7 @@ static void bnxt_get_channels(struct net_device *dev,
        if (max_tx_sch_inputs)
                max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
 
-       tcs = netdev_get_num_tc(dev);
+       tcs = bp->num_tc;
        tx_grps = max(tcs, 1);
        if (bp->tx_nr_rings_xdp)
                tx_grps++;
@@ -944,7 +944,7 @@ static int bnxt_set_channels(struct net_device *dev,
        if (channel->combined_count)
                sh = true;
 
-       tcs = netdev_get_num_tc(dev);
+       tcs = bp->num_tc;
 
        req_tx_rings = sh ? channel->combined_count : channel->tx_count;
        req_rx_rings = sh ? channel->combined_count : channel->rx_count;
@@ -1574,7 +1574,8 @@ u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
        struct bnxt *bp = netdev_priv(dev);
 
        if (bp->flags & BNXT_FLAG_CHIP_P5_PLUS)
-               return ALIGN(bp->rx_nr_rings, BNXT_RSS_TABLE_ENTRIES_P5);
+               return bnxt_get_nr_rss_ctxs(bp, bp->rx_nr_rings) *
+                      BNXT_RSS_TABLE_ENTRIES_P5;
        return HW_HASH_INDEX_SIZE;
 }
 
index c2b25fc623ecc08410e8fc45cb391d5a555cc1d9..4079538bc310eaaeee0ee568f6fcc3fbf2b4e758 100644
@@ -407,7 +407,7 @@ static int bnxt_xdp_set(struct bnxt *bp, struct bpf_prog *prog)
        if (prog)
                tx_xdp = bp->rx_nr_rings;
 
-       tc = netdev_get_num_tc(dev);
+       tc = bp->num_tc;
        if (!tc)
                tc = 1;
        rc = bnxt_check_rings(bp, bp->tx_nr_rings_per_tc, bp->rx_nr_rings,
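
Across bnxt.c, bnxt.h and the DCB, ethtool and XDP files above, the driver stops querying netdev_get_num_tc() and instead reads a new bp->num_tc field that is updated alongside every netdev_set_num_tc()/netdev_reset_tc() call. This looks intended to keep ring arithmetic on the driver's committed TC count even while the netdev's own state is in flux, though the diff itself does not state the motivation. The invariant, distilled from the hunks:

    /* Writer keeps the cache in lock-step with the netdev state: */
    if (tc) {
            netdev_set_num_tc(dev, tc);
            bp->num_tc = tc;
    } else {
            netdev_reset_tc(dev);
            bp->num_tc = 0;
    }

    /* Readers consult only the cached copy: */
    tcs = bp->num_tc ? bp->num_tc : 1;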
index 9cc6303c82ffb7f2680d3a01e4ca4c7fb227574c..f38d31bfab1bbcecafacaa4adf2e1ee67bd332b6 100644
@@ -27,6 +27,7 @@
 #include "octeon_network.h"
 
 MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Core");
 MODULE_LICENSE("GPL");
 
 /* OOM task polling interval */
index 1c2a540db13d8a6806c0de8f3e31998133f8b429..1f495cfd7959b045c8186f6e84a2cbea43eeb1f2 100644
@@ -868,5 +868,6 @@ static struct platform_driver ep93xx_eth_driver = {
 
 module_platform_driver(ep93xx_eth_driver);
 
+MODULE_DESCRIPTION("Cirrus EP93xx Ethernet driver");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:ep93xx-eth");
index df40c720e7b23d517433743efb883edb8f8d4cf4..9aeff2b37a61299587c22dc39b207204c3412543 100644
@@ -1485,7 +1485,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi,
 
                        xdp_prepare_buff(&xdp, page_address(entry->page),
                                         XDP_PACKET_HEADROOM + TSNEP_RX_INLINE_METADATA_SIZE,
-                                        length, false);
+                                        length - ETH_FCS_LEN, false);
 
                        consume = tsnep_xdp_run_prog(rx, prog, &xdp,
                                                     &xdp_status, tx_nq, tx);
@@ -1568,7 +1568,7 @@ static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi,
                prefetch(entry->xdp->data);
                length = __le32_to_cpu(entry->desc_wb->properties) &
                         TSNEP_DESC_LENGTH_MASK;
-               xsk_buff_set_size(entry->xdp, length);
+               xsk_buff_set_size(entry->xdp, length - ETH_FCS_LEN);
                xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool);
 
                /* RX metadata with timestamps is in front of actual data,
@@ -1762,6 +1762,19 @@ static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx)
                        allocated--;
                }
        }
+
+       /* set need wakeup flag immediately if ring is not filled completely,
+        * first polling would be too late as need wakeup signalisation would
+        * be delayed for an indefinite time
+        */
+       if (xsk_uses_need_wakeup(rx->xsk_pool)) {
+               int desc_available = tsnep_rx_desc_available(rx);
+
+               if (desc_available)
+                       xsk_set_rx_need_wakeup(rx->xsk_pool);
+               else
+                       xsk_clear_rx_need_wakeup(rx->xsk_pool);
+       }
 }
 
 static bool tsnep_pending(struct tsnep_queue *queue)
index 07c2b701b5fa9793d30e27892536eca28ba0504a..9ebe751c1df0758c75ee493ddaa63876807f49be 100644
@@ -661,4 +661,5 @@ static struct platform_driver nps_enet_driver = {
 module_platform_driver(nps_enet_driver);
 
 MODULE_AUTHOR("EZchip Semiconductor");
+MODULE_DESCRIPTION("EZchip NPS Ethernet driver");
 MODULE_LICENSE("GPL v2");
index cffbf27c4656b27b0694f2a1ac88ad596c6196b4..bfdbdab443ae0ddcf93dca777f134e659b5d4040 100644
@@ -3216,4 +3216,5 @@ void enetc_pci_remove(struct pci_dev *pdev)
 }
 EXPORT_SYMBOL_GPL(enetc_pci_remove);
 
+MODULE_DESCRIPTION("NXP ENETC Ethernet driver");
 MODULE_LICENSE("Dual BSD/GPL");
index d42594f322750f5ff5d4cf039b1115e5d4424f31..432523b2c789216b21440e4e6576c06ae30b674c 100644
@@ -2036,6 +2036,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
 
                /* if any of the above changed restart the FEC */
                if (status_change) {
+                       netif_stop_queue(ndev);
                        napi_disable(&fep->napi);
                        netif_tx_lock_bh(ndev);
                        fec_restart(ndev);
@@ -2045,6 +2046,7 @@ static void fec_enet_adjust_link(struct net_device *ndev)
                }
        } else {
                if (fep->link) {
+                       netif_stop_queue(ndev);
                        napi_disable(&fep->napi);
                        netif_tx_lock_bh(ndev);
                        fec_stop(ndev);
@@ -4769,4 +4771,5 @@ static struct platform_driver fec_driver = {
 
 module_platform_driver(fec_driver);
 
+MODULE_DESCRIPTION("NXP Fast Ethernet Controller (FEC) driver");
 MODULE_LICENSE("GPL");
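
The two fec hunks insert netif_stop_queue() ahead of the restart/stop sequence, so no new transmits are queued while NAPI is being parked and the MAC reprogrammed. A hedged sketch of the quiesce ordering; reconfigure_mac/priv are placeholders, and the re-enable steps are assumed from the usual pattern rather than shown in this hunk:

    netif_stop_queue(ndev);             /* 1. refuse new TX submissions */
    napi_disable(&priv->napi);          /* 2. park the poll loop */
    netif_tx_lock_bh(ndev);             /* 3. fence in-flight xmit paths */
    reconfigure_mac(priv);              /* restart (or stop) the MAC */
    netif_tx_unlock_bh(ndev);
    napi_enable(&priv->napi);
    netif_wake_queue(ndev);             /* resume TX once hardware is ready */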
index 70dd982a5edce68a63a8f3b94d59c9947dd0045a..026f7270a54de8bf398516b4e563f35d4825a3d1 100644
@@ -531,4 +531,5 @@ static struct platform_driver fsl_pq_mdio_driver = {
 
 module_platform_driver(fsl_pq_mdio_driver);
 
+MODULE_DESCRIPTION("Freescale PQ MDIO helpers");
 MODULE_LICENSE("GPL");
index ae8f9f135725b4de88e9d95858025ce6ef41c650..6e7fd473abfd001eb45e8b5bda8978fff9eec26b 100644
@@ -3588,40 +3588,55 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        struct i40e_hmc_obj_rxq rx_ctx;
        int err = 0;
        bool ok;
-       int ret;
 
        bitmap_zero(ring->state, __I40E_RING_STATE_NBITS);
 
        /* clear the context structure first */
        memset(&rx_ctx, 0, sizeof(rx_ctx));
 
-       if (ring->vsi->type == I40E_VSI_MAIN)
-               xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+       ring->rx_buf_len = vsi->rx_buf_len;
+
+       /* XDP RX-queue info only needed for RX rings exposed to XDP */
+       if (ring->vsi->type != I40E_VSI_MAIN)
+               goto skip;
+
+       if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+               err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+                                        ring->queue_index,
+                                        ring->q_vector->napi.napi_id,
+                                        ring->rx_buf_len);
+               if (err)
+                       return err;
+       }
 
        ring->xsk_pool = i40e_xsk_pool(ring);
        if (ring->xsk_pool) {
-               ring->rx_buf_len =
-                 xsk_pool_get_rx_frame_size(ring->xsk_pool);
-               ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+               xdp_rxq_info_unreg(&ring->xdp_rxq);
+               ring->rx_buf_len = xsk_pool_get_rx_frame_size(ring->xsk_pool);
+               err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+                                        ring->queue_index,
+                                        ring->q_vector->napi.napi_id,
+                                        ring->rx_buf_len);
+               if (err)
+                       return err;
+               err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                                 MEM_TYPE_XSK_BUFF_POOL,
                                                 NULL);
-               if (ret)
-                       return ret;
+               if (err)
+                       return err;
                dev_info(&vsi->back->pdev->dev,
                         "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
                         ring->queue_index);
 
        } else {
-               ring->rx_buf_len = vsi->rx_buf_len;
-               if (ring->vsi->type == I40E_VSI_MAIN) {
-                       ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
-                                                        MEM_TYPE_PAGE_SHARED,
-                                                        NULL);
-                       if (ret)
-                               return ret;
-               }
+               err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+                                                MEM_TYPE_PAGE_SHARED,
+                                                NULL);
+               if (err)
+                       return err;
        }
 
+skip:
        xdp_init_buff(&ring->xdp, i40e_rx_pg_size(ring) / 2, &ring->xdp_rxq);
 
        rx_ctx.dbuff = DIV_ROUND_UP(ring->rx_buf_len,
index 971ba33220381b799e4900f6f2231b58bcb877e4..0d7177083708f29d3b4deba11d00abdcb017f886 100644
@@ -1548,7 +1548,6 @@ void i40e_free_rx_resources(struct i40e_ring *rx_ring)
 int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
 {
        struct device *dev = rx_ring->dev;
-       int err;
 
        u64_stats_init(&rx_ring->syncp);
 
@@ -1569,14 +1568,6 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
        rx_ring->next_to_process = 0;
        rx_ring->next_to_use = 0;
 
-       /* XDP RX-queue info only needed for RX rings exposed to XDP */
-       if (rx_ring->vsi->type == I40E_VSI_MAIN) {
-               err = xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
-                                      rx_ring->queue_index, rx_ring->q_vector->napi.napi_id);
-               if (err < 0)
-                       return err;
-       }
-
        rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
 
        rx_ring->rx_bi =
@@ -2087,7 +2078,8 @@ static void i40e_put_rx_buffer(struct i40e_ring *rx_ring,
 static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
                                  struct xdp_buff *xdp)
 {
-       u32 next = rx_ring->next_to_clean;
+       u32 nr_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+       u32 next = rx_ring->next_to_clean, i = 0;
        struct i40e_rx_buffer *rx_buffer;
 
        xdp->flags = 0;
@@ -2100,10 +2092,10 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
                if (!rx_buffer->page)
                        continue;
 
-               if (xdp_res == I40E_XDP_CONSUMED)
-                       rx_buffer->pagecnt_bias++;
-               else
+               if (xdp_res != I40E_XDP_CONSUMED)
                        i40e_rx_buffer_flip(rx_buffer, xdp->frame_sz);
+               else if (i++ <= nr_frags)
+                       rx_buffer->pagecnt_bias++;
 
                /* EOP buffer will be put in i40e_clean_rx_irq() */
                if (next == rx_ring->next_to_process)
@@ -2117,20 +2109,20 @@ static void i40e_process_rx_buffs(struct i40e_ring *rx_ring, int xdp_res,
  * i40e_construct_skb - Allocate skb and populate it
  * @rx_ring: rx descriptor ring to transact packets on
  * @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
  *
  * This function allocates an skb.  It then populates it with the page
  * data from the current receive descriptor, taking care to set up the
  * skb correctly.
  */
 static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
-                                         struct xdp_buff *xdp,
-                                         u32 nr_frags)
+                                         struct xdp_buff *xdp)
 {
        unsigned int size = xdp->data_end - xdp->data;
        struct i40e_rx_buffer *rx_buffer;
+       struct skb_shared_info *sinfo;
        unsigned int headlen;
        struct sk_buff *skb;
+       u32 nr_frags = 0;
 
        /* prefetch first cache line of first page */
        net_prefetch(xdp->data);
@@ -2168,6 +2160,10 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
        memcpy(__skb_put(skb, headlen), xdp->data,
               ALIGN(headlen, sizeof(long)));
 
+       if (unlikely(xdp_buff_has_frags(xdp))) {
+               sinfo = xdp_get_shared_info_from_buff(xdp);
+               nr_frags = sinfo->nr_frags;
+       }
        rx_buffer = i40e_rx_bi(rx_ring, rx_ring->next_to_clean);
        /* update all of the pointers */
        size -= headlen;
@@ -2187,9 +2183,8 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
        }
 
        if (unlikely(xdp_buff_has_frags(xdp))) {
-               struct skb_shared_info *sinfo, *skinfo = skb_shinfo(skb);
+               struct skb_shared_info *skinfo = skb_shinfo(skb);
 
-               sinfo = xdp_get_shared_info_from_buff(xdp);
                memcpy(&skinfo->frags[skinfo->nr_frags], &sinfo->frags[0],
                       sizeof(skb_frag_t) * nr_frags);
 
@@ -2212,17 +2207,17 @@ static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
  * i40e_build_skb - Build skb around an existing buffer
  * @rx_ring: Rx descriptor ring to transact packets on
  * @xdp: xdp_buff pointing to the data
- * @nr_frags: number of buffers for the packet
  *
  * This function builds an skb around an existing Rx buffer, taking care
  * to set up the skb correctly and avoid any memcpy overhead.
  */
 static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
-                                     struct xdp_buff *xdp,
-                                     u32 nr_frags)
+                                     struct xdp_buff *xdp)
 {
        unsigned int metasize = xdp->data - xdp->data_meta;
+       struct skb_shared_info *sinfo;
        struct sk_buff *skb;
+       u32 nr_frags;
 
        /* Prefetch first cache line of first page. If xdp->data_meta
         * is unused, this points exactly as xdp->data, otherwise we
@@ -2231,6 +2226,11 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
         */
        net_prefetch(xdp->data_meta);
 
+       if (unlikely(xdp_buff_has_frags(xdp))) {
+               sinfo = xdp_get_shared_info_from_buff(xdp);
+               nr_frags = sinfo->nr_frags;
+       }
+
        /* build an skb around the page buffer */
        skb = napi_build_skb(xdp->data_hard_start, xdp->frame_sz);
        if (unlikely(!skb))
@@ -2243,9 +2243,6 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
                skb_metadata_set(skb, metasize);
 
        if (unlikely(xdp_buff_has_frags(xdp))) {
-               struct skb_shared_info *sinfo;
-
-               sinfo = xdp_get_shared_info_from_buff(xdp);
                xdp_update_skb_shared_info(skb, nr_frags,
                                           sinfo->xdp_frags_size,
                                           nr_frags * xdp->frame_sz,
@@ -2589,9 +2586,9 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget,
                        total_rx_bytes += size;
                } else {
                        if (ring_uses_build_skb(rx_ring))
-                               skb = i40e_build_skb(rx_ring, xdp, nfrags);
+                               skb = i40e_build_skb(rx_ring, xdp);
                        else
-                               skb = i40e_construct_skb(rx_ring, xdp, nfrags);
+                               skb = i40e_construct_skb(rx_ring, xdp);
 
                        /* drop if we failed to retrieve a buffer */
                        if (!skb) {
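The hunks above stop threading nr_frags through the skb constructors and instead re-read it from the xdp_buff shared info at the point of use, because an XDP program can change the frag count underneath the driver. A minimal sketch of a program that does exactly that, using the standard multi-buffer helpers (the 256-byte cap is an arbitrary illustration, not anything from this series):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Trim every packet to at most 256 bytes. On a multi-buffer packet a
     * negative tail adjustment can free whole trailing frags, which is the
     * case the driver fixes above must account for.
     */
    SEC("xdp.frags")
    int xdp_trim_to_256(struct xdp_md *ctx)
    {
            __u64 len = bpf_xdp_get_buff_len(ctx); /* total length, frags included */

            if (len > 256)
                    bpf_xdp_adjust_tail(ctx, 256 - (int)len);
            return XDP_PASS;
    }

    char LICENSE[] SEC("license") = "GPL";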
index af7d5fa6cdc15552935b03e5beaaaaac856b7d3f..11500003af0d47dbfb203ea51914c2f452b42368 100644 (file)
@@ -414,7 +414,8 @@ i40e_add_xsk_frag(struct i40e_ring *rx_ring, struct xdp_buff *first,
        }
 
        __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
-                                  virt_to_page(xdp->data_hard_start), 0, size);
+                                  virt_to_page(xdp->data_hard_start),
+                                  XDP_PACKET_HEADROOM, size);
        sinfo->xdp_frags_size += size;
        xsk_buff_add_frag(xdp);
 
@@ -498,7 +499,6 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget)
                xdp_res = i40e_run_xdp_zc(rx_ring, first, xdp_prog);
                i40e_handle_xdp_result_zc(rx_ring, first, rx_desc, &rx_packets,
                                          &rx_bytes, xdp_res, &failure);
-               first->flags = 0;
                next_to_clean = next_to_process;
                if (failure)
                        break;
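The ZC hunk above (and the matching ice one further down) moves the frag offset from 0 to XDP_PACKET_HEADROOM, since that is where the payload starts inside an XSK buffer. A hedged sketch of the layout this assumes (default pool headroom, no metadata pushed in front of xdp->data):

    /*
     *  data_hard_start                        data                data_end
     *  |<------- XDP_PACKET_HEADROOM ------->|<------ payload ------>|
     *
     * The in-page payload offset is therefore xdp->data - xdp->data_hard_start,
     * which equals XDP_PACKET_HEADROOM here; an offset of 0 pointed at
     * headroom bytes instead of packet data.
     */
    u32 off = xdp->data - xdp->data_hard_start;  /* == XDP_PACKET_HEADROOM */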
index 533b923cae2d078dfecdc902d4605d08b0d7391e..7ac847718882e29b38071ca6b8adb47ca063f1d7 100644 (file)
@@ -547,19 +547,27 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
        ring->rx_buf_len = ring->vsi->rx_buf_len;
 
        if (ring->vsi->type == ICE_VSI_PF) {
-               if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
-                       /* coverity[check_return] */
-                       __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
-                                          ring->q_index,
-                                          ring->q_vector->napi.napi_id,
-                                          ring->vsi->rx_buf_len);
+               if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+                       err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+                                                ring->q_index,
+                                                ring->q_vector->napi.napi_id,
+                                                ring->rx_buf_len);
+                       if (err)
+                               return err;
+               }
 
                ring->xsk_pool = ice_xsk_pool(ring);
                if (ring->xsk_pool) {
-                       xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+                       xdp_rxq_info_unreg(&ring->xdp_rxq);
 
                        ring->rx_buf_len =
                                xsk_pool_get_rx_frame_size(ring->xsk_pool);
+                       err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+                                                ring->q_index,
+                                                ring->q_vector->napi.napi_id,
+                                                ring->rx_buf_len);
+                       if (err)
+                               return err;
                        err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                                         MEM_TYPE_XSK_BUFF_POOL,
                                                         NULL);
@@ -571,13 +579,14 @@ int ice_vsi_cfg_rxq(struct ice_rx_ring *ring)
                        dev_info(dev, "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
                                 ring->q_index);
                } else {
-                       if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
-                               /* coverity[check_return] */
-                               __xdp_rxq_info_reg(&ring->xdp_rxq,
-                                                  ring->netdev,
-                                                  ring->q_index,
-                                                  ring->q_vector->napi.napi_id,
-                                                  ring->vsi->rx_buf_len);
+                       if (!xdp_rxq_info_is_reg(&ring->xdp_rxq)) {
+                               err = __xdp_rxq_info_reg(&ring->xdp_rxq, ring->netdev,
+                                                        ring->q_index,
+                                                        ring->q_vector->napi.napi_id,
+                                                        ring->rx_buf_len);
+                               if (err)
+                                       return err;
+                       }
 
                        err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                                         MEM_TYPE_PAGE_SHARED,
index 74d13cc5a3a7f1f62e6657e058548b243e2d438b..97d41d6ebf1fb69419e2cf13dae17db08fd27910 100644 (file)
@@ -513,11 +513,6 @@ int ice_setup_rx_ring(struct ice_rx_ring *rx_ring)
        if (ice_is_xdp_ena_vsi(rx_ring->vsi))
                WRITE_ONCE(rx_ring->xdp_prog, rx_ring->vsi->xdp_prog);
 
-       if (rx_ring->vsi->type == ICE_VSI_PF &&
-           !xdp_rxq_info_is_reg(&rx_ring->xdp_rxq))
-               if (xdp_rxq_info_reg(&rx_ring->xdp_rxq, rx_ring->netdev,
-                                    rx_ring->q_index, rx_ring->q_vector->napi.napi_id))
-                       goto err;
        return 0;
 
 err:
@@ -603,9 +598,7 @@ out_failure:
                ret = ICE_XDP_CONSUMED;
        }
 exit:
-       rx_buf->act = ret;
-       if (unlikely(xdp_buff_has_frags(xdp)))
-               ice_set_rx_bufs_act(xdp, rx_ring, ret);
+       ice_set_rx_bufs_act(xdp, rx_ring, ret);
 }
 
 /**
@@ -893,14 +886,17 @@ ice_add_xdp_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *xdp,
        }
 
        if (unlikely(sinfo->nr_frags == MAX_SKB_FRAGS)) {
-               if (unlikely(xdp_buff_has_frags(xdp)))
-                       ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
+               ice_set_rx_bufs_act(xdp, rx_ring, ICE_XDP_CONSUMED);
                return -ENOMEM;
        }
 
        __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++, rx_buf->page,
                                   rx_buf->page_offset, size);
        sinfo->xdp_frags_size += size;
+       /* remember the frag count before XDP prog execution; bpf_xdp_adjust_tail()
+        * can pop frags off, but the driver has to account for that on its own
+        */
+       rx_ring->nr_frags = sinfo->nr_frags;
 
        if (page_is_pfmemalloc(rx_buf->page))
                xdp_buff_set_frag_pfmemalloc(xdp);
@@ -1251,6 +1247,7 @@ int ice_clean_rx_irq(struct ice_rx_ring *rx_ring, int budget)
 
                xdp->data = NULL;
                rx_ring->first_desc = ntc;
+               rx_ring->nr_frags = 0;
                continue;
 construct_skb:
                if (likely(ice_ring_uses_build_skb(rx_ring)))
@@ -1266,10 +1263,12 @@ construct_skb:
                                                    ICE_XDP_CONSUMED);
                        xdp->data = NULL;
                        rx_ring->first_desc = ntc;
+                       rx_ring->nr_frags = 0;
                        break;
                }
                xdp->data = NULL;
                rx_ring->first_desc = ntc;
+               rx_ring->nr_frags = 0;
 
                stat_err_bits = BIT(ICE_RX_FLEX_DESC_STATUS0_RXE_S);
                if (unlikely(ice_test_staterr(rx_desc->wb.status_error0,
index b3379ff736747887a7404c5d020b865bc10a5024..af955b0e5dc5caeb3ce6ca3f9671772763270f52 100644 (file)
@@ -358,6 +358,7 @@ struct ice_rx_ring {
        struct ice_tx_ring *xdp_ring;
        struct ice_rx_ring *next;       /* pointer to next ring in q_vector */
        struct xsk_buff_pool *xsk_pool;
+       u32 nr_frags;
        dma_addr_t dma;                 /* physical address of ring */
        u16 rx_buf_len;
        u8 dcb_tc;                      /* Traffic class of ring */
index 762047508619603028cac48e5e091d4a584084c2..afcead4baef4b1552bdd152ee5414c8127b0b992 100644 (file)
  * act: action to store onto Rx buffers related to XDP buffer parts
  *
  * Set action that should be taken before putting Rx buffer from first frag
- * to one before last. Last one is handled by caller of this function as it
- * is the EOP frag that is currently being processed. This function is
- * supposed to be called only when XDP buffer contains frags.
+ * to the last.
  */
 static inline void
 ice_set_rx_bufs_act(struct xdp_buff *xdp, const struct ice_rx_ring *rx_ring,
                    const unsigned int act)
 {
-       const struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
-       u32 first = rx_ring->first_desc;
-       u32 nr_frags = sinfo->nr_frags;
+       u32 sinfo_frags = xdp_get_shared_info_from_buff(xdp)->nr_frags;
+       u32 nr_frags = rx_ring->nr_frags + 1;
+       u32 idx = rx_ring->first_desc;
        u32 cnt = rx_ring->count;
        struct ice_rx_buf *buf;
 
        for (int i = 0; i < nr_frags; i++) {
-               buf = &rx_ring->rx_buf[first];
+               buf = &rx_ring->rx_buf[idx];
                buf->act = act;
 
-               if (++first == cnt)
-                       first = 0;
+               if (++idx == cnt)
+                       idx = 0;
+       }
+
+       /* adjust pagecnt_bias on frags freed by XDP prog */
+       if (sinfo_frags < rx_ring->nr_frags && act == ICE_XDP_CONSUMED) {
+               u32 delta = rx_ring->nr_frags - sinfo_frags;
+
+               while (delta) {
+                       if (idx == 0)
+                               idx = cnt - 1;
+                       else
+                               idx--;
+                       buf = &rx_ring->rx_buf[idx];
+                       buf->pagecnt_bias--;
+                       delta--;
+               }
        }
 }
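A worked example of the new bookkeeping, assuming rx_ring->nr_frags was 3 when the XDP program ran and bpf_xdp_adjust_tail() then popped two frags (so sinfo now reports 1): nr_frags + 1 = 4 ring slots get the action, delta = 3 - 1 = 2, and the tail loop walks two slots backwards, with wraparound, to take back the pagecnt_bias for frags the program already freed. Distilled into a hypothetical helper (not the driver function above):

    /* Step back `delta` slots from idx, wrapping at the ring size, and
     * drop the pagecnt_bias taken for frags the XDP program already
     * returned, so they are not recycled twice.
     */
    static void undo_bias(struct ice_rx_buf *bufs, u32 cnt, u32 idx, u32 delta)
    {
            while (delta--) {
                    idx = idx ? idx - 1 : cnt - 1;
                    bufs[idx].pagecnt_bias--;
            }
    }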
 
index 5d1ae8e4058a4ae43bb0fb2be98070cf1f2e9559..8b81a16770459373026f2436099c0280e29f9022 100644 (file)
@@ -825,7 +825,8 @@ ice_add_xsk_frag(struct ice_rx_ring *rx_ring, struct xdp_buff *first,
        }
 
        __skb_fill_page_desc_noacc(sinfo, sinfo->nr_frags++,
-                                  virt_to_page(xdp->data_hard_start), 0, size);
+                                  virt_to_page(xdp->data_hard_start),
+                                  XDP_PACKET_HEADROOM, size);
        sinfo->xdp_frags_size += size;
        xsk_buff_add_frag(xdp);
 
@@ -895,7 +896,6 @@ int ice_clean_rx_irq_zc(struct ice_rx_ring *rx_ring, int budget)
 
                if (!first) {
                        first = xdp;
-                       xdp_buff_clear_frags_flag(first);
                } else if (ice_add_xsk_frag(rx_ring, first, xdp, size)) {
                        break;
                }
index 5fea2fd957eb3563ac839b0cc966cde9de40a2ba..58179bd733ff05bf5d31cc0b6e4855d075fcae8d 100644 (file)
@@ -783,6 +783,8 @@ static int idpf_cfg_netdev(struct idpf_vport *vport)
        /* setup watchdog timeout value to be 5 second */
        netdev->watchdog_timeo = 5 * HZ;
 
+       netdev->dev_port = idx;
+
        /* configure default MTU size */
        netdev->min_mtu = ETH_MIN_MTU;
        netdev->max_mtu = vport->max_mtu;
index 5182fe737c3727629fd4a24d516f2459126ddf2b..ff54fbe41bccc89ce954eadb770ceb5786b719ac 100644 (file)
@@ -318,4 +318,5 @@ static struct platform_driver liteeth_driver = {
 module_platform_driver(liteeth_driver);
 
 MODULE_AUTHOR("Joel Stanley <joel@jms.id.au>");
+MODULE_DESCRIPTION("LiteX Liteeth Ethernet driver");
 MODULE_LICENSE("GPL");
index 820b1fabe297a209dd2620092115a01361c755fd..23adf53c2aa1c08086bff5758a99673e023c7de4 100644 (file)
@@ -614,12 +614,38 @@ static void mvpp23_bm_set_8pool_mode(struct mvpp2 *priv)
        mvpp2_write(priv, MVPP22_BM_POOL_BASE_ADDR_HIGH_REG, val);
 }
 
+/* Clean up a pool before the OS actually initializes it */
+static void mvpp2_bm_pool_cleanup(struct mvpp2 *priv, int pool_id)
+{
+       unsigned int thread = mvpp2_cpu_to_thread(priv, get_cpu());
+       u32 val;
+       int i;
+
+       /* Drain the BM from all possible residues left by firmware */
+       for (i = 0; i < MVPP2_BM_POOL_SIZE_MAX; i++)
+               mvpp2_thread_read(priv, thread, MVPP2_BM_PHY_ALLOC_REG(pool_id));
+
+       put_cpu();
+
+       /* Stop the BM pool */
+       val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(pool_id));
+       val |= MVPP2_BM_STOP_MASK;
+       mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(pool_id), val);
+}
+
 static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
 {
        enum dma_data_direction dma_dir = DMA_FROM_DEVICE;
        int i, err, poolnum = MVPP2_BM_POOLS_NUM;
        struct mvpp2_port *port;
 
+       if (priv->percpu_pools)
+               poolnum = mvpp2_get_nrxqs(priv) * 2;
+
+       /* Clean up the pools in case firmware left stale state behind */
+       for (i = 0; i < poolnum; i++)
+               mvpp2_bm_pool_cleanup(priv, i);
+
        if (priv->percpu_pools) {
                for (i = 0; i < priv->port_count; i++) {
                        port = priv->port_list[i];
@@ -629,7 +655,6 @@ static int mvpp2_bm_init(struct device *dev, struct mvpp2 *priv)
                        }
                }
 
-               poolnum = mvpp2_get_nrxqs(priv) * 2;
                for (i = 0; i < poolnum; i++) {
                        /* the pool in use */
                        int pn = i / (poolnum / 2);
index 9690ac01f02c8db9b9ed2e524b05aa124b077c7a..b92264d0a77e71075495f5cc0e02c330e3c279bd 100644 (file)
@@ -413,4 +413,5 @@ const char *otx2_mbox_id2name(u16 id)
 EXPORT_SYMBOL(otx2_mbox_id2name);
 
 MODULE_AUTHOR("Marvell.");
+MODULE_DESCRIPTION("Marvell RVU NIC Mbox helpers");
 MODULE_LICENSE("GPL v2");
index a7b1f9686c09a9a0d6370ee85e3cc33d8b4cd302..4957412ff1f65a8d0621410127d7d58d1cdd175f 100644 (file)
@@ -1923,6 +1923,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
 {
        const char *namep = mlx5_command_str(opcode);
        struct mlx5_cmd_stats *stats;
+       unsigned long flags;
 
        if (!err || !(strcmp(namep, "unknown command opcode")))
                return;
@@ -1930,7 +1931,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
        stats = xa_load(&dev->cmd.stats, opcode);
        if (!stats)
                return;
-       spin_lock_irq(&stats->lock);
+       spin_lock_irqsave(&stats->lock, flags);
        stats->failed++;
        if (err < 0)
                stats->last_failed_errno = -err;
@@ -1939,7 +1940,7 @@ static void cmd_status_log(struct mlx5_core_dev *dev, u16 opcode, u8 status,
                stats->last_failed_mbox_status = status;
                stats->last_failed_syndrome = syndrome;
        }
-       spin_unlock_irq(&stats->lock);
+       spin_unlock_irqrestore(&stats->lock, flags);
 }
 
 /* preserve -EREMOTEIO for outbox.status != OK, otherwise return err as is */
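The irqsave conversion above only matters if cmd_status_log() can run with interrupts already disabled, which the unconditional re-enable in spin_unlock_irq() would silently break. A minimal illustration of that hazard (assumed calling context, not code from this driver):

    static DEFINE_SPINLOCK(demo_lock);

    static void demo(void)
    {
            unsigned long flags;

            local_irq_save(flags);        /* caller context: IRQs already off */
            spin_lock_irq(&demo_lock);    /* fine: IRQs stay off */
            spin_unlock_irq(&demo_lock);  /* BUG: unconditionally re-enables IRQs */
            local_irq_restore(flags);     /* too late, the section above ran with IRQs on */
    }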
index 0bfe1ca8a364233a1d6fb92846d5b54d07d2bcdc..55c6ace0acd557b075c3bae6ff0818ca84fc3ae8 100644 (file)
@@ -1124,7 +1124,7 @@ static inline bool mlx5_tx_swp_supported(struct mlx5_core_dev *mdev)
 extern const struct ethtool_ops mlx5e_ethtool_ops;
 
 int mlx5e_create_mkey(struct mlx5_core_dev *mdev, u32 pdn, u32 *mkey);
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev);
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises);
 void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev);
 int mlx5e_refresh_tirs(struct mlx5e_priv *priv, bool enable_uc_lb,
                       bool enable_mc_lb);
index e1283531e0b810f78d3b18d20cd6e3ba56c9b84f..671adbad0a40f643bbd1f82e56233f7ae11872ce 100644 (file)
@@ -436,6 +436,7 @@ static int fs_any_create_groups(struct mlx5e_flow_table *ft)
        in = kvzalloc(inlen, GFP_KERNEL);
        if  (!in || !ft->g) {
                kfree(ft->g);
+               ft->g = NULL;
                kvfree(in);
                return -ENOMEM;
        }
index 284253b79266b937f4d654361c7b891278e9fda9..5d213a9886f11c4bed6a2b8c5e5bd708ce08bef3 100644 (file)
@@ -1064,8 +1064,8 @@ void mlx5e_build_sq_param(struct mlx5_core_dev *mdev,
        void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
        bool allow_swp;
 
-       allow_swp =
-               mlx5_geneve_tx_allowed(mdev) || !!mlx5_ipsec_device_caps(mdev);
+       allow_swp = mlx5_geneve_tx_allowed(mdev) ||
+                   (mlx5_ipsec_device_caps(mdev) & MLX5_IPSEC_CAP_CRYPTO);
        mlx5e_build_sq_param_common(mdev, param);
        MLX5_SET(wq, wq, log_wq_sz, params->log_sq_size);
        MLX5_SET(sqc, sqc, allow_swp, allow_swp);
index c206cc0a84832e6ebf104cc92990c43a81e94d60..078f56a3cbb2b389499c0b609908972af691a41c 100644 (file)
@@ -213,7 +213,7 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
        mlx5e_ptpsq_mark_ts_cqes_undelivered(ptpsq, hwtstamp);
 out:
        napi_consume_skb(skb, budget);
-       md_buff[*md_buff_sz++] = metadata_id;
+       md_buff[(*md_buff_sz)++] = metadata_id;
        if (unlikely(mlx5e_ptp_metadata_map_unhealthy(&ptpsq->metadata_map)) &&
            !test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
                queue_work(ptpsq->txqsq.priv->wq, &ptpsq->report_unhealthy_work);
index 161c5190c236a0d8d048bd6a253a36cdeb12b9bc..05612d9c6080c776e9bdded54d9848f8829748fa 100644 (file)
@@ -336,12 +336,17 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
        /* iv len */
        aes_gcm->icv_len = x->aead->alg_icv_len;
 
+       attrs->dir = x->xso.dir;
+
        /* esn */
        if (x->props.flags & XFRM_STATE_ESN) {
                attrs->replay_esn.trigger = true;
                attrs->replay_esn.esn = sa_entry->esn_state.esn;
                attrs->replay_esn.esn_msb = sa_entry->esn_state.esn_msb;
                attrs->replay_esn.overlap = sa_entry->esn_state.overlap;
+               if (attrs->dir == XFRM_DEV_OFFLOAD_OUT)
+                       goto skip_replay_window;
+
                switch (x->replay_esn->replay_window) {
                case 32:
                        attrs->replay_esn.replay_window =
@@ -365,7 +370,7 @@ void mlx5e_ipsec_build_accel_xfrm_attrs(struct mlx5e_ipsec_sa_entry *sa_entry,
                }
        }
 
-       attrs->dir = x->xso.dir;
+skip_replay_window:
        /* spi */
        attrs->spi = be32_to_cpu(x->id.spi);
 
@@ -501,7 +506,8 @@ static int mlx5e_xfrm_validate_state(struct mlx5_core_dev *mdev,
                        return -EINVAL;
                }
 
-               if (x->replay_esn && x->replay_esn->replay_window != 32 &&
+               if (x->replay_esn && x->xso.dir == XFRM_DEV_OFFLOAD_IN &&
+                   x->replay_esn->replay_window != 32 &&
                    x->replay_esn->replay_window != 64 &&
                    x->replay_esn->replay_window != 128 &&
                    x->replay_esn->replay_window != 256) {
index bb7f86c993e5579735d0310aa70b5c53b3a3ae9e..e66f486faafe1a6b0cfc75f0f11b2e957b040842 100644 (file)
@@ -254,11 +254,13 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
 
        ft->g = kcalloc(MLX5E_ARFS_NUM_GROUPS,
                        sizeof(*ft->g), GFP_KERNEL);
-       in = kvzalloc(inlen, GFP_KERNEL);
-       if  (!in || !ft->g) {
-               kfree(ft->g);
-               kvfree(in);
+       if (!ft->g)
                return -ENOMEM;
+
+       in = kvzalloc(inlen, GFP_KERNEL);
+       if (!in) {
+               err = -ENOMEM;
+               goto err_free_g;
        }
 
        mc = MLX5_ADDR_OF(create_flow_group_in, in, match_criteria);
@@ -278,7 +280,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
                break;
        default:
                err = -EINVAL;
-               goto out;
+               goto err_free_in;
        }
 
        switch (type) {
@@ -300,7 +302,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
                break;
        default:
                err = -EINVAL;
-               goto out;
+               goto err_free_in;
        }
 
        MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
@@ -309,7 +311,7 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
-               goto err;
+               goto err_clean_group;
        ft->num_groups++;
 
        memset(in, 0, inlen);
@@ -318,18 +320,20 @@ static int arfs_create_groups(struct mlx5e_flow_table *ft,
        MLX5_SET_CFG(in, end_flow_index, ix - 1);
        ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
        if (IS_ERR(ft->g[ft->num_groups]))
-               goto err;
+               goto err_clean_group;
        ft->num_groups++;
 
        kvfree(in);
        return 0;
 
-err:
+err_clean_group:
        err = PTR_ERR(ft->g[ft->num_groups]);
        ft->g[ft->num_groups] = NULL;
-out:
+err_free_in:
        kvfree(in);
-
+err_free_g:
+       kfree(ft->g);
+       ft->g = NULL;
        return err;
 }
 
index 67f546683e85a3fa0bed05baab33790c66eb9168..6ed3a32b7e226d497234e4fa7b244bf9629b5710 100644 (file)
@@ -95,7 +95,7 @@ static void mlx5e_destroy_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PO
 {
        int tc, i;
 
-       for (i = 0; i < MLX5_MAX_PORTS; i++)
+       for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++)
                for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++)
                        mlx5e_destroy_tis(mdev, tisn[i][tc]);
 }
@@ -110,7 +110,7 @@ static int mlx5e_create_tises(struct mlx5_core_dev *mdev, u32 tisn[MLX5_MAX_PORT
        int tc, i;
        int err;
 
-       for (i = 0; i < MLX5_MAX_PORTS; i++) {
+       for (i = 0; i < mlx5e_get_num_lag_ports(mdev); i++) {
                for (tc = 0; tc < MLX5_MAX_NUM_TC; tc++) {
                        u32 in[MLX5_ST_SZ_DW(create_tis_in)] = {};
                        void *tisc;
@@ -140,7 +140,7 @@ err_close_tises:
        return err;
 }
 
-int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
+int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev, bool create_tises)
 {
        struct mlx5e_hw_objs *res = &mdev->mlx5e_res.hw_objs;
        int err;
@@ -169,11 +169,15 @@ int mlx5e_create_mdev_resources(struct mlx5_core_dev *mdev)
                goto err_destroy_mkey;
        }
 
-       err = mlx5e_create_tises(mdev, res->tisn);
-       if (err) {
-               mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
-               goto err_destroy_bfreg;
+       if (create_tises) {
+               err = mlx5e_create_tises(mdev, res->tisn);
+               if (err) {
+                       mlx5_core_err(mdev, "alloc tises failed, %d\n", err);
+                       goto err_destroy_bfreg;
+               }
+               res->tisn_valid = true;
        }
+
        INIT_LIST_HEAD(&res->td.tirs_list);
        mutex_init(&res->td.list_lock);
 
@@ -203,7 +207,8 @@ void mlx5e_destroy_mdev_resources(struct mlx5_core_dev *mdev)
 
        mlx5_crypto_dek_cleanup(mdev->mlx5e_res.dek_priv);
        mdev->mlx5e_res.dek_priv = NULL;
-       mlx5e_destroy_tises(mdev, res->tisn);
+       if (res->tisn_valid)
+               mlx5e_destroy_tises(mdev, res->tisn);
        mlx5_free_bfreg(mdev, &res->bfreg);
        mlx5_core_destroy_mkey(mdev, res->mkey);
        mlx5_core_dealloc_transport_domain(mdev, res->td.tdn);
index b5f1c4ca38bac97d860ed2bb5f37eac332e6e24c..c8e8f512803efb7aea48e90259e852a55882403c 100644 (file)
@@ -5992,7 +5992,7 @@ static int mlx5e_resume(struct auxiliary_device *adev)
        if (netif_device_present(netdev))
                return 0;
 
-       err = mlx5e_create_mdev_resources(mdev);
+       err = mlx5e_create_mdev_resources(mdev, true);
        if (err)
                return err;
 
index 30932c9c9a8f08bca2c8025f0a5685f79695e54d..9fb2c057bd78723420478d93001e74e7599d646e 100644 (file)
@@ -761,7 +761,7 @@ static int mlx5e_hairpin_create_indirect_rqt(struct mlx5e_hairpin *hp)
 
        err = mlx5e_rss_params_indir_init(&indir, mdev,
                                          mlx5e_rqt_size(mdev, hp->num_channels),
-                                         mlx5e_rqt_size(mdev, priv->max_nch));
+                                         mlx5e_rqt_size(mdev, hp->num_channels));
        if (err)
                return err;
 
@@ -2014,9 +2014,10 @@ static void mlx5e_tc_del_fdb_peer_flow(struct mlx5e_tc_flow *flow,
        list_for_each_entry_safe(peer_flow, tmp, &flow->peer_flows, peer_flows) {
                if (peer_index != mlx5_get_dev_index(peer_flow->priv->mdev))
                        continue;
+
+               list_del(&peer_flow->peer_flows);
                if (refcount_dec_and_test(&peer_flow->refcnt)) {
                        mlx5e_tc_del_fdb_flow(peer_flow->priv, peer_flow);
-                       list_del(&peer_flow->peer_flows);
                        kfree(peer_flow);
                }
        }
index a7ed87e9d8426befdbda753b52732400e003f1b8..22dd30cf8033f93134d08ed77fe32dc73b6bbaf2 100644 (file)
@@ -83,6 +83,7 @@ mlx5_esw_bridge_mdb_flow_create(u16 esw_owner_vhca_id, struct mlx5_esw_bridge_md
                i++;
        }
 
+       rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
        rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
        dmac_v = MLX5_ADDR_OF(fte_match_param, rule_spec->match_value, outer_headers.dmac_47_16);
        ether_addr_copy(dmac_v, entry->key.addr);
@@ -587,6 +588,7 @@ mlx5_esw_bridge_mcast_vlan_flow_create(u16 vlan_proto, struct mlx5_esw_bridge_po
        if (!rule_spec)
                return ERR_PTR(-ENOMEM);
 
+       rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
        rule_spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
 
        flow_act.action |= MLX5_FLOW_CONTEXT_ACTION_PACKET_REFORMAT;
@@ -662,6 +664,7 @@ mlx5_esw_bridge_mcast_fwd_flow_create(struct mlx5_esw_bridge_port *port)
                dest.vport.flags = MLX5_FLOW_DEST_VPORT_VHCA_ID;
                dest.vport.vhca_id = port->esw_owner_vhca_id;
        }
+       rule_spec->flow_context.flags |= FLOW_CONTEXT_UPLINK_HAIRPIN_EN;
        handle = mlx5_add_flow_rules(port->mcast.ft, rule_spec, &flow_act, &dest, 1);
 
        kvfree(rule_spec);
index 1616a6144f7b42d4c7415bc02a05dbf63c61c420..9b8599c200e2c0990009162b90b1e368e784cdef 100644 (file)
@@ -566,6 +566,8 @@ static int mlx5_cmd_set_fte(struct mlx5_core_dev *dev,
                 fte->flow_context.flow_tag);
        MLX5_SET(flow_context, in_flow_context, flow_source,
                 fte->flow_context.flow_source);
+       MLX5_SET(flow_context, in_flow_context, uplink_hairpin_en,
+                !!(fte->flow_context.flags & FLOW_CONTEXT_UPLINK_HAIRPIN_EN));
 
        MLX5_SET(flow_context, in_flow_context, extended_destination,
                 extended_dest);
index 58845121954c19db3bdc454046c161654ef37308..d77be1b4dd9c557b70ba74e3ebb37ac7994a4486 100644 (file)
@@ -783,7 +783,7 @@ static int mlx5_rdma_setup_rn(struct ib_device *ibdev, u32 port_num,
                }
 
                /* This should only be called once per mdev */
-               err = mlx5e_create_mdev_resources(mdev);
+               err = mlx5e_create_mdev_resources(mdev, false);
                if (err)
                        goto destroy_ht;
        }
index 40c7be12404168094e60d0ca5dbedbde77ea1402..58bd749b5e4de07a19320e223a0103b8ae7ded25 100644 (file)
@@ -98,7 +98,7 @@ static int create_aso_cq(struct mlx5_aso_cq *cq, void *cqc_data)
        mlx5_fill_page_frag_array(&cq->wq_ctrl.buf,
                                  (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));
 
-       MLX5_SET(cqc,   cqc, cq_period_mode, DIM_CQ_PERIOD_MODE_START_FROM_EQE);
+       MLX5_SET(cqc,   cqc, cq_period_mode, MLX5_CQ_PERIOD_MODE_START_FROM_EQE);
        MLX5_SET(cqc,   cqc, c_eqn_or_apu_element, eqn);
        MLX5_SET(cqc,   cqc, uar_page,      mdev->priv.uar->index);
        MLX5_SET(cqc,   cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
index 6f9790e97fed20821f48392732a90d12e2450a01..2ebb61ef3ea9f6a906601b41c723ba9f7834afda 100644 (file)
@@ -788,6 +788,7 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                switch (action_type) {
                case DR_ACTION_TYP_DROP:
                        attr.final_icm_addr = nic_dmn->drop_icm_addr;
+                       attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
                        break;
                case DR_ACTION_TYP_FT:
                        dest_action = action;
@@ -873,11 +874,17 @@ int mlx5dr_actions_build_ste_arr(struct mlx5dr_matcher *matcher,
                                                        action->sampler->tx_icm_addr;
                        break;
                case DR_ACTION_TYP_VPORT:
-                       attr.hit_gvmi = action->vport->caps->vhca_gvmi;
-                       dest_action = action;
-                       attr.final_icm_addr = rx_rule ?
-                               action->vport->caps->icm_address_rx :
-                               action->vport->caps->icm_address_tx;
+                       if (unlikely(rx_rule && action->vport->caps->num == MLX5_VPORT_UPLINK)) {
+                               /* can't go to uplink on RX rule - dropping instead */
+                               attr.final_icm_addr = nic_dmn->drop_icm_addr;
+                               attr.hit_gvmi = nic_dmn->drop_icm_addr >> 48;
+                       } else {
+                               attr.hit_gvmi = action->vport->caps->vhca_gvmi;
+                               dest_action = action;
+                               attr.final_icm_addr = rx_rule ?
+                                                     action->vport->caps->icm_address_rx :
+                                                     action->vport->caps->icm_address_tx;
+                       }
                        break;
                case DR_ACTION_TYP_POP_VLAN:
                        if (!rx_rule && !(dmn->ste_ctx->actions_caps &
index 21753f32786850bd010bded5a13db6eb83fa3ade..1005bb6935b65c0d6bb2b68f71744c2857085eed 100644 (file)
@@ -440,6 +440,27 @@ out:
 }
 EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_system_image_guid);
 
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group)
+{
+       int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+       u32 *out;
+       int err;
+
+       out = kvzalloc(outlen, GFP_KERNEL);
+       if (!out)
+               return -ENOMEM;
+
+       err = mlx5_query_nic_vport_context(mdev, 0, out);
+       if (err)
+               goto out;
+
+       *sd_group = MLX5_GET(query_nic_vport_context_out, out,
+                            nic_vport_context.sd_group);
+out:
+       kvfree(out);
+       return err;
+}
+
 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid)
 {
        u32 *out;
index a0e46369ae158bf51ddcbd562414e172d97af201..b334eb16da23aa49af2a0849dc86127a0a69494a 100644 (file)
@@ -7542,6 +7542,9 @@ int stmmac_dvr_probe(struct device *device,
                dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
                        ERR_PTR(ret));
 
+       /* Wait a bit for the reset to take effect */
+       udelay(10);
+
        /* Init MAC and get the capabilities */
        ret = stmmac_hw_init(priv);
        if (ret)
index 704e949484d0c1684247302fb29f45b7ffa3b3e1..b9b5554ea8620ed7249fbdc8870779c6ad658f1b 100644 (file)
@@ -221,21 +221,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
 
        mem_size = FJES_DEV_REQ_BUF_SIZE(hw->max_epid);
        hw->hw_info.req_buf = kzalloc(mem_size, GFP_KERNEL);
-       if (!(hw->hw_info.req_buf))
-               return -ENOMEM;
+       if (!(hw->hw_info.req_buf)) {
+               result = -ENOMEM;
+               goto free_ep_info;
+       }
 
        hw->hw_info.req_buf_size = mem_size;
 
        mem_size = FJES_DEV_RES_BUF_SIZE(hw->max_epid);
        hw->hw_info.res_buf = kzalloc(mem_size, GFP_KERNEL);
-       if (!(hw->hw_info.res_buf))
-               return -ENOMEM;
+       if (!(hw->hw_info.res_buf)) {
+               result = -ENOMEM;
+               goto free_req_buf;
+       }
 
        hw->hw_info.res_buf_size = mem_size;
 
        result = fjes_hw_alloc_shared_status_region(hw);
        if (result)
-               return result;
+               goto free_res_buf;
 
        hw->hw_info.buffer_share_bit = 0;
        hw->hw_info.buffer_unshare_reserve_bit = 0;
@@ -246,11 +250,11 @@ static int fjes_hw_setup(struct fjes_hw *hw)
 
                        result = fjes_hw_alloc_epbuf(&buf_pair->tx);
                        if (result)
-                               return result;
+                               goto free_epbuf;
 
                        result = fjes_hw_alloc_epbuf(&buf_pair->rx);
                        if (result)
-                               return result;
+                               goto free_epbuf;
 
                        spin_lock_irqsave(&hw->rx_status_lock, flags);
                        fjes_hw_setup_epbuf(&buf_pair->tx, mac,
@@ -273,6 +277,25 @@ static int fjes_hw_setup(struct fjes_hw *hw)
        fjes_hw_init_command_registers(hw, &param);
 
        return 0;
+
+free_epbuf:
+       for (epidx = 0; epidx < hw->max_epid ; epidx++) {
+               if (epidx == hw->my_epid)
+                       continue;
+               fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].tx);
+               fjes_hw_free_epbuf(&hw->ep_shm_info[epidx].rx);
+       }
+       fjes_hw_free_shared_status_region(hw);
+free_res_buf:
+       kfree(hw->hw_info.res_buf);
+       hw->hw_info.res_buf = NULL;
+free_req_buf:
+       kfree(hw->hw_info.req_buf);
+       hw->hw_info.req_buf = NULL;
+free_ep_info:
+       kfree(hw->ep_shm_info);
+       hw->ep_shm_info = NULL;
+       return result;
 }
 
 static void fjes_hw_cleanup(struct fjes_hw *hw)
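The fjes hunk above converts bare error returns into the usual kernel unwind ladder, so everything allocated before a failure is released in reverse order. The shape of the pattern, with hypothetical names (struct ctx, REQ_SZ and RES_SZ are placeholders):

    static int setup(struct ctx *c)
    {
            int err;

            c->req = kzalloc(REQ_SZ, GFP_KERNEL);
            if (!c->req)
                    return -ENOMEM;

            c->res = kzalloc(RES_SZ, GFP_KERNEL);
            if (!c->res) {
                    err = -ENOMEM;
                    goto free_req;  /* frees everything allocated so far */
            }

            return 0;

    free_req:
            kfree(c->req);
            c->req = NULL;
            return err;
    }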
index 4406427d4617d58d300be5c46a368df5223d2219..273bd8a20122cdbec238326febb0227ca7889c8d 100644 (file)
@@ -44,7 +44,7 @@
 
 static unsigned int ring_size __ro_after_init = 128;
 module_param(ring_size, uint, 0444);
-MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
+MODULE_PARM_DESC(ring_size, "Ring buffer size (# of 4K pages)");
 unsigned int netvsc_ring_bytes __ro_after_init;
 
 static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
@@ -2807,7 +2807,7 @@ static int __init netvsc_drv_init(void)
                pr_info("Increased ring_size to %u (min allowed)\n",
                        ring_size);
        }
-       netvsc_ring_bytes = ring_size * PAGE_SIZE;
+       netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096);
 
        register_netdevice_notifier(&netvsc_netdev_notifier);
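The netvsc change pins the ring_size unit to 4 KiB chunks instead of PAGE_SIZE, so the module parameter means the same thing on every architecture. Back-of-the-envelope, assuming a 64 KiB-page arm64 kernel and the default ring_size of 128:

    /* old: ring_size * PAGE_SIZE -> 128 * 65536 = 8 MiB per VMBus ring
     * new: ring_size * 4096      -> 128 * 4096  = 512 KiB, PAGE_SIZE-independent
     * (VMBUS_RING_SIZE() then adds the ring header and page-aligns the result.)
     */
    netvsc_ring_bytes = VMBUS_RING_SIZE(ring_size * 4096);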
 
index e34816638569e4e11d7a554a7f0fdc1fe6cb07b9..7f5426285c61b1e35afd74d4c044f80c77f34e7f 100644 (file)
@@ -607,11 +607,26 @@ static struct sk_buff *macsec_encrypt(struct sk_buff *skb,
                return ERR_PTR(-EINVAL);
        }
 
-       ret = skb_ensure_writable_head_tail(skb, dev);
-       if (unlikely(ret < 0)) {
-               macsec_txsa_put(tx_sa);
-               kfree_skb(skb);
-               return ERR_PTR(ret);
+       if (unlikely(skb_headroom(skb) < MACSEC_NEEDED_HEADROOM ||
+                    skb_tailroom(skb) < MACSEC_NEEDED_TAILROOM)) {
+               struct sk_buff *nskb = skb_copy_expand(skb,
+                                                      MACSEC_NEEDED_HEADROOM,
+                                                      MACSEC_NEEDED_TAILROOM,
+                                                      GFP_ATOMIC);
+               if (likely(nskb)) {
+                       consume_skb(skb);
+                       skb = nskb;
+               } else {
+                       macsec_txsa_put(tx_sa);
+                       kfree_skb(skb);
+                       return ERR_PTR(-ENOMEM);
+               }
+       } else {
+               skb = skb_unshare(skb, GFP_ATOMIC);
+               if (!skb) {
+                       macsec_txsa_put(tx_sa);
+                       return ERR_PTR(-ENOMEM);
+               }
        }
 
        unprotected_len = skb->len;
index 81c20eb4b54b918517866a262404e3641988a66d..dad720138baafc57f3b7efb9afd36d82ec5a1b83 100644 (file)
  */
 #define LAN8814_1PPM_FORMAT                    17179
 
+#define PTP_RX_VERSION                         0x0248
+#define PTP_TX_VERSION                         0x0288
+#define PTP_MAX_VERSION(x)                     (((x) & GENMASK(7, 0)) << 8)
+#define PTP_MIN_VERSION(x)                     ((x) & GENMASK(7, 0))
+
 #define PTP_RX_MOD                             0x024F
 #define PTP_RX_MOD_BAD_UDPV4_CHKSUM_FORCE_FCS_DIS_ BIT(3)
 #define PTP_RX_TIMESTAMP_EN                    0x024D
@@ -3150,6 +3155,12 @@ static void lan8814_ptp_init(struct phy_device *phydev)
        lanphy_write_page_reg(phydev, 5, PTP_TX_PARSE_IP_ADDR_EN, 0);
        lanphy_write_page_reg(phydev, 5, PTP_RX_PARSE_IP_ADDR_EN, 0);
 
+       /* Disable checking for minorVersionPTP field */
+       lanphy_write_page_reg(phydev, 5, PTP_RX_VERSION,
+                             PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
+       lanphy_write_page_reg(phydev, 5, PTP_TX_VERSION,
+                             PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0));
+
        skb_queue_head_init(&ptp_priv->tx_queue);
        skb_queue_head_init(&ptp_priv->rx_queue);
        INIT_LIST_HEAD(&ptp_priv->rx_ts_list);
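Given the macros added above (max version in bits 15:8, min version in bits 7:0), both register writes compute to the same value; checking the arithmetic, on the assumption that the register layout matches the macros:

    u16 val = PTP_MAX_VERSION(0xff) | PTP_MIN_VERSION(0x0);
    /* = ((0xff & GENMASK(7, 0)) << 8) | (0x0 & GENMASK(7, 0)) = 0xff00,
     * i.e. accept any versionPTP from 0x00 through 0xff, which is what
     * disabling the minorVersionPTP check amounts to here.
     */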
index afa5497f7c35c3ab5682e66440afc8a888d14414..4a4f8c8e79fa12dc84a8c83cefbf964dd40e1aa2 100644 (file)
@@ -1630,13 +1630,19 @@ static int tun_xdp_act(struct tun_struct *tun, struct bpf_prog *xdp_prog,
        switch (act) {
        case XDP_REDIRECT:
                err = xdp_do_redirect(tun->dev, xdp, xdp_prog);
-               if (err)
+               if (err) {
+                       dev_core_stats_rx_dropped_inc(tun->dev);
                        return err;
+               }
+               dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
                break;
        case XDP_TX:
                err = tun_xdp_tx(tun->dev, xdp);
-               if (err < 0)
+               if (err < 0) {
+                       dev_core_stats_rx_dropped_inc(tun->dev);
                        return err;
+               }
+               dev_sw_netstats_rx_add(tun->dev, xdp->data_end - xdp->data);
                break;
        case XDP_PASS:
                break;
index 7e3b6779f4e969369a9e6713b9235241efa9ac44..02e160d831bed13f3358034048ce5b03d36dc090 100644 (file)
@@ -368,10 +368,6 @@ struct ath11k_vif {
        struct ieee80211_chanctx_conf chanctx;
        struct ath11k_arp_ns_offload arp_ns_offload;
        struct ath11k_rekey_data rekey_data;
-
-#ifdef CONFIG_ATH11K_DEBUGFS
-       struct dentry *debugfs_twt;
-#endif /* CONFIG_ATH11K_DEBUGFS */
 };
 
 struct ath11k_vif_iter {
index a847bc0d50c0f0b955e93947e49b771d41756ea1..a48e737ef35d661f670373617bef8f0525358543 100644 (file)
@@ -1894,35 +1894,30 @@ static const struct file_operations ath11k_fops_twt_resume_dialog = {
        .open = simple_open
 };
 
-void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
+void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif)
 {
+       struct ath11k_vif *arvif = ath11k_vif_to_arvif(vif);
        struct ath11k_base *ab = arvif->ar->ab;
+       struct dentry *debugfs_twt;
 
        if (arvif->vif->type != NL80211_IFTYPE_AP &&
            !(arvif->vif->type == NL80211_IFTYPE_STATION &&
              test_bit(WMI_TLV_SERVICE_STA_TWT, ab->wmi_ab.svc_map)))
                return;
 
-       arvif->debugfs_twt = debugfs_create_dir("twt",
-                                               arvif->vif->debugfs_dir);
-       debugfs_create_file("add_dialog", 0200, arvif->debugfs_twt,
+       debugfs_twt = debugfs_create_dir("twt",
+                                        arvif->vif->debugfs_dir);
+       debugfs_create_file("add_dialog", 0200, debugfs_twt,
                            arvif, &ath11k_fops_twt_add_dialog);
 
-       debugfs_create_file("del_dialog", 0200, arvif->debugfs_twt,
+       debugfs_create_file("del_dialog", 0200, debugfs_twt,
                            arvif, &ath11k_fops_twt_del_dialog);
 
-       debugfs_create_file("pause_dialog", 0200, arvif->debugfs_twt,
+       debugfs_create_file("pause_dialog", 0200, debugfs_twt,
                            arvif, &ath11k_fops_twt_pause_dialog);
 
-       debugfs_create_file("resume_dialog", 0200, arvif->debugfs_twt,
+       debugfs_create_file("resume_dialog", 0200, debugfs_twt,
                            arvif, &ath11k_fops_twt_resume_dialog);
 }
 
-void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
-{
-       if (!arvif->debugfs_twt)
-               return;
-
-       debugfs_remove_recursive(arvif->debugfs_twt);
-       arvif->debugfs_twt = NULL;
-}
index 44d15845f39a6735f3ef15224ea12ace13079ef4..a39e458637b01366b430e138bbc53126196b512f 100644 (file)
@@ -307,8 +307,8 @@ static inline int ath11k_debugfs_rx_filter(struct ath11k *ar)
        return ar->debug.rx_filter;
 }
 
-void ath11k_debugfs_add_interface(struct ath11k_vif *arvif);
-void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif);
+void ath11k_debugfs_op_vif_add(struct ieee80211_hw *hw,
+                              struct ieee80211_vif *vif);
 void ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
                                     enum wmi_direct_buffer_module id,
                                     enum ath11k_dbg_dbr_event event,
@@ -387,14 +387,6 @@ static inline int ath11k_debugfs_get_fw_stats(struct ath11k *ar,
        return 0;
 }
 
-static inline void ath11k_debugfs_add_interface(struct ath11k_vif *arvif)
-{
-}
-
-static inline void ath11k_debugfs_remove_interface(struct ath11k_vif *arvif)
-{
-}
-
 static inline void
 ath11k_debugfs_add_dbring_entry(struct ath11k *ar,
                                enum wmi_direct_buffer_module id,
index db241589424d519607429b34ffd9946b32c525a9..b13525bbbb8087acbdc15247a0a428a74fd5f8b9 100644 (file)
@@ -6756,13 +6756,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
                goto err;
        }
 
-       /* In the case of hardware recovery, debugfs files are
-        * not deleted since ieee80211_ops.remove_interface() is
-        * not invoked. In such cases, try to delete the files.
-        * These will be re-created later.
-        */
-       ath11k_debugfs_remove_interface(arvif);
-
        memset(arvif, 0, sizeof(*arvif));
 
        arvif->ar = ar;
@@ -6939,8 +6932,6 @@ static int ath11k_mac_op_add_interface(struct ieee80211_hw *hw,
 
        ath11k_dp_vdev_tx_attach(ar, arvif);
 
-       ath11k_debugfs_add_interface(arvif);
-
        if (vif->type != NL80211_IFTYPE_MONITOR &&
            test_bit(ATH11K_FLAG_MONITOR_CONF_ENABLED, &ar->monitor_flags)) {
                ret = ath11k_mac_monitor_vdev_create(ar);
@@ -7056,8 +7047,6 @@ err_vdev_del:
        /* Recalc txpower for remaining vdev */
        ath11k_mac_txpower_recalc(ar);
 
-       ath11k_debugfs_remove_interface(arvif);
-
        /* TODO: recal traffic pause state based on the available vdevs */
 
        mutex_unlock(&ar->conf_mutex);
@@ -9153,6 +9142,7 @@ static const struct ieee80211_ops ath11k_ops = {
 #endif
 
 #ifdef CONFIG_ATH11K_DEBUGFS
+       .vif_add_debugfs                = ath11k_debugfs_op_vif_add,
        .sta_add_debugfs                = ath11k_debugfs_sta_op_add,
 #endif
 
index 3b14f647674350e3fdef138eb880b07df6eb5770..72075720969c06b2378d84d48305e84df467f201 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (C) 2018-2023 Intel Corporation
+ * Copyright (C) 2018-2024 Intel Corporation
  */
 #include <linux/firmware.h>
 #include "iwl-drv.h"
@@ -1096,7 +1096,7 @@ static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
                node_trig = (void *)node_tlv->data;
        }
 
-       memcpy(node_trig->data + offset, trig->data, trig_data_len);
+       memcpy((u8 *)node_trig->data + offset, trig->data, trig_data_len);
        node_tlv->length = cpu_to_le32(size);
 
        if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
index b52cce38115d0a9e5e7655af324fdc3e266f6011..c4fe70e05b9b87771613d8569b216a1cf91ac550 100644 (file)
@@ -125,7 +125,7 @@ int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw)
                           "FW rev %s - Softmac protocol %x.%x\n",
                           fw_version, priv->fw_var >> 8, priv->fw_var & 0xff);
                snprintf(dev->wiphy->fw_version, sizeof(dev->wiphy->fw_version),
-                               "%s - %x.%x", fw_version,
+                               "%.19s - %x.%x", fw_version,
                                priv->fw_var >> 8, priv->fw_var & 0xff);
        }
 
index 88f760a7cbc35469e20be2d09f9b2cfb92b8362a..d7503aef599f04bec326900fe918a974e55bc5cc 100644 (file)
@@ -463,12 +463,25 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
        }
 
        for (shinfo->nr_frags = 0; nr_slots > 0 && shinfo->nr_frags < MAX_SKB_FRAGS;
-            shinfo->nr_frags++, gop++, nr_slots--) {
+            nr_slots--) {
+               if (unlikely(!txp->size)) {
+                       unsigned long flags;
+
+                       spin_lock_irqsave(&queue->response_lock, flags);
+                       make_tx_response(queue, txp, 0, XEN_NETIF_RSP_OKAY);
+                       push_tx_responses(queue);
+                       spin_unlock_irqrestore(&queue->response_lock, flags);
+                       ++txp;
+                       continue;
+               }
+
                index = pending_index(queue->pending_cons++);
                pending_idx = queue->pending_ring[index];
                xenvif_tx_create_map_op(queue, pending_idx, txp,
                                        txp == first ? extra_count : 0, gop);
                frag_set_pending_idx(&frags[shinfo->nr_frags], pending_idx);
+               ++shinfo->nr_frags;
+               ++gop;
 
                if (txp == first)
                        txp = txfrags;
@@ -481,20 +494,39 @@ static void xenvif_get_requests(struct xenvif_queue *queue,
                shinfo = skb_shinfo(nskb);
                frags = shinfo->frags;
 
-               for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots;
-                    shinfo->nr_frags++, txp++, gop++) {
+               for (shinfo->nr_frags = 0; shinfo->nr_frags < nr_slots; ++txp) {
+                       if (unlikely(!txp->size)) {
+                               unsigned long flags;
+
+                               spin_lock_irqsave(&queue->response_lock, flags);
+                               make_tx_response(queue, txp, 0,
+                                                XEN_NETIF_RSP_OKAY);
+                               push_tx_responses(queue);
+                               spin_unlock_irqrestore(&queue->response_lock,
+                                                      flags);
+                               continue;
+                       }
+
                        index = pending_index(queue->pending_cons++);
                        pending_idx = queue->pending_ring[index];
                        xenvif_tx_create_map_op(queue, pending_idx, txp, 0,
                                                gop);
                        frag_set_pending_idx(&frags[shinfo->nr_frags],
                                             pending_idx);
+                       ++shinfo->nr_frags;
+                       ++gop;
                }
 
-               skb_shinfo(skb)->frag_list = nskb;
-       } else if (nskb) {
+               if (shinfo->nr_frags) {
+                       skb_shinfo(skb)->frag_list = nskb;
+                       nskb = NULL;
+               }
+       }
+
+       if (nskb) {
                /* A frag_list skb was allocated but it is no longer needed
-                * because enough slots were converted to copy ops above.
+                * because enough slots were converted to copy ops above or some
+                * were empty.
                 */
                kfree_skb(nskb);
        }
index 1dd84c7a79de97f44b25c48fd241db068492b4f9..b1995ac268d77a9c56c81ce6e65048e2c465c449 100644 (file)
@@ -1170,7 +1170,7 @@ static int mlxbf_pmc_program_crspace_counter(int blk_num, uint32_t cnt_num,
        int ret;
 
        addr = pmc->block[blk_num].mmio_base +
-               (rounddown(cnt_num, 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
+               ((cnt_num / 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
        ret = mlxbf_pmc_readl(addr, &word);
        if (ret)
                return ret;
@@ -1413,7 +1413,7 @@ static int mlxbf_pmc_read_crspace_event(int blk_num, uint32_t cnt_num,
        int ret;
 
        addr = pmc->block[blk_num].mmio_base +
-               (rounddown(cnt_num, 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
+               ((cnt_num / 2) * MLXBF_PMC_CRSPACE_PERFSEL_SZ);
        ret = mlxbf_pmc_readl(addr, &word);
        if (ret)
                return ret;
index ed16ec422a7b33e9b529bfe1912d0ff687e4db5d..b8d1e32e97ebafaa1d0091d32d110b9854022ff9 100644 (file)
@@ -47,6 +47,9 @@
 /* Message with data needs at least two words (for header & data). */
 #define MLXBF_TMFIFO_DATA_MIN_WORDS            2
 
+/* Tx timeout in milliseconds. */
+#define TMFIFO_TX_TIMEOUT                      2000
+
 /* ACPI UID for BlueField-3. */
 #define TMFIFO_BF3_UID                         1
 
@@ -62,12 +65,14 @@ struct mlxbf_tmfifo;
  * @drop_desc: dummy desc for packet dropping
  * @cur_len: processed length of the current descriptor
  * @rem_len: remaining length of the pending packet
+ * @rem_padding: remaining bytes to send as paddings
  * @pkt_len: total length of the pending packet
  * @next_avail: next avail descriptor id
  * @num: vring size (number of descriptors)
  * @align: vring alignment size
  * @index: vring index
  * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
+ * @tx_timeout: expire time of last tx packet
  * @fifo: pointer to the tmfifo structure
  */
 struct mlxbf_tmfifo_vring {
@@ -79,12 +84,14 @@ struct mlxbf_tmfifo_vring {
        struct vring_desc drop_desc;
        int cur_len;
        int rem_len;
+       int rem_padding;
        u32 pkt_len;
        u16 next_avail;
        int num;
        int align;
        int index;
        int vdev_id;
+       unsigned long tx_timeout;
        struct mlxbf_tmfifo *fifo;
 };
 
@@ -819,6 +826,50 @@ mlxbf_tmfifo_desc_done:
        return true;
 }
 
+static void mlxbf_tmfifo_check_tx_timeout(struct mlxbf_tmfifo_vring *vring)
+{
+       unsigned long flags;
+
+       /* Only handle Tx timeout for network vdev. */
+       if (vring->vdev_id != VIRTIO_ID_NET)
+               return;
+
+       /* Arm the timeout on first use, or return if it has not expired yet. */
+       if (!vring->tx_timeout) {
+               /* First pass for this packet: start the timer. */
+               vring->tx_timeout = jiffies +
+                       msecs_to_jiffies(TMFIFO_TX_TIMEOUT);
+               return;
+       } else if (time_before(jiffies, vring->tx_timeout)) {
+               /* Not timed out yet. */
+               return;
+       }
+       }
+
+       /*
+        * Drop the packet after the timeout. The outstanding packet is
+        * released, and the remaining bytes are sent out as 0x00 padding
+        * to recover. On the peer (host) side, the padding bytes are either
+        * dropped directly or appended to an existing outstanding packet,
+        * which is then discarded as a corrupted network packet.
+        */
+       vring->rem_padding = round_up(vring->rem_len, sizeof(u64));
+       mlxbf_tmfifo_release_pkt(vring);
+       vring->cur_len = 0;
+       vring->rem_len = 0;
+       vring->fifo->vring[0] = NULL;
+
+       /*
+        * Make sure the loads/stores above complete in order before
+        * returning to virtio.
+        */
+       virtio_mb(false);
+
+       /* Notify upper layer. */
+       spin_lock_irqsave(&vring->fifo->spin_lock[0], flags);
+       vring_interrupt(0, vring->vq);
+       spin_unlock_irqrestore(&vring->fifo->spin_lock[0], flags);
+}
+
 /* Rx & Tx processing of a queue. */
 static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
 {
@@ -841,6 +892,7 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
                return;
 
        do {
+retry:
                /* Get available FIFO space. */
                if (avail == 0) {
                        if (is_rx)
@@ -851,6 +903,17 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
                                break;
                }
 
+               /* Insert paddings for discarded Tx packet. */
+               if (!is_rx) {
+                       vring->tx_timeout = 0;
+                       while (vring->rem_padding >= sizeof(u64)) {
+                               writeq(0, vring->fifo->tx.data);
+                               vring->rem_padding -= sizeof(u64);
+                               if (--avail == 0)
+                                       goto retry;
+                       }
+               }
+
                /* Console output always comes from the Tx buffer. */
                if (!is_rx && devid == VIRTIO_ID_CONSOLE) {
                        mlxbf_tmfifo_console_tx(fifo, avail);
@@ -860,6 +923,10 @@ static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
                /* Handle one descriptor. */
                more = mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &avail);
        } while (more);
+
+       /* Check Tx timeout. */
+       if (avail <= 0 && !is_rx)
+               mlxbf_tmfifo_check_tx_timeout(vring);
 }
 
 /* Handle Rx or Tx queues. */
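The recovery path flushes the rest of a timed-out packet as zero words, rounding the remaining byte count up to the FIFO's 64-bit word size. With illustrative numbers:

    /* rem_len = 10 bytes left of the dropped packet:
     * rem_padding = round_up(10, sizeof(u64)) = 16 bytes, i.e. two
     * writeq(0, ...) words pushed into the FIFO on the next Tx pass.
     */
    vring->rem_padding = round_up(vring->rem_len, sizeof(u64));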
index f246252bddd85d97f3232617a53cb437b75a92c0..f4fa8bd8bda832a622078d84e2a7181d5f65cb88 100644 (file)
@@ -10,6 +10,7 @@ config AMD_PMF
        depends on AMD_NB
        select ACPI_PLATFORM_PROFILE
        depends on TEE && AMDTEE
+       depends on AMD_SFH_HID
        help
          This driver provides support for the AMD Platform Management Framework.
          The goal is to enhance end user experience by making AMD PCs smarter,
index a0423942f771e457457cc88cb604913eeb5b2657..a3dec14c30043ecc9c1d109247452d6d19949976 100644 (file)
@@ -10,6 +10,7 @@
  */
 
 #include <acpi/button.h>
+#include <linux/amd-pmf-io.h>
 #include <linux/power_supply.h>
 #include <linux/units.h>
 #include "pmf.h"
@@ -44,6 +45,8 @@ void amd_pmf_dump_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *
        dev_dbg(dev->dev, "Max C0 Residency: %u\n", in->ev_info.max_c0residency);
        dev_dbg(dev->dev, "GFX Busy: %u\n", in->ev_info.gfx_busy);
        dev_dbg(dev->dev, "LID State: %s\n", in->ev_info.lid_state ? "close" : "open");
+       dev_dbg(dev->dev, "User Presence: %s\n", in->ev_info.user_present ? "Present" : "Away");
+       dev_dbg(dev->dev, "Ambient Light: %d\n", in->ev_info.ambient_light);
        dev_dbg(dev->dev, "==== TA inputs END ====\n");
 }
 #else
@@ -147,6 +150,38 @@ static int amd_pmf_get_slider_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_
        return 0;
 }
 
+static int amd_pmf_get_sensor_info(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
+{
+       struct amd_sfh_info sfh_info;
+       int ret;
+
+       /* Get ALS data */
+       ret = amd_get_sfh_info(&sfh_info, MT_ALS);
+       if (!ret)
+               in->ev_info.ambient_light = sfh_info.ambient_light;
+       else
+               return ret;
+
+       /* Get HPD data */
+       ret = amd_get_sfh_info(&sfh_info, MT_HPD);
+       if (ret)
+               return ret;
+
+       switch (sfh_info.user_present) {
+       case SFH_NOT_DETECTED:
+               in->ev_info.user_present = 0xff; /* assume no sensors connected */
+               break;
+       case SFH_USER_PRESENT:
+               in->ev_info.user_present = 1;
+               break;
+       case SFH_USER_AWAY:
+               in->ev_info.user_present = 0;
+               break;
+       }
+
+       return 0;
+}
+
 void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_table *in)
 {
        /* TA side lid open is 1 and close is 0, hence the ! here */
@@ -155,4 +190,5 @@ void amd_pmf_populate_ta_inputs(struct amd_pmf_dev *dev, struct ta_pmf_enact_tab
        amd_pmf_get_smu_info(dev, in);
        amd_pmf_get_battery_info(dev, in);
        amd_pmf_get_slider_info(dev, in);
+       amd_pmf_get_sensor_info(dev, in);
 }
index 502ce93d5cddac57f2f482080ea8a0800ea5123f..f8c0177afb0dae60d4f67f2876ba98c6100d1ceb 100644 (file)
@@ -298,8 +298,10 @@ static ssize_t amd_pmf_get_pb_data(struct file *filp, const char __user *buf,
        if (!new_policy_buf)
                return -ENOMEM;
 
-       if (copy_from_user(new_policy_buf, buf, length))
+       if (copy_from_user(new_policy_buf, buf, length)) {
+               kfree(new_policy_buf);
                return -EFAULT;
+       }
 
        kfree(dev->policy_buf);
        dev->policy_buf = new_policy_buf;
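
This hunk plugs a memory leak: when copy_from_user() fails, the freshly
allocated buffer must be freed before returning, and only a successful
copy may replace the old policy buffer. The allocate/copy/swap idiom as a
standalone sketch (hypothetical helper; needs linux/slab.h and
linux/uaccess.h):

static int replace_buf(void **cur, const char __user *ubuf, size_t len)
{
        void *nbuf = kzalloc(len, GFP_KERNEL);

        if (!nbuf)
                return -ENOMEM;

        if (copy_from_user(nbuf, ubuf, len)) {
                kfree(nbuf);            /* error path owns the new buffer */
                return -EFAULT;
        }

        kfree(*cur);                    /* swap only after the copy succeeded */
        *cur = nbuf;
        return 0;
}
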
index a1ee1a74fc3c4cb7e7bc62cda0297acdbe942d54..2cf3b4a8813f9b30cb5a79aaf2ee6acee2474c68 100644 (file)
@@ -399,7 +399,8 @@ int ifs_load_firmware(struct device *dev)
        if (fw->size != expected_size) {
                dev_err(dev, "File size mismatch (expected %u, actual %zu). Corrupted IFS image.\n",
                        expected_size, fw->size);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto release;
        }
 
        ret = image_sanity_check(dev, (struct microcode_header_intel *)fw->data);
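
The size-mismatch bailout previously returned directly and leaked the
firmware blob; jumping to the function's existing "release" label keeps
the request_firmware()/release_firmware() pairing intact on every exit.
The shape of that pairing, sketched with hypothetical names:

static int load_blob(struct device *dev, const char *name, size_t expected)
{
        const struct firmware *fw;
        int ret;

        ret = request_firmware(&fw, name, dev);
        if (ret)
                return ret;             /* nothing acquired yet */

        if (fw->size != expected) {
                ret = -EINVAL;
                goto release;           /* must not return before releasing */
        }

        /* ... consume fw->data ... */
        ret = 0;
release:
        release_firmware(fw);
        return ret;
}
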
index 33ab207493e3e62946dc5d76c2eda98805725888..33bb58dc3f78c30a304a7a35595666152c34e908 100644 (file)
@@ -23,23 +23,23 @@ static int (*uncore_read)(struct uncore_data *data, unsigned int *min, unsigned
 static int (*uncore_write)(struct uncore_data *data, unsigned int input, unsigned int min_max);
 static int (*uncore_read_freq)(struct uncore_data *data, unsigned int *freq);
 
-static ssize_t show_domain_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_domain_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
-       struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_dev_attr);
+       struct uncore_data *data = container_of(attr, struct uncore_data, domain_id_kobj_attr);
 
        return sprintf(buf, "%u\n", data->domain_id);
 }
 
-static ssize_t show_fabric_cluster_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_fabric_cluster_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
-       struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_dev_attr);
+       struct uncore_data *data = container_of(attr, struct uncore_data, fabric_cluster_id_kobj_attr);
 
        return sprintf(buf, "%u\n", data->cluster_id);
 }
 
-static ssize_t show_package_id(struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t show_package_id(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
 {
-       struct uncore_data *data = container_of(attr, struct uncore_data, package_id_dev_attr);
+       struct uncore_data *data = container_of(attr, struct uncore_data, package_id_kobj_attr);
 
        return sprintf(buf, "%u\n", data->package_id);
 }
@@ -97,30 +97,30 @@ static ssize_t show_perf_status_freq_khz(struct uncore_data *data, char *buf)
 }
 
 #define store_uncore_min_max(name, min_max)                            \
-       static ssize_t store_##name(struct device *dev,         \
-                                    struct device_attribute *attr,     \
+       static ssize_t store_##name(struct kobject *kobj,               \
+                                    struct kobj_attribute *attr,       \
                                     const char *buf, size_t count)     \
        {                                                               \
-               struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
+               struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
                                                                        \
                return store_min_max_freq_khz(data, buf, count, \
                                              min_max);         \
        }
 
 #define show_uncore_min_max(name, min_max)                             \
-       static ssize_t show_##name(struct device *dev,          \
-                                   struct device_attribute *attr, char *buf)\
+       static ssize_t show_##name(struct kobject *kobj,                \
+                                   struct kobj_attribute *attr, char *buf)\
        {                                                               \
-               struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
+               struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
                                                                        \
                return show_min_max_freq_khz(data, buf, min_max);       \
        }
 
 #define show_uncore_perf_status(name)                                  \
-       static ssize_t show_##name(struct device *dev,          \
-                                  struct device_attribute *attr, char *buf)\
+       static ssize_t show_##name(struct kobject *kobj,                \
+                                  struct kobj_attribute *attr, char *buf)\
        {                                                               \
-               struct uncore_data *data = container_of(attr, struct uncore_data, name##_dev_attr);\
+               struct uncore_data *data = container_of(attr, struct uncore_data, name##_kobj_attr);\
                                                                        \
                return show_perf_status_freq_khz(data, buf); \
        }
@@ -134,11 +134,11 @@ show_uncore_min_max(max_freq_khz, 1);
 show_uncore_perf_status(current_freq_khz);
 
 #define show_uncore_data(member_name)                                  \
-       static ssize_t show_##member_name(struct device *dev,   \
-                                          struct device_attribute *attr, char *buf)\
+       static ssize_t show_##member_name(struct kobject *kobj, \
+                                          struct kobj_attribute *attr, char *buf)\
        {                                                               \
                struct uncore_data *data = container_of(attr, struct uncore_data,\
-                                                         member_name##_dev_attr);\
+                                                         member_name##_kobj_attr);\
                                                                        \
                return sysfs_emit(buf, "%u\n",                          \
                                 data->member_name);                    \
@@ -149,29 +149,29 @@ show_uncore_data(initial_max_freq_khz);
 
 #define init_attribute_rw(_name)                                       \
        do {                                                            \
-               sysfs_attr_init(&data->_name##_dev_attr.attr);  \
-               data->_name##_dev_attr.show = show_##_name;             \
-               data->_name##_dev_attr.store = store_##_name;           \
-               data->_name##_dev_attr.attr.name = #_name;              \
-               data->_name##_dev_attr.attr.mode = 0644;                \
+               sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+               data->_name##_kobj_attr.show = show_##_name;            \
+               data->_name##_kobj_attr.store = store_##_name;          \
+               data->_name##_kobj_attr.attr.name = #_name;             \
+               data->_name##_kobj_attr.attr.mode = 0644;               \
        } while (0)
 
 #define init_attribute_ro(_name)                                       \
        do {                                                            \
-               sysfs_attr_init(&data->_name##_dev_attr.attr);  \
-               data->_name##_dev_attr.show = show_##_name;             \
-               data->_name##_dev_attr.store = NULL;                    \
-               data->_name##_dev_attr.attr.name = #_name;              \
-               data->_name##_dev_attr.attr.mode = 0444;                \
+               sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+               data->_name##_kobj_attr.show = show_##_name;            \
+               data->_name##_kobj_attr.store = NULL;                   \
+               data->_name##_kobj_attr.attr.name = #_name;             \
+               data->_name##_kobj_attr.attr.mode = 0444;               \
        } while (0)
 
 #define init_attribute_root_ro(_name)                                  \
        do {                                                            \
-               sysfs_attr_init(&data->_name##_dev_attr.attr);  \
-               data->_name##_dev_attr.show = show_##_name;             \
-               data->_name##_dev_attr.store = NULL;                    \
-               data->_name##_dev_attr.attr.name = #_name;              \
-               data->_name##_dev_attr.attr.mode = 0400;                \
+               sysfs_attr_init(&data->_name##_kobj_attr.attr); \
+               data->_name##_kobj_attr.show = show_##_name;            \
+               data->_name##_kobj_attr.store = NULL;                   \
+               data->_name##_kobj_attr.attr.name = #_name;             \
+               data->_name##_kobj_attr.attr.mode = 0400;               \
        } while (0)
 
 static int create_attr_group(struct uncore_data *data, char *name)
@@ -186,21 +186,21 @@ static int create_attr_group(struct uncore_data *data, char *name)
 
        if (data->domain_id != UNCORE_DOMAIN_ID_INVALID) {
                init_attribute_root_ro(domain_id);
-               data->uncore_attrs[index++] = &data->domain_id_dev_attr.attr;
+               data->uncore_attrs[index++] = &data->domain_id_kobj_attr.attr;
                init_attribute_root_ro(fabric_cluster_id);
-               data->uncore_attrs[index++] = &data->fabric_cluster_id_dev_attr.attr;
+               data->uncore_attrs[index++] = &data->fabric_cluster_id_kobj_attr.attr;
                init_attribute_root_ro(package_id);
-               data->uncore_attrs[index++] = &data->package_id_dev_attr.attr;
+               data->uncore_attrs[index++] = &data->package_id_kobj_attr.attr;
        }
 
-       data->uncore_attrs[index++] = &data->max_freq_khz_dev_attr.attr;
-       data->uncore_attrs[index++] = &data->min_freq_khz_dev_attr.attr;
-       data->uncore_attrs[index++] = &data->initial_min_freq_khz_dev_attr.attr;
-       data->uncore_attrs[index++] = &data->initial_max_freq_khz_dev_attr.attr;
+       data->uncore_attrs[index++] = &data->max_freq_khz_kobj_attr.attr;
+       data->uncore_attrs[index++] = &data->min_freq_khz_kobj_attr.attr;
+       data->uncore_attrs[index++] = &data->initial_min_freq_khz_kobj_attr.attr;
+       data->uncore_attrs[index++] = &data->initial_max_freq_khz_kobj_attr.attr;
 
        ret = uncore_read_freq(data, &freq);
        if (!ret)
-               data->uncore_attrs[index++] = &data->current_freq_khz_dev_attr.attr;
+               data->uncore_attrs[index++] = &data->current_freq_khz_kobj_attr.attr;
 
        data->uncore_attrs[index] = NULL;
 
index 7afb69977c7e8c80b0db3ba819799434e41ff60a..0e5bf507e555209a69ba61e8e8eaaf7392209bfa 100644 (file)
  * @instance_id:       Unique instance id to append to directory name
  * @name:              Sysfs entry name for this instance
  * @uncore_attr_group: Attribute group storage
- * @max_freq_khz_dev_attr: Storage for device attribute max_freq_khz
- * @mix_freq_khz_dev_attr: Storage for device attribute min_freq_khz
- * @initial_max_freq_khz_dev_attr: Storage for device attribute initial_max_freq_khz
- * @initial_min_freq_khz_dev_attr: Storage for device attribute initial_min_freq_khz
- * @current_freq_khz_dev_attr: Storage for device attribute current_freq_khz
- * @domain_id_dev_attr: Storage for device attribute domain_id
- * @fabric_cluster_id_dev_attr: Storage for device attribute fabric_cluster_id
- * @package_id_dev_attr: Storage for device attribute package_id
+ * @max_freq_khz_kobj_attr: Storage for kobject attribute max_freq_khz
+ * @mix_freq_khz_kobj_attr: Storage for kobject attribute min_freq_khz
+ * @initial_max_freq_khz_kobj_attr: Storage for kobject attribute initial_max_freq_khz
+ * @initial_min_freq_khz_kobj_attr: Storage for kobject attribute initial_min_freq_khz
+ * @current_freq_khz_kobj_attr: Storage for kobject attribute current_freq_khz
+ * @domain_id_kobj_attr: Storage for kobject attribute domain_id
+ * @fabric_cluster_id_kobj_attr: Storage for kobject attribute fabric_cluster_id
+ * @package_id_kobj_attr: Storage for kobject attribute package_id
  * @uncore_attrs:      Attribute storage for group creation
  *
  * This structure is used to encapsulate all data related to uncore sysfs
@@ -53,14 +53,14 @@ struct uncore_data {
        char name[32];
 
        struct attribute_group uncore_attr_group;
-       struct device_attribute max_freq_khz_dev_attr;
-       struct device_attribute min_freq_khz_dev_attr;
-       struct device_attribute initial_max_freq_khz_dev_attr;
-       struct device_attribute initial_min_freq_khz_dev_attr;
-       struct device_attribute current_freq_khz_dev_attr;
-       struct device_attribute domain_id_dev_attr;
-       struct device_attribute fabric_cluster_id_dev_attr;
-       struct device_attribute package_id_dev_attr;
+       struct kobj_attribute max_freq_khz_kobj_attr;
+       struct kobj_attribute min_freq_khz_kobj_attr;
+       struct kobj_attribute initial_max_freq_khz_kobj_attr;
+       struct kobj_attribute initial_min_freq_khz_kobj_attr;
+       struct kobj_attribute current_freq_khz_kobj_attr;
+       struct kobj_attribute domain_id_kobj_attr;
+       struct kobj_attribute fabric_cluster_id_kobj_attr;
+       struct kobj_attribute package_id_kobj_attr;
        struct attribute *uncore_attrs[9];
 };
 
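The conversion in these two files suggests the attributes hang off a raw
kobject, where sysfs invokes show()/store() with kobj_attribute
prototypes; keeping device_attribute types only worked by layout
coincidence between the two structures, a type confusion that
indirect-call checking such as kCFI rejects. The corrected pattern, as a
minimal sketch with a hypothetical structure:

struct uncore_like {
        unsigned int domain_id;
        struct kobj_attribute domain_id_kobj_attr;
};

static ssize_t domain_id_show(struct kobject *kobj,
                              struct kobj_attribute *attr, char *buf)
{
        /* Recover the parent structure from the embedded attribute. */
        struct uncore_like *data =
                container_of(attr, struct uncore_like, domain_id_kobj_attr);

        return sysfs_emit(buf, "%u\n", data->domain_id);
}
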
index 9cf5ed0f8dc2848b9f85f59dafd3cd43373bf960..040153ad67c1cb7c36fe85616cb97d10f7f99c46 100644 (file)
@@ -32,7 +32,7 @@ static int get_fwu_request(struct device *dev, u32 *out)
                return -ENODEV;
 
        if (obj->type != ACPI_TYPE_INTEGER) {
-               dev_warn(dev, "wmi_query_block returned invalid value\n");
+               dev_warn(dev, "wmidev_block_query returned invalid value\n");
                kfree(obj);
                return -EINVAL;
        }
@@ -55,7 +55,7 @@ static int set_fwu_request(struct device *dev, u32 in)
 
        status = wmidev_block_set(to_wmi_device(dev), 0, &input);
        if (ACPI_FAILURE(status)) {
-               dev_err(dev, "wmi_set_block failed\n");
+               dev_err(dev, "wmidev_block_set failed\n");
                return -ENODEV;
        }
 
index 1cf2471d54ddef765b017fd079864b3ffb868fdc..6bd14d0132dbd73b1ea497679d4dd8297671859e 100644 (file)
@@ -26,6 +26,21 @@ static const struct x86_cpu_id p2sb_cpu_ids[] = {
        {}
 };
 
+/*
+ * Cache BAR0 of P2SB device functions 0 to 7.
+ * TODO: The constant 8 is the number of functions that the PCI
+ *       specification defines. The same definition exists tree-wide;
+ *       unify those definitions and move the result to
+ *       include/uapi/linux/pci.h.
+ */
+#define NR_P2SB_RES_CACHE 8
+
+struct p2sb_res_cache {
+       u32 bus_dev_id;
+       struct resource res;
+};
+
+static struct p2sb_res_cache p2sb_resources[NR_P2SB_RES_CACHE];
+
 static int p2sb_get_devfn(unsigned int *devfn)
 {
        unsigned int fn = P2SB_DEVFN_DEFAULT;
@@ -39,10 +54,18 @@ static int p2sb_get_devfn(unsigned int *devfn)
        return 0;
 }
 
+static bool p2sb_valid_resource(struct resource *res)
+{
+       if (res->flags)
+               return true;
+
+       return false;
+}
+
 /* Copy resource from the first BAR of the device in question */
-static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
+static void p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
 {
-       struct resource *bar0 = &pdev->resource[0];
+       struct resource *bar0 = pci_resource_n(pdev, 0);
 
        /* Make sure we have no dangling pointers in the output */
        memset(mem, 0, sizeof(*mem));
@@ -56,49 +79,66 @@ static int p2sb_read_bar0(struct pci_dev *pdev, struct resource *mem)
        mem->end = bar0->end;
        mem->flags = bar0->flags;
        mem->desc = bar0->desc;
-
-       return 0;
 }
 
-static int p2sb_scan_and_read(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+static void p2sb_scan_and_cache_devfn(struct pci_bus *bus, unsigned int devfn)
 {
+       struct p2sb_res_cache *cache = &p2sb_resources[PCI_FUNC(devfn)];
        struct pci_dev *pdev;
-       int ret;
 
        pdev = pci_scan_single_device(bus, devfn);
        if (!pdev)
-               return -ENODEV;
+               return;
 
-       ret = p2sb_read_bar0(pdev, mem);
+       p2sb_read_bar0(pdev, &cache->res);
+       cache->bus_dev_id = bus->dev.id;
 
        pci_stop_and_remove_bus_device(pdev);
-       return ret;
 }
 
-/**
- * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
- * @bus: PCI bus to communicate with
- * @devfn: PCI slot and function to communicate with
- * @mem: memory resource to be filled in
- *
- * The BIOS prevents the P2SB device from being enumerated by the PCI
- * subsystem, so we need to unhide and hide it back to lookup the BAR.
- *
- * if @bus is NULL, the bus 0 in domain 0 will be used.
- * If @devfn is 0, it will be replaced by devfn of the P2SB device.
- *
- * Caller must provide a valid pointer to @mem.
- *
- * Locking is handled by pci_rescan_remove_lock mutex.
- *
- * Return:
- * 0 on success or appropriate errno value on error.
- */
-int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+static int p2sb_scan_and_cache(struct pci_bus *bus, unsigned int devfn)
+{
+       unsigned int slot, fn;
+
+       if (PCI_FUNC(devfn) == 0) {
+               /*
+                * When the function number of the P2SB device is zero, scan it
+                * and the other function numbers, and cache the BAR0 of every
+                * device that is present.
+                */
+               slot = PCI_SLOT(devfn);
+               for (fn = 0; fn < NR_P2SB_RES_CACHE; fn++)
+                       p2sb_scan_and_cache_devfn(bus, PCI_DEVFN(slot, fn));
+       } else {
+               /* Scan the P2SB device and cache its BAR0 */
+               p2sb_scan_and_cache_devfn(bus, devfn);
+       }
+
+       if (!p2sb_valid_resource(&p2sb_resources[PCI_FUNC(devfn)].res))
+               return -ENOENT;
+
+       return 0;
+}
+
+static struct pci_bus *p2sb_get_bus(struct pci_bus *bus)
+{
+       static struct pci_bus *p2sb_bus;
+
+       bus = bus ?: p2sb_bus;
+       if (bus)
+               return bus;
+
+       /* Assume P2SB is on the bus 0 in domain 0 */
+       p2sb_bus = pci_find_bus(0, 0);
+       return p2sb_bus;
+}
+
+static int p2sb_cache_resources(void)
 {
-       struct pci_dev *pdev_p2sb;
        unsigned int devfn_p2sb;
        u32 value = P2SBC_HIDE;
+       struct pci_bus *bus;
+       u16 class;
        int ret;
 
        /* Get devfn for P2SB device itself */
@@ -106,8 +146,17 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
        if (ret)
                return ret;
 
-       /* if @bus is NULL, use bus 0 in domain 0 */
-       bus = bus ?: pci_find_bus(0, 0);
+       bus = p2sb_get_bus(NULL);
+       if (!bus)
+               return -ENODEV;
+
+       /*
+        * When a device with the same devfn exists but its device class is
+        * not PCI_CLASS_MEMORY_OTHER, as expected for the P2SB, do not touch it.
+        */
+       pci_bus_read_config_word(bus, devfn_p2sb, PCI_CLASS_DEVICE, &class);
+       if (!PCI_POSSIBLE_ERROR(class) && class != PCI_CLASS_MEMORY_OTHER)
+               return -ENODEV;
 
        /*
         * Prevent concurrent PCI bus scan from seeing the P2SB device and
@@ -115,17 +164,16 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
         */
        pci_lock_rescan_remove();
 
-       /* Unhide the P2SB device, if needed */
+       /*
+        * The BIOS prevents the P2SB device from being enumerated by the PCI
+        * subsystem, so we need to unhide and hide it back to lookup the BAR.
+        * Unhide the P2SB device here, if needed.
+        */
        pci_bus_read_config_dword(bus, devfn_p2sb, P2SBC, &value);
        if (value & P2SBC_HIDE)
                pci_bus_write_config_dword(bus, devfn_p2sb, P2SBC, 0);
 
-       pdev_p2sb = pci_scan_single_device(bus, devfn_p2sb);
-       if (devfn)
-               ret = p2sb_scan_and_read(bus, devfn, mem);
-       else
-               ret = p2sb_read_bar0(pdev_p2sb, mem);
-       pci_stop_and_remove_bus_device(pdev_p2sb);
+       ret = p2sb_scan_and_cache(bus, devfn_p2sb);
 
        /* Hide the P2SB device, if it was hidden */
        if (value & P2SBC_HIDE)
@@ -133,12 +181,62 @@ int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
 
        pci_unlock_rescan_remove();
 
-       if (ret)
-               return ret;
+       return ret;
+}
+
+/**
+ * p2sb_bar - Get Primary to Sideband (P2SB) bridge device BAR
+ * @bus: PCI bus to communicate with
+ * @devfn: PCI slot and function to communicate with
+ * @mem: memory resource to be filled in
+ *
+ * If @bus is NULL, the bus 0 in domain 0 will be used.
+ * If @devfn is 0, it will be replaced by devfn of the P2SB device.
+ *
+ * Caller must provide a valid pointer to @mem.
+ *
+ * Return:
+ * 0 on success or appropriate errno value on error.
+ */
+int p2sb_bar(struct pci_bus *bus, unsigned int devfn, struct resource *mem)
+{
+       struct p2sb_res_cache *cache;
+       int ret;
+
+       bus = p2sb_get_bus(bus);
+       if (!bus)
+               return -ENODEV;
+
+       if (!devfn) {
+               ret = p2sb_get_devfn(&devfn);
+               if (ret)
+                       return ret;
+       }
 
-       if (mem->flags == 0)
+       cache = &p2sb_resources[PCI_FUNC(devfn)];
+       if (cache->bus_dev_id != bus->dev.id)
                return -ENODEV;
 
+       if (!p2sb_valid_resource(&cache->res))
+               return -ENOENT;
+
+       memcpy(mem, &cache->res, sizeof(*mem));
        return 0;
 }
 EXPORT_SYMBOL_GPL(p2sb_bar);
+
+static int __init p2sb_fs_init(void)
+{
+       p2sb_cache_resources();
+       return 0;
+}
+
+/*
+ * pci_rescan_remove_lock, which guards access to the unhidden P2SB
+ * devices, cannot be taken from the sysfs PCI bus rescan path without
+ * deadlocking. To avoid the deadlock, access the P2SB devices with the
+ * lock held at an early step in kernel initialization and cache the
+ * required resources. This should happen after subsys_initcall, which
+ * initializes the PCI subsystem, and before device_initcall, which
+ * requires the P2SB resources.
+ */
+fs_initcall(p2sb_fs_init);
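
Initcall levels run in a fixed order (core, postcore, arch, subsys, fs,
device, late), so caching from an fs_initcall lands after the PCI core is
up and before any device_initcall driver asks for the BAR, as the comment
above spells out. The cache-early, serve-later pattern, sketched with
hypothetical names:

static struct resource cached_bar;      /* filled once, served many times */

static int __init early_cache_init(void)
{
        pci_lock_rescan_remove();       /* safe here: no sysfs rescan yet */
        /* ... unhide the device, copy BAR0 into cached_bar, re-hide ... */
        pci_unlock_rescan_remove();
        return 0;
}
fs_initcall(early_cache_init);

static int serve_bar(struct resource *mem)
{
        if (!cached_bar.flags)          /* never cached: no device found */
                return -ENOENT;

        *mem = cached_bar;
        return 0;
}
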
index 0c6733772698408ef1a23b977d1a0698a19347d5..7aee5e9ff2b8dd5810f83cc0317ed329b4361d2e 100644 (file)
@@ -944,6 +944,32 @@ static const struct ts_dmi_data teclast_tbook11_data = {
        .properties     = teclast_tbook11_props,
 };
 
+static const struct property_entry teclast_x16_plus_props[] = {
+       PROPERTY_ENTRY_U32("touchscreen-min-x", 8),
+       PROPERTY_ENTRY_U32("touchscreen-min-y", 14),
+       PROPERTY_ENTRY_U32("touchscreen-size-x", 1916),
+       PROPERTY_ENTRY_U32("touchscreen-size-y", 1264),
+       PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"),
+       PROPERTY_ENTRY_STRING("firmware-name", "gsl3692-teclast-x16-plus.fw"),
+       PROPERTY_ENTRY_U32("silead,max-fingers", 10),
+       PROPERTY_ENTRY_BOOL("silead,home-button"),
+       { }
+};
+
+static const struct ts_dmi_data teclast_x16_plus_data = {
+       .embedded_fw = {
+               .name   = "silead/gsl3692-teclast-x16-plus.fw",
+               .prefix = { 0xf0, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00 },
+               .length = 43560,
+               .sha256 = { 0x9d, 0xb0, 0x3d, 0xf1, 0x00, 0x3c, 0xb5, 0x25,
+                           0x62, 0x8a, 0xa0, 0x93, 0x4b, 0xe0, 0x4e, 0x75,
+                           0xd1, 0x27, 0xb1, 0x65, 0x3c, 0xba, 0xa5, 0x0f,
+                           0xcd, 0xb4, 0xbe, 0x00, 0xbb, 0xf6, 0x43, 0x29 },
+       },
+       .acpi_name      = "MSSL1680:00",
+       .properties     = teclast_x16_plus_props,
+};
+
 static const struct property_entry teclast_x3_plus_props[] = {
        PROPERTY_ENTRY_U32("touchscreen-size-x", 1980),
        PROPERTY_ENTRY_U32("touchscreen-size-y", 1500),
@@ -1612,6 +1638,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = {
                        DMI_MATCH(DMI_PRODUCT_SKU, "E5A6_A1"),
                },
        },
+       {
+               /* Teclast X16 Plus */
+               .driver_data = (void *)&teclast_x16_plus_data,
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "TECLAST"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Default string"),
+                       DMI_MATCH(DMI_PRODUCT_SKU, "D3A5_A1"),
+               },
+       },
        {
                /* Teclast X3 Plus */
                .driver_data = (void *)&teclast_x3_plus_data,
index bd271a5730aa51f1c1e6286e2b481e865799b79a..3c288e8f404beb5d4887235c85654e9ac77cd425 100644 (file)
@@ -25,6 +25,7 @@
 #include <linux/list.h>
 #include <linux/module.h>
 #include <linux/platform_device.h>
+#include <linux/rwsem.h>
 #include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/types.h>
@@ -56,7 +57,6 @@ static_assert(__alignof__(struct guid_block) == 1);
 
 enum { /* wmi_block flags */
        WMI_READ_TAKES_NO_ARGS,
-       WMI_PROBED,
 };
 
 struct wmi_block {
@@ -64,8 +64,10 @@ struct wmi_block {
        struct list_head list;
        struct guid_block gblock;
        struct acpi_device *acpi_device;
+       struct rw_semaphore notify_lock;        /* Protects notify callback add/remove */
        wmi_notify_handler handler;
        void *handler_data;
+       bool driver_ready;
        unsigned long flags;
 };
 
@@ -219,6 +221,17 @@ static int wmidev_match_guid(struct device *dev, const void *data)
        return 0;
 }
 
+static int wmidev_match_notify_id(struct device *dev, const void *data)
+{
+       struct wmi_block *wblock = dev_to_wblock(dev);
+       const u32 *notify_id = data;
+
+       if (wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *notify_id)
+               return 1;
+
+       return 0;
+}
+
 static struct bus_type wmi_bus_type;
 
 static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
@@ -238,6 +251,17 @@ static struct wmi_device *wmi_find_device_by_guid(const char *guid_string)
        return dev_to_wdev(dev);
 }
 
+static struct wmi_device *wmi_find_event_by_notify_id(const u32 notify_id)
+{
+       struct device *dev;
+
+       dev = bus_find_device(&wmi_bus_type, NULL, &notify_id, wmidev_match_notify_id);
+       if (!dev)
+               return ERR_PTR(-ENODEV);
+
+       return to_wmi_device(dev);
+}
+
 static void wmi_device_put(struct wmi_device *wdev)
 {
        put_device(&wdev->dev);
@@ -572,32 +596,31 @@ acpi_status wmi_install_notify_handler(const char *guid,
                                       wmi_notify_handler handler,
                                       void *data)
 {
-       struct wmi_block *block;
-       acpi_status status = AE_NOT_EXIST;
-       guid_t guid_input;
-
-       if (!guid || !handler)
-               return AE_BAD_PARAMETER;
+       struct wmi_block *wblock;
+       struct wmi_device *wdev;
+       acpi_status status;
 
-       if (guid_parse(guid, &guid_input))
-               return AE_BAD_PARAMETER;
+       wdev = wmi_find_device_by_guid(guid);
+       if (IS_ERR(wdev))
+               return AE_ERROR;
 
-       list_for_each_entry(block, &wmi_block_list, list) {
-               acpi_status wmi_status;
+       wblock = container_of(wdev, struct wmi_block, dev);
 
-               if (guid_equal(&block->gblock.guid, &guid_input)) {
-                       if (block->handler)
-                               return AE_ALREADY_ACQUIRED;
+       down_write(&wblock->notify_lock);
+       if (wblock->handler) {
+               status = AE_ALREADY_ACQUIRED;
+       } else {
+               wblock->handler = handler;
+               wblock->handler_data = data;
 
-                       block->handler = handler;
-                       block->handler_data = data;
+               if (ACPI_FAILURE(wmi_method_enable(wblock, true)))
+                       dev_warn(&wblock->dev.dev, "Failed to enable device\n");
 
-                       wmi_status = wmi_method_enable(block, true);
-                       if ((wmi_status != AE_OK) ||
-                           ((wmi_status == AE_OK) && (status == AE_NOT_EXIST)))
-                               status = wmi_status;
-               }
+               status = AE_OK;
        }
+       up_write(&wblock->notify_lock);
+
+       wmi_device_put(wdev);
 
        return status;
 }
@@ -613,30 +636,31 @@ EXPORT_SYMBOL_GPL(wmi_install_notify_handler);
  */
 acpi_status wmi_remove_notify_handler(const char *guid)
 {
-       struct wmi_block *block;
-       acpi_status status = AE_NOT_EXIST;
-       guid_t guid_input;
+       struct wmi_block *wblock;
+       struct wmi_device *wdev;
+       acpi_status status;
 
-       if (!guid)
-               return AE_BAD_PARAMETER;
+       wdev = wmi_find_device_by_guid(guid);
+       if (IS_ERR(wdev))
+               return AE_ERROR;
 
-       if (guid_parse(guid, &guid_input))
-               return AE_BAD_PARAMETER;
+       wblock = container_of(wdev, struct wmi_block, dev);
 
-       list_for_each_entry(block, &wmi_block_list, list) {
-               acpi_status wmi_status;
+       down_write(&wblock->notify_lock);
+       if (!wblock->handler) {
+               status = AE_NULL_ENTRY;
+       } else {
+               if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
+                       dev_warn(&wblock->dev.dev, "Failed to disable device\n");
 
-               if (guid_equal(&block->gblock.guid, &guid_input)) {
-                       if (!block->handler)
-                               return AE_NULL_ENTRY;
+               wblock->handler = NULL;
+               wblock->handler_data = NULL;
 
-                       wmi_status = wmi_method_enable(block, false);
-                       block->handler = NULL;
-                       block->handler_data = NULL;
-                       if (wmi_status != AE_OK || (wmi_status == AE_OK && status == AE_NOT_EXIST))
-                               status = wmi_status;
-               }
+               status = AE_OK;
        }
+       up_write(&wblock->notify_lock);
+
+       wmi_device_put(wdev);
 
        return status;
 }
@@ -655,15 +679,19 @@ EXPORT_SYMBOL_GPL(wmi_remove_notify_handler);
 acpi_status wmi_get_event_data(u32 event, struct acpi_buffer *out)
 {
        struct wmi_block *wblock;
+       struct wmi_device *wdev;
+       acpi_status status;
 
-       list_for_each_entry(wblock, &wmi_block_list, list) {
-               struct guid_block *gblock = &wblock->gblock;
+       wdev = wmi_find_event_by_notify_id(event);
+       if (IS_ERR(wdev))
+               return AE_NOT_FOUND;
 
-               if ((gblock->flags & ACPI_WMI_EVENT) && gblock->notify_id == event)
-                       return get_event_data(wblock, out);
-       }
+       wblock = container_of(wdev, struct wmi_block, dev);
+       status = get_event_data(wblock, out);
 
-       return AE_NOT_FOUND;
+       wmi_device_put(wdev);
+
+       return status;
 }
 EXPORT_SYMBOL_GPL(wmi_get_event_data);
 
@@ -868,7 +896,7 @@ static int wmi_dev_probe(struct device *dev)
        if (wdriver->probe) {
                ret = wdriver->probe(dev_to_wdev(dev),
                                find_guid_context(wblock, wdriver));
-               if (!ret) {
+               if (ret) {
                        if (ACPI_FAILURE(wmi_method_enable(wblock, false)))
                                dev_warn(dev, "Failed to disable device\n");
 
@@ -876,7 +904,9 @@ static int wmi_dev_probe(struct device *dev)
                }
        }
 
-       set_bit(WMI_PROBED, &wblock->flags);
+       down_write(&wblock->notify_lock);
+       wblock->driver_ready = true;
+       up_write(&wblock->notify_lock);
 
        return 0;
 }
@@ -886,7 +916,9 @@ static void wmi_dev_remove(struct device *dev)
        struct wmi_block *wblock = dev_to_wblock(dev);
        struct wmi_driver *wdriver = drv_to_wdrv(dev->driver);
 
-       clear_bit(WMI_PROBED, &wblock->flags);
+       down_write(&wblock->notify_lock);
+       wblock->driver_ready = false;
+       up_write(&wblock->notify_lock);
 
        if (wdriver->remove)
                wdriver->remove(dev_to_wdev(dev));
@@ -999,6 +1031,8 @@ static int wmi_create_device(struct device *wmi_bus_dev,
                wblock->dev.setable = true;
 
  out_init:
+       init_rwsem(&wblock->notify_lock);
+       wblock->driver_ready = false;
        wblock->dev.dev.bus = &wmi_bus_type;
        wblock->dev.dev.parent = wmi_bus_dev;
 
@@ -1171,6 +1205,26 @@ acpi_wmi_ec_space_handler(u32 function, acpi_physical_address address,
        }
 }
 
+static void wmi_notify_driver(struct wmi_block *wblock)
+{
+       struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
+       struct acpi_buffer data = { ACPI_ALLOCATE_BUFFER, NULL };
+       acpi_status status;
+
+       if (!driver->no_notify_data) {
+               status = get_event_data(wblock, &data);
+               if (ACPI_FAILURE(status)) {
+                       dev_warn(&wblock->dev.dev, "Failed to get event data\n");
+                       return;
+               }
+       }
+
+       if (driver->notify)
+               driver->notify(&wblock->dev, data.pointer);
+
+       kfree(data.pointer);
+}
+
 static int wmi_notify_device(struct device *dev, void *data)
 {
        struct wmi_block *wblock = dev_to_wblock(dev);
@@ -1179,28 +1233,17 @@ static int wmi_notify_device(struct device *dev, void *data)
        if (!(wblock->gblock.flags & ACPI_WMI_EVENT && wblock->gblock.notify_id == *event))
                return 0;
 
-       /* If a driver is bound, then notify the driver. */
-       if (test_bit(WMI_PROBED, &wblock->flags) && wblock->dev.dev.driver) {
-               struct wmi_driver *driver = drv_to_wdrv(wblock->dev.dev.driver);
-               struct acpi_buffer evdata = { ACPI_ALLOCATE_BUFFER, NULL };
-               acpi_status status;
-
-               if (!driver->no_notify_data) {
-                       status = get_event_data(wblock, &evdata);
-                       if (ACPI_FAILURE(status)) {
-                               dev_warn(&wblock->dev.dev, "failed to get event data\n");
-                               return -EIO;
-                       }
-               }
-
-               if (driver->notify)
-                       driver->notify(&wblock->dev, evdata.pointer);
-
-               kfree(evdata.pointer);
-       } else if (wblock->handler) {
-               /* Legacy handler */
-               wblock->handler(*event, wblock->handler_data);
+       down_read(&wblock->notify_lock);
+       /* The WMI driver notify handler conflicts with the legacy WMI handler.
+        * Because of this, the WMI driver notify handler takes precedence.
+        */
+       if (wblock->dev.dev.driver && wblock->driver_ready) {
+               wmi_notify_driver(wblock);
+       } else {
+               if (wblock->handler)
+                       wblock->handler(*event, wblock->handler_data);
        }
+       up_read(&wblock->notify_lock);
 
        acpi_bus_generate_netlink_event(wblock->acpi_device->pnp.device_class,
                                        dev_name(&wblock->dev.dev), *event, 0);
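
The new notify_lock closes a race between event delivery and handler
changes: delivery takes the semaphore shared, while legacy handler
(un)registration and driver probe/remove take it exclusively, so a
handler can no longer be torn down mid-invocation. The core of that
pattern as a minimal sketch (init_rwsem() at creation time is assumed,
as wmi_create_device() does above):

struct notify_slot {
        struct rw_semaphore lock;
        void (*handler)(u32 event, void *data);
        void *data;
};

static void slot_fire(struct notify_slot *s, u32 event)
{
        down_read(&s->lock);            /* handler cannot change under us */
        if (s->handler)
                s->handler(event, s->data);
        up_read(&s->lock);
}

static void slot_set(struct notify_slot *s,
                     void (*handler)(u32, void *), void *data)
{
        down_write(&s->lock);           /* excludes slot_fire() readers */
        s->handler = handler;
        s->data = data;
        up_write(&s->lock);
}
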
index 2a50fda3a628c3fdc9daa79d73437de565bc5891..625fd547ee60a79c3a8bae9e985cb74d06b9073f 100644 (file)
@@ -371,7 +371,6 @@ static u16 initio_se2_rd(unsigned long base, u8 addr)
  */
 static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
 {
-       u8 rb;
        u8 instr;
        int i;
 
@@ -400,7 +399,7 @@ static void initio_se2_wr(unsigned long base, u8 addr, u16 val)
                udelay(30);
                outb(SE2CS, base + TUL_NVRAM);                  /* -CLK */
                udelay(30);
-               if ((rb = inb(base + TUL_NVRAM)) & SE2DI)
+               if (inb(base + TUL_NVRAM) & SE2DI)
                        break;  /* write complete */
        }
        outb(0, base + TUL_NVRAM);                              /* -CS */
index 71f711cb0628a70d40efc99520ef2dc807494e75..355a0bc0828e749a45513309b942cfdae4878a7c 100644 (file)
@@ -3387,7 +3387,7 @@ static enum sci_status isci_io_request_build(struct isci_host *ihost,
                return SCI_FAILURE;
        }
 
-       return SCI_SUCCESS;
+       return status;
 }
 
 static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
index 79da4b1c1df0adc649954a45f2d630989f12a6d6..4f455884fdc440188124d845ef0535f4da418d22 100644 (file)
@@ -61,11 +61,11 @@ static int scsi_eh_try_stu(struct scsi_cmnd *scmd);
 static enum scsi_disposition scsi_try_to_abort_cmd(const struct scsi_host_template *,
                                                   struct scsi_cmnd *);
 
-void scsi_eh_wakeup(struct Scsi_Host *shost)
+void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy)
 {
        lockdep_assert_held(shost->host_lock);
 
-       if (scsi_host_busy(shost) == shost->host_failed) {
+       if (busy == shost->host_failed) {
                trace_scsi_eh_wakeup(shost);
                wake_up_process(shost->ehandler);
                SCSI_LOG_ERROR_RECOVERY(5, shost_printk(KERN_INFO, shost,
@@ -88,7 +88,7 @@ void scsi_schedule_eh(struct Scsi_Host *shost)
        if (scsi_host_set_state(shost, SHOST_RECOVERY) == 0 ||
            scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY) == 0) {
                shost->host_eh_scheduled++;
-               scsi_eh_wakeup(shost);
+               scsi_eh_wakeup(shost, scsi_host_busy(shost));
        }
 
        spin_unlock_irqrestore(shost->host_lock, flags);
@@ -286,7 +286,7 @@ static void scsi_eh_inc_host_failed(struct rcu_head *head)
 
        spin_lock_irqsave(shost->host_lock, flags);
        shost->host_failed++;
-       scsi_eh_wakeup(shost);
+       scsi_eh_wakeup(shost, scsi_host_busy(shost));
        spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
index cf3864f720930988fbadc77b3c91c77fe2d3bb62..1fb80eae9a63a30787dee6583e2a0cfe5c96f0ce 100644 (file)
@@ -280,7 +280,7 @@ static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
        if (unlikely(scsi_host_in_recovery(shost))) {
                spin_lock_irqsave(shost->host_lock, flags);
                if (shost->host_failed || shost->host_eh_scheduled)
-                       scsi_eh_wakeup(shost);
+                       scsi_eh_wakeup(shost, scsi_host_busy(shost));
                spin_unlock_irqrestore(shost->host_lock, flags);
        }
        rcu_read_unlock();
index 3f0dfb97db6bd1b88755db1fb50dd6e968e385c6..1fbfe1b52c9f1a906ea6b0da7a6b273e2972a903 100644 (file)
@@ -92,7 +92,7 @@ extern void scmd_eh_abort_handler(struct work_struct *work);
 extern enum blk_eh_timer_return scsi_timeout(struct request *req);
 extern int scsi_error_handler(void *host);
 extern enum scsi_disposition scsi_decide_disposition(struct scsi_cmnd *cmd);
-extern void scsi_eh_wakeup(struct Scsi_Host *shost);
+extern void scsi_eh_wakeup(struct Scsi_Host *shost, unsigned int busy);
 extern void scsi_eh_scmd_add(struct scsi_cmnd *);
 void scsi_eh_ready_devs(struct Scsi_Host *shost,
                        struct list_head *work_q,
index a95936b18f695e3ef796098866ea07101e9e346d..7ceb982040a5dfe5d490f9a4bd306e99e5140a53 100644 (file)
@@ -330,6 +330,7 @@ enum storvsc_request_type {
  */
 
 static int storvsc_ringbuffer_size = (128 * 1024);
+static int aligned_ringbuffer_size;
 static u32 max_outstanding_req_per_channel;
 static int storvsc_change_queue_depth(struct scsi_device *sdev, int queue_depth);
 
@@ -687,8 +688,8 @@ static void handle_sc_creation(struct vmbus_channel *new_sc)
        new_sc->next_request_id_callback = storvsc_next_request_id;
 
        ret = vmbus_open(new_sc,
-                        storvsc_ringbuffer_size,
-                        storvsc_ringbuffer_size,
+                        aligned_ringbuffer_size,
+                        aligned_ringbuffer_size,
                         (void *)&props,
                         sizeof(struct vmstorage_channel_properties),
                         storvsc_on_channel_callback, new_sc);
@@ -1973,7 +1974,7 @@ static int storvsc_probe(struct hv_device *device,
        dma_set_min_align_mask(&device->device, HV_HYP_PAGE_SIZE - 1);
 
        stor_device->port_number = host->host_no;
-       ret = storvsc_connect_to_vsp(device, storvsc_ringbuffer_size, is_fc);
+       ret = storvsc_connect_to_vsp(device, aligned_ringbuffer_size, is_fc);
        if (ret)
                goto err_out1;
 
@@ -2164,7 +2165,7 @@ static int storvsc_resume(struct hv_device *hv_dev)
 {
        int ret;
 
-       ret = storvsc_connect_to_vsp(hv_dev, storvsc_ringbuffer_size,
+       ret = storvsc_connect_to_vsp(hv_dev, aligned_ringbuffer_size,
                                     hv_dev_is_fc(hv_dev));
        return ret;
 }
@@ -2198,8 +2199,9 @@ static int __init storvsc_drv_init(void)
         * the ring buffer indices) by the max request size (which is
         * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
         */
+       aligned_ringbuffer_size = VMBUS_RING_SIZE(storvsc_ringbuffer_size);
        max_outstanding_req_per_channel =
-               ((storvsc_ringbuffer_size - PAGE_SIZE) /
+               ((aligned_ringbuffer_size - PAGE_SIZE) /
                ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
                sizeof(struct vstor_packet) + sizeof(u64),
                sizeof(u64)));
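
The ring size is now aligned once at module init: VMBUS_RING_SIZE(), per
linux/hyperv.h, rounds the requested payload plus the ring header up to
whole pages, and that same aligned figure must feed both vmbus_open() and
the outstanding-request arithmetic, or the two would disagree. A minimal
sketch of computing it once:

static int ringbuffer_size = 128 * 1024;        /* requested payload bytes */
static int aligned_ringbuffer_size;

static int __init sizes_init(void)
{
        /* Align once; every later user reads the same value. */
        aligned_ringbuffer_size = VMBUS_RING_SIZE(ringbuffer_size);
        return 0;
}
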
index 4cf20be668a6021c6acfae56c19f0914586a7bf6..617eb892f4ad457feb5d4de3d9c1ceb88a010c61 100644 (file)
@@ -188,8 +188,6 @@ static void virtscsi_vq_done(struct virtio_scsi *vscsi,
                while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
                        fn(vscsi, buf);
 
-               if (unlikely(virtqueue_is_broken(vq)))
-                       break;
        } while (!virtqueue_enable_cb(vq));
        spin_unlock_irqrestore(&virtscsi_vq->vq_lock, flags);
 }
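
With the early break gone, the function returns to the canonical virtio
completion-drain loop: disable callbacks, drain until empty, re-enable,
and go around again if buffers slipped in between the drain and the
re-enable (virtqueue_enable_cb() returns false in that case). The loop in
isolation, with a hypothetical consumer:

static void drain_vq(struct virtqueue *vq)
{
        unsigned int len;
        void *buf;

        do {
                virtqueue_disable_cb(vq);
                while ((buf = virtqueue_get_buf(vq, &len)) != NULL)
                        consume(buf);   /* hypothetical per-buffer handler */
        } while (!virtqueue_enable_cb(vq));
}
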
index 780199bf351efbfb24422880ef39510c77def68f..49a0955e82d6cf5eef83e5f63ba8d31194c65324 100644 (file)
@@ -296,14 +296,14 @@ struct apple_mbox *apple_mbox_get(struct device *dev, int index)
        of_node_put(args.np);
 
        if (!pdev)
-               return ERR_PTR(EPROBE_DEFER);
+               return ERR_PTR(-EPROBE_DEFER);
 
        mbox = platform_get_drvdata(pdev);
        if (!mbox)
-               return ERR_PTR(EPROBE_DEFER);
+               return ERR_PTR(-EPROBE_DEFER);
 
        if (!device_link_add(dev, &pdev->dev, DL_FLAG_AUTOREMOVE_CONSUMER))
-               return ERR_PTR(ENODEV);
+               return ERR_PTR(-ENODEV);
 
        return mbox;
 }
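
All three hunks fix the same sign bug: ERR_PTR() encodes a negative errno
in a pointer, and IS_ERR()/PTR_ERR() only recognise the topmost 4095
values of the address space, so ERR_PTR(EPROBE_DEFER) with a positive
constant yields a small bogus pointer that no caller detects. The
convention in miniature (hypothetical lookup):

#include <linux/err.h>

struct apple_mbox;                      /* opaque here */

static struct apple_mbox *mbox_lookup(struct apple_mbox *found)
{
        if (!found)
                return ERR_PTR(-EPROBE_DEFER);  /* note the minus sign */
        return found;
}

static long mbox_errno(struct apple_mbox *candidate)
{
        struct apple_mbox *mbox = mbox_lookup(candidate);

        return IS_ERR(mbox) ? PTR_ERR(mbox) : 0;
}
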
index d96222e6d7d2d4022753d3120b4c36ea759dad75..cfdaa5eaec76db9b322272b54d4fdfdcff3db697 100644 (file)
@@ -19,7 +19,7 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 #include <linux/spi/spi.h>
-#include <linux/spi/spi-mem.h>
+#include <linux/mtd/spi-nor.h>
 #include <linux/sysfs.h>
 #include <linux/types.h>
 #include "spi-bcm-qspi.h"
@@ -1221,7 +1221,7 @@ static int bcm_qspi_exec_mem_op(struct spi_mem *mem,
 
        /* non-aligned and very short transfers are handled by MSPI */
        if (!IS_ALIGNED((uintptr_t)addr, 4) || !IS_ALIGNED((uintptr_t)buf, 4) ||
-           len < 4)
+           len < 4 || op->cmd.opcode == SPINOR_OP_RDSFDP)
                mspi_read = true;
 
        if (!has_bspi(qspi) || mspi_read)
index a50eb4db79de8e93cb61a9ea50bc8913ed3e4f1f..e5140532071d2b647ab77fa561f27630a334971a 100644 (file)
@@ -317,6 +317,15 @@ static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
        xspi->rx_bytes -= nrx;
 
        while (ntx || nrx) {
+               if (nrx) {
+                       u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
+
+                       if (xspi->rxbuf)
+                               *xspi->rxbuf++ = data;
+
+                       nrx--;
+               }
+
                if (ntx) {
                        if (xspi->txbuf)
                                cdns_spi_write(xspi, CDNS_SPI_TXD, *xspi->txbuf++);
@@ -326,14 +335,6 @@ static void cdns_spi_process_fifo(struct cdns_spi *xspi, int ntx, int nrx)
                        ntx--;
                }
 
-               if (nrx) {
-                       u8 data = cdns_spi_read(xspi, CDNS_SPI_RXD);
-
-                       if (xspi->rxbuf)
-                               *xspi->rxbuf++ = data;
-
-                       nrx--;
-               }
        }
 }
 
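Reading the RX FIFO before pushing the next TX byte bounds how many
unread RX entries can accumulate while the transmit side is refilled,
which appears to be the motive for the reorder. The resulting loop shape,
sketched with hypothetical FIFO accessors:

while (ntx || nrx) {
        if (nrx) {                      /* drain RX first ... */
                u8 data = read_rx_fifo();

                if (rxbuf)
                        *rxbuf++ = data;
                nrx--;
        }
        if (ntx) {                      /* ... then feed TX */
                write_tx_fifo(txbuf ? *txbuf++ : 0);
                ntx--;
        }
}
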
index f13073e1259364640b16b57323e5d027c10bdb0f..b24190526ce96420fe885e585b00fb820502bacd 100644 (file)
@@ -244,7 +244,10 @@ static int cs42l43_spi_probe(struct platform_device *pdev)
        priv->ctlr->use_gpio_descriptors = true;
        priv->ctlr->auto_runtime_pm = true;
 
-       devm_pm_runtime_enable(priv->dev);
+       ret = devm_pm_runtime_enable(priv->dev);
+       if (ret)
+               return ret;
+
        pm_runtime_idle(priv->dev);
 
        regmap_write(priv->regmap, CS42L43_TRAN_CONFIG6, CS42L43_FIFO_SIZE - 1);
index 9d22018f7985f11956fae5e06ffb0dbd180914f9..1301d14483d482dcaf05250a563a414db73c9dd4 100644 (file)
@@ -377,6 +377,11 @@ static const struct spi_controller_mem_ops hisi_sfc_v3xx_mem_ops = {
 static irqreturn_t hisi_sfc_v3xx_isr(int irq, void *data)
 {
        struct hisi_sfc_v3xx_host *host = data;
+       u32 reg;
+
+       reg = readl(host->regbase + HISI_SFC_V3XX_INT_STAT);
+       if (!reg)
+               return IRQ_NONE;
 
        hisi_sfc_v3xx_disable_int(host);
 
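Reading the interrupt status up front and returning IRQ_NONE when it is
clear tells the IRQ core this device did not raise the interrupt, which
is what shared lines and the spurious-interrupt detector rely on. The
general handler shape (register offset and types hypothetical; needs
linux/interrupt.h and linux/io.h):

#define MY_INT_STAT     0x120           /* hypothetical status register */

struct my_host {
        void __iomem *regbase;
};

static irqreturn_t my_isr(int irq, void *data)
{
        struct my_host *host = data;
        u32 stat = readl(host->regbase + MY_INT_STAT);

        if (!stat)
                return IRQ_NONE;        /* not ours; let others run */

        /* ... mask, acknowledge and handle the bits in stat ... */
        return IRQ_HANDLED;
}
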
index 272bc871a848b833e6e673740f4be5f8f3a16294..546cdce525fc5b1b49b305b872e81d2b0aed0cb5 100644 (file)
@@ -1344,7 +1344,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
        controller->dma_tx = dma_request_chan(dev, "tx");
        if (IS_ERR(controller->dma_tx)) {
                ret = PTR_ERR(controller->dma_tx);
-               dev_dbg(dev, "can't get the TX DMA channel, error %d!\n", ret);
+               dev_err_probe(dev, ret, "can't get the TX DMA channel!\n");
                controller->dma_tx = NULL;
                goto err;
        }
@@ -1353,7 +1353,7 @@ static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
        controller->dma_rx = dma_request_chan(dev, "rx");
        if (IS_ERR(controller->dma_rx)) {
                ret = PTR_ERR(controller->dma_rx);
-               dev_dbg(dev, "can't get the RX DMA channel, error %d\n", ret);
+               dev_err_probe(dev, ret, "can't get the RX DMA channel!\n");
                controller->dma_rx = NULL;
                goto err;
        }
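
dev_err_probe() is the idiomatic replacement in probe paths: it logs at
error level except for -EPROBE_DEFER, which it records quietly as the
deferral reason (visible in debugfs devices_deferred), and it returns the
error it was given. Typical usage, as a hypothetical wrapper:

static int request_tx_chan(struct device *dev, struct dma_chan **chan)
{
        *chan = dma_request_chan(dev, "tx");
        if (IS_ERR(*chan))
                /* One call logs (or defers quietly) and propagates. */
                return dev_err_probe(dev, PTR_ERR(*chan),
                                     "can't get the TX DMA channel!\n");
        return 0;
}
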
index 57d767a68e7b2766dcea5510809cf2f09e0bef63..07d20ca1164c357813e075b7a1a6763da735ab0a 100644 (file)
@@ -76,6 +76,7 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0x7a24), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0x7aa4), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0x7e23), (unsigned long)&cnl_info },
+       { PCI_VDEVICE(INTEL, 0x7f24), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0x9d24), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0x9da4), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0xa0a4), (unsigned long)&cnl_info },
@@ -84,7 +85,6 @@ static const struct pci_device_id intel_spi_pci_ids[] = {
        { PCI_VDEVICE(INTEL, 0xa2a4), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0xa324), (unsigned long)&cnl_info },
        { PCI_VDEVICE(INTEL, 0xa3a4), (unsigned long)&cnl_info },
-       { PCI_VDEVICE(INTEL, 0xae23), (unsigned long)&cnl_info },
        { },
 };
 MODULE_DEVICE_TABLE(pci, intel_spi_pci_ids);
index 7477a11e12be0e2bf47006ce9e579fdd9f1fda30..f2170f4b50775ea175c3d0c1e4a7ef0f809e6a52 100644 (file)
@@ -1717,6 +1717,10 @@ static int __spi_pump_transfer_message(struct spi_controller *ctlr,
                        pm_runtime_put_noidle(ctlr->dev.parent);
                        dev_err(&ctlr->dev, "Failed to power device: %d\n",
                                ret);
+
+                       msg->status = ret;
+                       spi_finalize_current_message(ctlr);
+
                        return ret;
                }
        }
index 5ac5cb60bae67b8caa54d47e0ebb740d6a4505ab..bc6eb0dd66a495f04ae5d0c398611895351c9b2f 100644 (file)
@@ -49,7 +49,6 @@
  */
 #define DEFAULT_DURATION_JIFFIES (6)
 
-static unsigned int target_mwait;
 static struct dentry *debug_dir;
 static bool poll_pkg_cstate_enable;
 
@@ -312,34 +311,6 @@ MODULE_PARM_DESC(window_size, "sliding window in number of clamping cycles\n"
        "\twindow size results in slower response time but more smooth\n"
        "\tclamping results. default to 2.");
 
-static void find_target_mwait(void)
-{
-       unsigned int eax, ebx, ecx, edx;
-       unsigned int highest_cstate = 0;
-       unsigned int highest_subcstate = 0;
-       int i;
-
-       if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
-               return;
-
-       cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
-
-       if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
-           !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
-               return;
-
-       edx >>= MWAIT_SUBSTATE_SIZE;
-       for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
-               if (edx & MWAIT_SUBSTATE_MASK) {
-                       highest_cstate = i;
-                       highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
-               }
-       }
-       target_mwait = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
-               (highest_subcstate - 1);
-
-}
-
 struct pkg_cstate_info {
        bool skip;
        int msr_index;
@@ -759,9 +730,6 @@ static int __init powerclamp_probe(void)
                return -ENODEV;
        }
 
-       /* find the deepest mwait value */
-       find_target_mwait();
-
        return 0;
 }
 
index 63af6ab034b5f1bb45992a4074f8862d528b38d3..1183e7a871f8b270a9ff2106cef15e44720184a4 100644 (file)
@@ -631,8 +631,7 @@ static void fbcon_prepare_logo(struct vc_data *vc, struct fb_info *info,
 
        if (logo_lines > vc->vc_bottom) {
                logo_shown = FBCON_LOGO_CANSHOW;
-               printk(KERN_INFO
-                      "fbcon_init: disable boot-logo (boot-logo bigger than screen).\n");
+               pr_info("fbcon: disable boot-logo (boot-logo bigger than screen).\n");
        } else {
                logo_shown = FBCON_LOGO_DRAW;
                vc->vc_top = logo_lines;
index dddd6afcb972a5c23a5969c2ced0638ccf0b5b34..ebc9aeffdde7c54321b19499715e128d594c0e61 100644 (file)
@@ -869,6 +869,9 @@ static int savagefb_check_var(struct fb_var_screeninfo   *var,
 
        DBG("savagefb_check_var");
 
+       if (!var->pixclock)
+               return -EINVAL;
+
        var->transp.offset = 0;
        var->transp.length = 0;
        switch (var->bits_per_pixel) {
index 803ccb6aa479703bc1cb88237b4c3adc594a75a2..009bf1d926448011292c182e7eee29c25930ed6d 100644 (file)
@@ -1444,6 +1444,8 @@ sisfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
 
        vtotal = var->upper_margin + var->lower_margin + var->vsync_len;
 
+       if (!var->pixclock)
+               return -EINVAL;
        pixclock = var->pixclock;
 
        if((var->vmode & FB_VMODE_MASK) == FB_VMODE_NONINTERLACED) {
index 2de0e675fd1504da67b7110ee81152934ad2cbad..8e5bac27542d915534c3071ec5f64e89727c2c11 100644 (file)
@@ -1158,7 +1158,7 @@ stifb_init_display(struct stifb_info *fb)
            }
            break;
        }
-       stifb_blank(0, (struct fb_info *)fb);   /* 0=enable screen */
+       stifb_blank(0, fb->info);       /* 0=enable screen */
 
        SETUP_FB(fb);
 }
index 42c25dc851976c5fa823b89fc4f72e5826d17459..ac73937073a76f7d22df39a503ac59bda2d4a7da 100644 (file)
@@ -374,7 +374,6 @@ static int vt8500lcd_probe(struct platform_device *pdev)
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
-               dev_err(&pdev->dev, "no IRQ defined\n");
                ret = -ENODEV;
                goto failed_free_palette;
        }
index c14533ef108f191a7209f4f035e084fb8a41b57a..b5b8de521f99b26ba6c9b2fd707fb794a62612ae 100644 (file)
@@ -124,7 +124,7 @@ static void afs_dir_read_cleanup(struct afs_read *req)
                if (xas_retry(&xas, folio))
                        continue;
                BUG_ON(xa_is_value(folio));
-               ASSERTCMP(folio_file_mapping(folio), ==, mapping);
+               ASSERTCMP(folio->mapping, ==, mapping);
 
                folio_put(folio);
        }
@@ -202,12 +202,12 @@ static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
                if (xas_retry(&xas, folio))
                        continue;
 
-               BUG_ON(folio_file_mapping(folio) != mapping);
+               BUG_ON(folio->mapping != mapping);
 
                size = min_t(loff_t, folio_size(folio), req->actual_len - folio_pos(folio));
                for (offset = 0; offset < size; offset += sizeof(*block)) {
                        block = kmap_local_folio(folio, offset);
-                       pr_warn("[%02lx] %32phN\n", folio_index(folio) + offset, block);
+                       pr_warn("[%02lx] %32phN\n", folio->index + offset, block);
                        kunmap_local(block);
                }
        }
@@ -233,7 +233,7 @@ static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req)
                if (xas_retry(&xas, folio))
                        continue;
 
-               BUG_ON(folio_file_mapping(folio) != mapping);
+               BUG_ON(folio->mapping != mapping);
 
                if (!afs_dir_check_folio(dvnode, folio, req->actual_len)) {
                        afs_dir_dump(dvnode, req);
@@ -474,6 +474,14 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
                        continue;
                }
 
+               /* Don't expose silly rename entries to userspace. */
+               if (nlen > 6 &&
+                   dire->u.name[0] == '.' &&
+                   ctx->actor != afs_lookup_filldir &&
+                   ctx->actor != afs_lookup_one_filldir &&
+                   memcmp(dire->u.name, ".__afs", 6) == 0)
+                       continue;
+
                /* found the next entry */
                if (!dir_emit(ctx, dire->u.name, nlen,
                              ntohl(dire->u.vnode),
@@ -708,6 +716,8 @@ static void afs_do_lookup_success(struct afs_operation *op)
                        break;
                }
 
+               if (vp->scb.status.abort_code)
+                       trace_afs_bulkstat_error(op, &vp->fid, i, vp->scb.status.abort_code);
                if (!vp->scb.have_status && !vp->scb.have_error)
                        continue;
 
@@ -897,12 +907,16 @@ static struct inode *afs_do_lookup(struct inode *dir, struct dentry *dentry,
                afs_begin_vnode_operation(op);
                afs_wait_for_operation(op);
        }
-       inode = ERR_PTR(afs_op_error(op));
 
 out_op:
        if (!afs_op_error(op)) {
-               inode = &op->file[1].vnode->netfs.inode;
-               op->file[1].vnode = NULL;
+               if (op->file[1].scb.status.abort_code) {
+                       afs_op_accumulate_error(op, -ECONNABORTED,
+                                               op->file[1].scb.status.abort_code);
+               } else {
+                       inode = &op->file[1].vnode->netfs.inode;
+                       op->file[1].vnode = NULL;
+               }
        }
 
        if (op->file[0].scb.have_status)
@@ -2022,7 +2036,7 @@ static bool afs_dir_release_folio(struct folio *folio, gfp_t gfp_flags)
 {
        struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
 
-       _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio_index(folio));
+       _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio->index);
 
        folio_detach_private(folio);
 
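The new check in afs_dir_iterate_block() keeps AFS silly-rename artifacts (".__afs..." names created while an unlinked-but-still-open file awaits deletion) out of ordinary readdir() results, while the two lookup filldirs still get to see them. A standalone sketch of the same prefix test, assuming only the ".__afs" prefix and its 6-byte length from the hunk (the function name is illustrative):

    #include <stdbool.h>
    #include <string.h>

    /* True if a dirent name looks like an AFS silly-rename entry and
     * should be hidden from userspace listings.  The name[0] test is a
     * cheap early-out before the memcmp(), as in the kernel hunk. */
    static bool afs_is_silly_rename(const char *name, size_t nlen)
    {
            return nlen > 6 && name[0] == '.' &&
                   memcmp(name, ".__afs", 6) == 0;
    }
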
index d3bc4a2d708519624673be4fd0572e9080da732c..c4d2711e20ad4476cabc0d41b4e50e2aad4477e4 100644
@@ -258,16 +258,7 @@ const struct inode_operations afs_dynroot_inode_operations = {
        .lookup         = afs_dynroot_lookup,
 };
 
-/*
- * Dirs in the dynamic root don't need revalidation.
- */
-static int afs_dynroot_d_revalidate(struct dentry *dentry, unsigned int flags)
-{
-       return 1;
-}
-
 const struct dentry_operations afs_dynroot_dentry_operations = {
-       .d_revalidate   = afs_dynroot_d_revalidate,
        .d_delete       = always_delete_dentry,
        .d_release      = afs_d_release,
        .d_automount    = afs_d_automount,
index 3bd02571f30debca6159756b5abe30e3dd905583..15eab053af6dc05931363c619cd32cf041093a3f 100644
@@ -166,7 +166,7 @@ static int afs_proc_addr_prefs_show(struct seq_file *m, void *v)
 
        if (!preflist) {
                seq_puts(m, "NO PREFS\n");
-               return 0;
+               goto out;
        }
 
        seq_printf(m, "PROT SUBNET                                      PRIOR (v=%u n=%u/%u/%u)\n",
@@ -191,7 +191,8 @@ static int afs_proc_addr_prefs_show(struct seq_file *m, void *v)
                }
        }
 
-       rcu_read_lock();
+out:
+       rcu_read_unlock();
        return 0;
 }
 
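The proc fix above addresses an unbalanced RCU read-side critical section: the early "NO PREFS" return skipped the unlock entirely, and the function's tail called rcu_read_lock() where rcu_read_unlock() was intended. Routing every exit through a single unlock label is the usual cure; a minimal sketch of that shape, with stand-in names rather than the real afs types:

    #include <linux/rcupdate.h>
    #include <linux/seq_file.h>

    struct prefs;                           /* opaque stand-in */

    static int show_prefs(struct seq_file *m, struct prefs __rcu **slot)
    {
            struct prefs *p;

            rcu_read_lock();
            p = rcu_dereference(*slot);
            if (!p) {
                    seq_puts(m, "NO PREFS\n");
                    goto out;               /* still balances the lock */
            }
            /* ... emit entries while the read lock is held ... */
    out:
            rcu_read_unlock();
            return 0;
    }
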
index 10704f2d3af5302f71a931e13bd0ba5432d46fe2..fd3e175d83423261d68124cd26fc0351488ad05e 100644
@@ -1715,7 +1715,7 @@ static int bch2_discard_one_bucket(struct btree_trans *trans,
                 * This works without any other locks because this is the only
                 * thread that removes items from the need_discard tree
                 */
-               bch2_trans_unlock(trans);
+               bch2_trans_unlock_long(trans);
                blkdev_issue_discard(ca->disk_sb.bdev,
                                     k.k->p.offset * ca->mi.bucket_size,
                                     ca->mi.bucket_size,
index bed75c93c06904e06f70e3afa92cc507a68b81c9..6843974423381029e7a8cf24fd4cd5c6c33627cd 100644
@@ -92,7 +92,7 @@ static noinline void print_cycle(struct printbuf *out, struct lock_graph *g)
                        continue;
 
                bch2_btree_trans_to_text(out, i->trans);
-               bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1);
+               bch2_prt_task_backtrace(out, task, i == g->g ? 5 : 1, GFP_NOWAIT);
        }
 }
 
@@ -227,7 +227,7 @@ static noinline int break_cycle(struct lock_graph *g, struct printbuf *cycle)
                        prt_printf(&buf, "backtrace:");
                        prt_newline(&buf);
                        printbuf_indent_add(&buf, 2);
-                       bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2);
+                       bch2_prt_task_backtrace(&buf, trans->locking_wait.task, 2, GFP_NOWAIT);
                        printbuf_indent_sub(&buf, 2);
                        prt_newline(&buf);
                }
index cadda9bbe4a4cd67fe3b6f6f7aa5a5d93e496307..7bdba8507fc93cdfdecc29de3e70e5589cf8177b 100644
@@ -627,7 +627,7 @@ restart:
                prt_printf(&i->buf, "backtrace:");
                prt_newline(&i->buf);
                printbuf_indent_add(&i->buf, 2);
-               bch2_prt_task_backtrace(&i->buf, task, 0);
+               bch2_prt_task_backtrace(&i->buf, task, 0, GFP_KERNEL);
                printbuf_indent_sub(&i->buf, 2);
                prt_newline(&i->buf);
 
index dc52918d06ef3f91c30484822a5a170b08543f9c..8c70123b6a0c809b6d50040593281c2e9c115828 100644
@@ -79,7 +79,7 @@ void bch2_inode_flush_nocow_writes_async(struct bch_fs *c,
                        continue;
 
                bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev, 0,
-                                                   REQ_OP_FLUSH,
+                                                   REQ_OP_WRITE|REQ_PREFLUSH,
                                                    GFP_KERNEL,
                                                    &c->nocow_flush_bioset),
                                   struct nocow_flush, bio);
index 4f0ecd60567570b7364cef517225ea0e3dfa5575..6a760777bafb06d08b449ee0db4308a77b54b11e 100644
@@ -119,22 +119,19 @@ static int lookup_inode(struct btree_trans *trans, u64 inode_nr,
        if (!ret)
                *snapshot = iter.pos.snapshot;
 err:
-       bch_err_msg(trans->c, ret, "fetching inode %llu:%u", inode_nr, *snapshot);
        bch2_trans_iter_exit(trans, &iter);
        return ret;
 }
 
-static int __lookup_dirent(struct btree_trans *trans,
+static int lookup_dirent_in_snapshot(struct btree_trans *trans,
                           struct bch_hash_info hash_info,
                           subvol_inum dir, struct qstr *name,
-                          u64 *target, unsigned *type)
+                          u64 *target, unsigned *type, u32 snapshot)
 {
        struct btree_iter iter;
        struct bkey_s_c_dirent d;
-       int ret;
-
-       ret = bch2_hash_lookup(trans, &iter, bch2_dirent_hash_desc,
-                              &hash_info, dir, name, 0);
+       int ret = bch2_hash_lookup_in_snapshot(trans, &iter, bch2_dirent_hash_desc,
+                              &hash_info, dir, name, 0, snapshot);
        if (ret)
                return ret;
 
@@ -225,15 +222,16 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
 
        struct bch_inode_unpacked root_inode;
        struct bch_hash_info root_hash_info;
-       ret = lookup_inode(trans, root_inum.inum, &root_inode, &snapshot);
+       u32 root_inode_snapshot = snapshot;
+       ret = lookup_inode(trans, root_inum.inum, &root_inode, &root_inode_snapshot);
        bch_err_msg(c, ret, "looking up root inode");
        if (ret)
                return ret;
 
        root_hash_info = bch2_hash_info_init(c, &root_inode);
 
-       ret = __lookup_dirent(trans, root_hash_info, root_inum,
-                             &lostfound_str, &inum, &d_type);
+       ret = lookup_dirent_in_snapshot(trans, root_hash_info, root_inum,
+                             &lostfound_str, &inum, &d_type, snapshot);
        if (bch2_err_matches(ret, ENOENT))
                goto create_lostfound;
 
@@ -250,7 +248,10 @@ static int lookup_lostfound(struct btree_trans *trans, u32 snapshot,
         * The bch2_check_dirents pass has already run, dangling dirents
         * shouldn't exist here:
         */
-       return lookup_inode(trans, inum, lostfound, &snapshot);
+       ret = lookup_inode(trans, inum, lostfound, &snapshot);
+       bch_err_msg(c, ret, "looking up lost+found %llu:%u in (root inode %llu, snapshot root %u)",
+                   inum, snapshot, root_inum.inum, bch2_snapshot_root(c, snapshot));
+       return ret;
 
 create_lostfound:
        /*
index d71d26e39521e4410a90cb6bf3e21df360e6c201..bc890776eb57933a5931edd2a2f07570f52b7ab3 100644
@@ -233,7 +233,7 @@ static void __journal_entry_close(struct journal *j, unsigned closed_val, bool t
                prt_str(&pbuf, "entry size: ");
                prt_human_readable_u64(&pbuf, vstruct_bytes(buf->data));
                prt_newline(&pbuf);
-               bch2_prt_task_backtrace(&pbuf, current, 1);
+               bch2_prt_task_backtrace(&pbuf, current, 1, GFP_NOWAIT);
                trace_journal_entry_close(c, pbuf.buf);
                printbuf_exit(&pbuf);
        }
index 04a1e79a5ed392cd8ebaac922a2516b374a6d094..bfd6585e746da45880da9b5ad8fb502586cbf933 100644
@@ -1988,7 +1988,8 @@ CLOSURE_CALLBACK(bch2_journal_write)
                        percpu_ref_get(&ca->io_ref);
 
                        bio = ca->journal.bio;
-                       bio_reset(bio, ca->disk_sb.bdev, REQ_OP_FLUSH);
+                       bio_reset(bio, ca->disk_sb.bdev,
+                                 REQ_OP_WRITE|REQ_PREFLUSH);
                        bio->bi_end_io          = journal_write_endio;
                        bio->bi_private         = ca;
                        closure_bio_submit(bio, cl);
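This hunk and the earlier nocow-flush one make the same change: REQ_OP_FLUSH is the opcode the block layer uses for the flush request it constructs internally, so an empty flush bio is submitted as a write carrying the preflush flag instead. A sketch of the submission pattern, with an illustrative helper name:

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Issue a zero-payload preflush to @bdev; completion runs @endio. */
    static void submit_empty_flush(struct block_device *bdev,
                                   bio_end_io_t *endio, void *private)
    {
            struct bio *bio = bio_alloc(bdev, 0,
                                        REQ_OP_WRITE | REQ_PREFLUSH,
                                        GFP_KERNEL);

            bio->bi_end_io = endio;
            bio->bi_private = private;
            submit_bio(bio);
    }
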
index 89fdb7c21134ebbb6c145a88ed5b1943ab54588a..fcaa5a888744881a4f6c37dd77fbd8cf73b2f4d0 100644
@@ -160,21 +160,16 @@ static inline bool is_visible_key(struct bch_hash_desc desc, subvol_inum inum, s
 }
 
 static __always_inline int
-bch2_hash_lookup(struct btree_trans *trans,
+bch2_hash_lookup_in_snapshot(struct btree_trans *trans,
                 struct btree_iter *iter,
                 const struct bch_hash_desc desc,
                 const struct bch_hash_info *info,
                 subvol_inum inum, const void *key,
-                unsigned flags)
+                unsigned flags, u32 snapshot)
 {
        struct bkey_s_c k;
-       u32 snapshot;
        int ret;
 
-       ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
-       if (ret)
-               return ret;
-
        for_each_btree_key_upto_norestart(trans, *iter, desc.btree_id,
                           SPOS(inum.inum, desc.hash_key(info, key), snapshot),
                           POS(inum.inum, U64_MAX),
@@ -194,6 +189,19 @@ bch2_hash_lookup(struct btree_trans *trans,
        return ret ?: -BCH_ERR_ENOENT_str_hash_lookup;
 }
 
+static __always_inline int
+bch2_hash_lookup(struct btree_trans *trans,
+                struct btree_iter *iter,
+                const struct bch_hash_desc desc,
+                const struct bch_hash_info *info,
+                subvol_inum inum, const void *key,
+                unsigned flags)
+{
+       u32 snapshot;
+       return  bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot) ?:
+               bch2_hash_lookup_in_snapshot(trans, iter, desc, info, inum, key, flags, snapshot);
+}
+
 static __always_inline int
 bch2_hash_hole(struct btree_trans *trans,
               struct btree_iter *iter,
index a135136adeee355cb8854482e85b0c85e6c1b8f8..56b815fd9fc6ee5a541aa8e7007f3c00025c493d 100644
@@ -272,14 +272,14 @@ void bch2_print_string_as_lines(const char *prefix, const char *lines)
        console_unlock();
 }
 
-int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigned skipnr)
+int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *task, unsigned skipnr,
+                       gfp_t gfp)
 {
 #ifdef CONFIG_STACKTRACE
        unsigned nr_entries = 0;
-       int ret = 0;
 
        stack->nr = 0;
-       ret = darray_make_room(stack, 32);
+       int ret = darray_make_room_gfp(stack, 32, gfp);
        if (ret)
                return ret;
 
@@ -308,10 +308,10 @@ void bch2_prt_backtrace(struct printbuf *out, bch_stacktrace *stack)
        }
 }
 
-int bch2_prt_task_backtrace(struct printbuf *out, struct task_struct *task, unsigned skipnr)
+int bch2_prt_task_backtrace(struct printbuf *out, struct task_struct *task, unsigned skipnr, gfp_t gfp)
 {
        bch_stacktrace stack = { 0 };
-       int ret = bch2_save_backtrace(&stack, task, skipnr + 1);
+       int ret = bch2_save_backtrace(&stack, task, skipnr + 1, gfp);
 
        bch2_prt_backtrace(out, &stack);
        darray_exit(&stack);
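With gfp plumbed through to darray_make_room_gfp(), backtraces can now be captured from atomic context: a GFP_NOWAIT caller simply gets a truncated (or empty) trace if the 32-entry buffer cannot be allocated, instead of sleeping. A usage sketch based only on the signatures in this patch:

    struct printbuf buf = PRINTBUF;

    /* Safe under a spinlock: the darray allocation uses GFP_NOWAIT and
     * bch2_save_backtrace() returns -ENOMEM rather than blocking. */
    bch2_prt_task_backtrace(&buf, current, 0, GFP_NOWAIT);
    bch2_print_string_as_lines(KERN_ERR, buf.buf);
    printbuf_exit(&buf);
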
index df67bf55fe2bc2d74265eb8a52fe6d22fca2fd2f..b414736d59a5b36d1344657eaeb6de6113ec5a09 100644
@@ -348,9 +348,9 @@ void bch2_prt_u64_base2(struct printbuf *, u64);
 void bch2_print_string_as_lines(const char *prefix, const char *lines);
 
 typedef DARRAY(unsigned long) bch_stacktrace;
-int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned);
+int bch2_save_backtrace(bch_stacktrace *stack, struct task_struct *, unsigned, gfp_t);
 void bch2_prt_backtrace(struct printbuf *, bch_stacktrace *);
-int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *, unsigned);
+int bch2_prt_task_backtrace(struct printbuf *, struct task_struct *, unsigned, gfp_t);
 
 static inline void prt_bdevname(struct printbuf *out, struct block_device *bdev)
 {
index 193168214eeb17fc8a8a9cff3942eb3f68958e1b..68345f73d429aa2d4537ef620a0048e61c4eb7a8 100644
@@ -141,16 +141,16 @@ static int compression_decompress_bio(struct list_head *ws,
 }
 
 static int compression_decompress(int type, struct list_head *ws,
-               const u8 *data_in, struct page *dest_page,
-               unsigned long start_byte, size_t srclen, size_t destlen)
+               const u8 *data_in, struct page *dest_page,
+               unsigned long dest_pgoff, size_t srclen, size_t destlen)
 {
        switch (type) {
        case BTRFS_COMPRESS_ZLIB: return zlib_decompress(ws, data_in, dest_page,
-                                               start_byte, srclen, destlen);
+                                               dest_pgoff, srclen, destlen);
        case BTRFS_COMPRESS_LZO:  return lzo_decompress(ws, data_in, dest_page,
-                                               start_byte, srclen, destlen);
+                                               dest_pgoff, srclen, destlen);
        case BTRFS_COMPRESS_ZSTD: return zstd_decompress(ws, data_in, dest_page,
-                                               start_byte, srclen, destlen);
+                                               dest_pgoff, srclen, destlen);
        case BTRFS_COMPRESS_NONE:
        default:
                /*
@@ -1037,14 +1037,23 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
  * start_byte tells us the offset into the compressed data we're interested in
  */
 int btrfs_decompress(int type, const u8 *data_in, struct page *dest_page,
-                    unsigned long start_byte, size_t srclen, size_t destlen)
+                    unsigned long dest_pgoff, size_t srclen, size_t destlen)
 {
+       struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb);
        struct list_head *workspace;
+       const u32 sectorsize = fs_info->sectorsize;
        int ret;
 
+       /*
+        * The full destination page range should not exceed the page size.
+        * Nor should @destlen exceed sectorsize: this is only called for
+        * inline file extents, which are never larger than one sector.
+        */
+       ASSERT(dest_pgoff + destlen <= PAGE_SIZE && destlen <= sectorsize);
+
        workspace = get_workspace(type, 0);
        ret = compression_decompress(type, workspace, data_in, dest_page,
-                                    start_byte, srclen, destlen);
+                                    dest_pgoff, srclen, destlen);
        put_workspace(type, workspace);
 
        return ret;
index 93cc92974deee4cebb4fd25d38118f2c046e1840..afd7e50d073d4ac743c924b70e7e1734af2f6ffc 100644
@@ -148,7 +148,7 @@ int zlib_compress_pages(struct list_head *ws, struct address_space *mapping,
                unsigned long *total_in, unsigned long *total_out);
 int zlib_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
 int zlib_decompress(struct list_head *ws, const u8 *data_in,
-               struct page *dest_page, unsigned long start_byte, size_t srclen,
+               struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
                size_t destlen);
 struct list_head *zlib_alloc_workspace(unsigned int level);
 void zlib_free_workspace(struct list_head *ws);
@@ -159,7 +159,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
                unsigned long *total_in, unsigned long *total_out);
 int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb);
 int lzo_decompress(struct list_head *ws, const u8 *data_in,
-               struct page *dest_page, unsigned long start_byte, size_t srclen,
+               struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
                size_t destlen);
 struct list_head *lzo_alloc_workspace(unsigned int level);
 void lzo_free_workspace(struct list_head *ws);
index f396aba92c579641d1cce38b48e7e7cd4febc510..8e8cc11112772dfd020217e30d74fe138c3151ca 100644
@@ -1260,7 +1260,8 @@ static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
        u64 bytes_left, end;
        u64 aligned_start = ALIGN(start, 1 << SECTOR_SHIFT);
 
-       if (WARN_ON(start != aligned_start)) {
+       /* Adjust the range to be aligned to 512B sectors if necessary. */
+       if (start != aligned_start) {
                len -= aligned_start - start;
                len = round_down(len, 1 << SECTOR_SHIFT);
                start = aligned_start;
@@ -4298,6 +4299,42 @@ static int prepare_allocation_clustered(struct btrfs_fs_info *fs_info,
        return 0;
 }
 
+static int prepare_allocation_zoned(struct btrfs_fs_info *fs_info,
+                                   struct find_free_extent_ctl *ffe_ctl)
+{
+       if (ffe_ctl->for_treelog) {
+               spin_lock(&fs_info->treelog_bg_lock);
+               if (fs_info->treelog_bg)
+                       ffe_ctl->hint_byte = fs_info->treelog_bg;
+               spin_unlock(&fs_info->treelog_bg_lock);
+       } else if (ffe_ctl->for_data_reloc) {
+               spin_lock(&fs_info->relocation_bg_lock);
+               if (fs_info->data_reloc_bg)
+                       ffe_ctl->hint_byte = fs_info->data_reloc_bg;
+               spin_unlock(&fs_info->relocation_bg_lock);
+       } else if (ffe_ctl->flags & BTRFS_BLOCK_GROUP_DATA) {
+               struct btrfs_block_group *block_group;
+
+               spin_lock(&fs_info->zone_active_bgs_lock);
+               list_for_each_entry(block_group, &fs_info->zone_active_bgs, active_bg_list) {
+                       /*
+                        * No lock is OK here because avail is monotonically
+                        * decreasing, and this is just a hint.
+                        */
+                       u64 avail = block_group->zone_capacity - block_group->alloc_offset;
+
+                       if (block_group_bits(block_group, ffe_ctl->flags) &&
+                           avail >= ffe_ctl->num_bytes) {
+                               ffe_ctl->hint_byte = block_group->start;
+                               break;
+                       }
+               }
+               spin_unlock(&fs_info->zone_active_bgs_lock);
+       }
+
+       return 0;
+}
+
 static int prepare_allocation(struct btrfs_fs_info *fs_info,
                              struct find_free_extent_ctl *ffe_ctl,
                              struct btrfs_space_info *space_info,
@@ -4308,19 +4345,7 @@ static int prepare_allocation(struct btrfs_fs_info *fs_info,
                return prepare_allocation_clustered(fs_info, ffe_ctl,
                                                    space_info, ins);
        case BTRFS_EXTENT_ALLOC_ZONED:
-               if (ffe_ctl->for_treelog) {
-                       spin_lock(&fs_info->treelog_bg_lock);
-                       if (fs_info->treelog_bg)
-                               ffe_ctl->hint_byte = fs_info->treelog_bg;
-                       spin_unlock(&fs_info->treelog_bg_lock);
-               }
-               if (ffe_ctl->for_data_reloc) {
-                       spin_lock(&fs_info->relocation_bg_lock);
-                       if (fs_info->data_reloc_bg)
-                               ffe_ctl->hint_byte = fs_info->data_reloc_bg;
-                       spin_unlock(&fs_info->relocation_bg_lock);
-               }
-               return 0;
+               return prepare_allocation_zoned(fs_info, ffe_ctl);
        default:
                BUG();
        }
index 809b11472a806c92ef9ad4454d354a9460a51b7b..1eb93d3962aac4608cda0255ea31d7e53dbc8da2 100644
@@ -4458,6 +4458,8 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
        u64 root_flags;
        int ret;
 
+       down_write(&fs_info->subvol_sem);
+
        /*
         * Don't allow to delete a subvolume with send in progress. This is
         * inside the inode lock so the error handling that has to drop the bit
@@ -4469,25 +4471,25 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
                btrfs_warn(fs_info,
                           "attempt to delete subvolume %llu during send",
                           dest->root_key.objectid);
-               return -EPERM;
+               ret = -EPERM;
+               goto out_up_write;
        }
        if (atomic_read(&dest->nr_swapfiles)) {
                spin_unlock(&dest->root_item_lock);
                btrfs_warn(fs_info,
                           "attempt to delete subvolume %llu with active swapfile",
                           root->root_key.objectid);
-               return -EPERM;
+               ret = -EPERM;
+               goto out_up_write;
        }
        root_flags = btrfs_root_flags(&dest->root_item);
        btrfs_set_root_flags(&dest->root_item,
                             root_flags | BTRFS_ROOT_SUBVOL_DEAD);
        spin_unlock(&dest->root_item_lock);
 
-       down_write(&fs_info->subvol_sem);
-
        ret = may_destroy_subvol(dest);
        if (ret)
-               goto out_up_write;
+               goto out_undead;
 
        btrfs_init_block_rsv(&block_rsv, BTRFS_BLOCK_RSV_TEMP);
        /*
@@ -4497,7 +4499,7 @@ int btrfs_delete_subvolume(struct btrfs_inode *dir, struct dentry *dentry)
         */
        ret = btrfs_subvolume_reserve_metadata(root, &block_rsv, 5, true);
        if (ret)
-               goto out_up_write;
+               goto out_undead;
 
        trans = btrfs_start_transaction(root, 0);
        if (IS_ERR(trans)) {
@@ -4563,15 +4565,17 @@ out_end_trans:
        inode->i_flags |= S_DEAD;
 out_release:
        btrfs_subvolume_release_metadata(root, &block_rsv);
-out_up_write:
-       up_write(&fs_info->subvol_sem);
+out_undead:
        if (ret) {
                spin_lock(&dest->root_item_lock);
                root_flags = btrfs_root_flags(&dest->root_item);
                btrfs_set_root_flags(&dest->root_item,
                                root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
                spin_unlock(&dest->root_item_lock);
-       } else {
+       }
+out_up_write:
+       up_write(&fs_info->subvol_sem);
+       if (!ret) {
                d_invalidate(dentry);
                btrfs_prune_dentries(dest);
                ASSERT(dest->send_in_progress == 0);
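The restructuring above takes subvol_sem before BTRFS_ROOT_SUBVOL_DEAD is set and splits the unwind into two labels, so the flag is cleared only on failure while the semaphore is released on every path, in reverse order of acquisition. Reduced to a stand-alone sketch (all names are stand-ins for the real fields and steps):

    #include <linux/rwsem.h>

    static DECLARE_RWSEM(subvol_sem);   /* stand-in for fs_info->subvol_sem */
    static bool dead;                   /* stand-in for the root flag */
    static int do_delete(void);         /* hypothetical work step */

    static int delete_subvol_sketch(void)
    {
            int ret;

            down_write(&subvol_sem);    /* lock first, then mark dead */
            dead = true;
            ret = do_delete();
            if (ret)
                    goto out_undead;
            goto out_up_write;          /* success: the flag stays set */
    out_undead:
            dead = false;               /* failure: undo the flag ... */
    out_up_write:
            up_write(&subvol_sem);      /* ... then drop the semaphore */
            return ret;
    }
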
index 41b479861b3c767bb582920db56ea442c8f7f381..dfed9dd9c2d75b8205531b030c220b42820e77ce 100644
@@ -790,6 +790,9 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
                return -EOPNOTSUPP;
        }
 
+       if (btrfs_root_refs(&root->root_item) == 0)
+               return -ENOENT;
+
        if (!test_bit(BTRFS_ROOT_SHAREABLE, &root->state))
                return -EINVAL;
 
@@ -2608,6 +2611,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp)
                                ret = -EFAULT;
                                goto out;
                        }
+                       if (range.flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP) {
+                               ret = -EOPNOTSUPP;
+                               goto out;
+                       }
                        /* compression requires us to start the IO */
                        if ((range.flags & BTRFS_DEFRAG_RANGE_COMPRESS)) {
                                range.flags |= BTRFS_DEFRAG_RANGE_START_IO;
index 1131d5a29d612ee50e14c488b1812a0657c259f1..e43bc0fdc74ec9b0224568928b31e0ca10c77805 100644
@@ -425,16 +425,16 @@ int lzo_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
 }
 
 int lzo_decompress(struct list_head *ws, const u8 *data_in,
-               struct page *dest_page, unsigned long start_byte, size_t srclen,
+               struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
                size_t destlen)
 {
        struct workspace *workspace = list_entry(ws, struct workspace, list);
+       struct btrfs_fs_info *fs_info = btrfs_sb(dest_page->mapping->host->i_sb);
+       const u32 sectorsize = fs_info->sectorsize;
        size_t in_len;
        size_t out_len;
        size_t max_segment_len = WORKSPACE_BUF_LENGTH;
        int ret = 0;
-       char *kaddr;
-       unsigned long bytes;
 
        if (srclen < LZO_LEN || srclen > max_segment_len + LZO_LEN * 2)
                return -EUCLEAN;
@@ -451,7 +451,7 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
        }
        data_in += LZO_LEN;
 
-       out_len = PAGE_SIZE;
+       out_len = sectorsize;
        ret = lzo1x_decompress_safe(data_in, in_len, workspace->buf, &out_len);
        if (ret != LZO_E_OK) {
                pr_warn("BTRFS: decompress failed!\n");
@@ -459,29 +459,13 @@ int lzo_decompress(struct list_head *ws, const u8 *data_in,
                goto out;
        }
 
-       if (out_len < start_byte) {
+       ASSERT(out_len <= sectorsize);
+       memcpy_to_page(dest_page, dest_pgoff, workspace->buf, out_len);
+       /* Early end, considered as an error. */
+       if (unlikely(out_len < destlen)) {
                ret = -EIO;
-               goto out;
+               memzero_page(dest_page, dest_pgoff + out_len, destlen - out_len);
        }
-
-       /*
-        * the caller is already checking against PAGE_SIZE, but lets
-        * move this check closer to the memcpy/memset
-        */
-       destlen = min_t(unsigned long, destlen, PAGE_SIZE);
-       bytes = min_t(unsigned long, destlen, out_len - start_byte);
-
-       kaddr = kmap_local_page(dest_page);
-       memcpy(kaddr, workspace->buf + start_byte, bytes);
-
-       /*
-        * btrfs_getblock is doing a zero on the tail of the page too,
-        * but this will cover anything missing from the decompressed
-        * data.
-        */
-       if (bytes < destlen)
-               memset(kaddr+bytes, 0, destlen-bytes);
-       kunmap_local(kaddr);
 out:
        return ret;
 }
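After this rework (and the matching zlib one further down), decompressing an inline extent is a single bounded copy: at most one sector lands in the destination page at dest_pgoff, and any shortfall is zero-filled and reported as -EIO. The shared copy-and-zero tail, isolated into an illustrative helper:

    #include <linux/highmem.h>

    /* Copy @out_len decompressed bytes into @dest_page at @dest_pgoff,
     * zeroing the tail when the codec produced less than @destlen. */
    static void copy_decompressed_tail(struct page *dest_page,
                                       unsigned long dest_pgoff,
                                       const void *buf,
                                       size_t out_len, size_t destlen)
    {
            memcpy_to_page(dest_page, dest_pgoff, buf, out_len);
            if (out_len < destlen)
                    memzero_page(dest_page, dest_pgoff + out_len,
                                 destlen - out_len);
    }
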
index 6486f0d7e9931b4fafbc03ddc5ddca0863679d7a..8c4fc98ca9ce7de055841a06e43863eeb6b960e0 100644
@@ -889,8 +889,10 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
 out_unlock:
        spin_unlock(&fs_info->ref_verify_lock);
 out:
-       if (ret)
+       if (ret) {
+               btrfs_free_ref_cache(fs_info);
                btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
+       }
        return ret;
 }
 
@@ -1021,8 +1023,8 @@ int btrfs_build_ref_tree(struct btrfs_fs_info *fs_info)
                }
        }
        if (ret) {
-               btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
                btrfs_free_ref_cache(fs_info);
+               btrfs_clear_opt(fs_info->mount_opt, REF_VERIFY);
        }
        btrfs_free_path(path);
        return ret;
index a01807cbd4d44e4127c798e470cef51d8bfa13e6..0123d272892373b3465c942e75e181d3bc77e681 100644
@@ -1098,12 +1098,22 @@ out:
 static void scrub_read_endio(struct btrfs_bio *bbio)
 {
        struct scrub_stripe *stripe = bbio->private;
+       struct bio_vec *bvec;
+       int sector_nr = calc_sector_number(stripe, bio_first_bvec_all(&bbio->bio));
+       int num_sectors;
+       u32 bio_size = 0;
+       int i;
+
+       ASSERT(sector_nr < stripe->nr_sectors);
+       bio_for_each_bvec_all(bvec, &bbio->bio, i)
+               bio_size += bvec->bv_len;
+       num_sectors = bio_size >> stripe->bg->fs_info->sectorsize_bits;
 
        if (bbio->bio.bi_status) {
-               bitmap_set(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
-               bitmap_set(&stripe->error_bitmap, 0, stripe->nr_sectors);
+               bitmap_set(&stripe->io_error_bitmap, sector_nr, num_sectors);
+               bitmap_set(&stripe->error_bitmap, sector_nr, num_sectors);
        } else {
-               bitmap_clear(&stripe->io_error_bitmap, 0, stripe->nr_sectors);
+               bitmap_clear(&stripe->io_error_bitmap, sector_nr, num_sectors);
        }
        bio_put(&bbio->bio);
        if (atomic_dec_and_test(&stripe->pending_io)) {
@@ -1636,6 +1646,9 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
 {
        struct btrfs_fs_info *fs_info = stripe->bg->fs_info;
        struct btrfs_bio *bbio = NULL;
+       unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
+                                     stripe->bg->length - stripe->logical) >>
+                                 fs_info->sectorsize_bits;
        u64 stripe_len = BTRFS_STRIPE_LEN;
        int mirror = stripe->mirror_num;
        int i;
@@ -1646,6 +1659,10 @@ static void scrub_submit_extent_sector_read(struct scrub_ctx *sctx,
                struct page *page = scrub_stripe_get_page(stripe, i);
                unsigned int pgoff = scrub_stripe_get_page_offset(stripe, i);
 
+               /* We're beyond the chunk boundary, no need to read anymore. */
+               if (i >= nr_sectors)
+                       break;
+
                /* The current sector cannot be merged, submit the bio. */
                if (bbio &&
                    ((i > 0 &&
@@ -1701,6 +1718,9 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
 {
        struct btrfs_fs_info *fs_info = sctx->fs_info;
        struct btrfs_bio *bbio;
+       unsigned int nr_sectors = min(BTRFS_STRIPE_LEN, stripe->bg->start +
+                                     stripe->bg->length - stripe->logical) >>
+                                 fs_info->sectorsize_bits;
        int mirror = stripe->mirror_num;
 
        ASSERT(stripe->bg);
@@ -1715,14 +1735,16 @@ static void scrub_submit_initial_read(struct scrub_ctx *sctx,
        bbio = btrfs_bio_alloc(SCRUB_STRIPE_PAGES, REQ_OP_READ, fs_info,
                               scrub_read_endio, stripe);
 
-       /* Read the whole stripe. */
        bbio->bio.bi_iter.bi_sector = stripe->logical >> SECTOR_SHIFT;
-       for (int i = 0; i < BTRFS_STRIPE_LEN >> PAGE_SHIFT; i++) {
+       /* Read the whole range inside the chunk boundary. */
+       for (unsigned int cur = 0; cur < nr_sectors; cur++) {
+               struct page *page = scrub_stripe_get_page(stripe, cur);
+               unsigned int pgoff = scrub_stripe_get_page_offset(stripe, cur);
                int ret;
 
-               ret = bio_add_page(&bbio->bio, stripe->pages[i], PAGE_SIZE, 0);
+               ret = bio_add_page(&bbio->bio, page, fs_info->sectorsize, pgoff);
                /* We should have allocated enough bio vectors. */
-               ASSERT(ret == PAGE_SIZE);
+               ASSERT(ret == fs_info->sectorsize);
        }
        atomic_inc(&stripe->pending_io);
 
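Both read paths now clamp the I/O to the chunk boundary through the same nr_sectors expression. A worked example with illustrative values: 64 KiB BTRFS_STRIPE_LEN, 4 KiB sectors, and a stripe whose logical start lies 60 KiB before the end of its block group give min(64 KiB, 60 KiB) >> 12 = 15 sectors, so the trailing 4 KiB past the chunk is neither read nor flagged as an error:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long stripe_len  = 64 << 10;  /* BTRFS_STRIPE_LEN */
            unsigned long long to_boundary = 60 << 10;  /* bg end - logical */
            unsigned int sectorsize_bits   = 12;        /* 4 KiB sectors */
            unsigned long long nr_sectors =
                    (to_boundary < stripe_len ? to_boundary : stripe_len)
                    >> sectorsize_bits;

            printf("nr_sectors = %llu\n", nr_sectors);  /* prints 15 */
            return 0;
    }
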
index 4e36550618e580044fb0b0d573ddfee196cdca5d..2d7519a6ce72d3c58e70b1cb567258e642604a87 100644
@@ -8205,8 +8205,8 @@ long btrfs_ioctl_send(struct inode *inode, struct btrfs_ioctl_send_args *arg)
                goto out;
        }
 
-       sctx->clone_roots = kvcalloc(sizeof(*sctx->clone_roots),
-                                    arg->clone_sources_count + 1,
+       sctx->clone_roots = kvcalloc(arg->clone_sources_count + 1,
+                                    sizeof(*sctx->clone_roots),
                                     GFP_KERNEL);
        if (!sctx->clone_roots) {
                ret = -ENOMEM;
index 93511d54abf8280bc6778a17b5fa75a28d3585c1..0e49dab8dad2480243f4d32e6ee934c0f2b35b67 100644
@@ -475,7 +475,8 @@ void btrfs_subpage_set_writeback(const struct btrfs_fs_info *fs_info,
 
        spin_lock_irqsave(&subpage->lock, flags);
        bitmap_set(subpage->bitmaps, start_bit, len >> fs_info->sectorsize_bits);
-       folio_start_writeback(folio);
+       if (!folio_test_writeback(folio))
+               folio_start_writeback(folio);
        spin_unlock_irqrestore(&subpage->lock, flags);
 }
 
index 896acfda17895150ff501960dd72f084c542301e..101f786963d4d7712baab28c912226fb741c0c9b 100644
@@ -1457,6 +1457,14 @@ static int btrfs_reconfigure(struct fs_context *fc)
 
        btrfs_info_to_ctx(fs_info, &old_ctx);
 
+       /*
+        * This is our "bind mount" trick: we don't want to allow the user to
+        * do anything other than mount a different ro/rw and a different
+        * subvol; all of the other mount options should be preserved.
+        */
+       if (mount_reconfigure)
+               ctx->mount_opt = old_ctx.mount_opt;
+
        sync_filesystem(sb);
        set_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
 
index 50fdc69fdddf9d26014a65ed73c13fe694d05e4b..6eccf8496486c0630cd85c90ca813170f08e6eb5 100644
@@ -1436,7 +1436,7 @@ static int check_extent_item(struct extent_buffer *leaf,
                if (unlikely(ptr + btrfs_extent_inline_ref_size(inline_type) > end)) {
                        extent_err(leaf, slot,
 "inline ref item overflows extent item, ptr %lu iref size %u end %lu",
-                                  ptr, inline_type, end);
+                                  ptr, btrfs_extent_inline_ref_size(inline_type), end);
                        return -EUCLEAN;
                }
 
index 4c32497311d2ff6ba28fc9ac5ba8dd5b8f835a66..d67785be2c778c6611d639dcbdcffffec4c513c2 100644
@@ -3087,7 +3087,6 @@ struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
        map = btrfs_find_chunk_map(fs_info, logical, length);
 
        if (unlikely(!map)) {
-               read_unlock(&fs_info->mapping_tree_lock);
                btrfs_crit(fs_info,
                           "unable to find chunk map for logical %llu length %llu",
                           logical, length);
@@ -3095,7 +3094,6 @@ struct btrfs_chunk_map *btrfs_get_chunk_map(struct btrfs_fs_info *fs_info,
        }
 
        if (unlikely(map->start > logical || map->start + map->chunk_len <= logical)) {
-               read_unlock(&fs_info->mapping_tree_lock);
                btrfs_crit(fs_info,
                           "found a bad chunk map, wanted %llu-%llu, found %llu-%llu",
                           logical, logical + length, map->start,
index 36cf1f0e338e2f59d736aaeb1001e00e8eaddaa3..8da66ea699e8febfdef6cc189c5917d22628265d 100644
@@ -354,18 +354,13 @@ done:
 }
 
 int zlib_decompress(struct list_head *ws, const u8 *data_in,
-               struct page *dest_page, unsigned long start_byte, size_t srclen,
+               struct page *dest_page, unsigned long dest_pgoff, size_t srclen,
                size_t destlen)
 {
        struct workspace *workspace = list_entry(ws, struct workspace, list);
        int ret = 0;
        int wbits = MAX_WBITS;
-       unsigned long bytes_left;
-       unsigned long total_out = 0;
-       unsigned long pg_offset = 0;
-
-       destlen = min_t(unsigned long, destlen, PAGE_SIZE);
-       bytes_left = destlen;
+       unsigned long to_copy;
 
        workspace->strm.next_in = data_in;
        workspace->strm.avail_in = srclen;
@@ -390,60 +385,30 @@ int zlib_decompress(struct list_head *ws, const u8 *data_in,
                return -EIO;
        }
 
-       while (bytes_left > 0) {
-               unsigned long buf_start;
-               unsigned long buf_offset;
-               unsigned long bytes;
-
-               ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
-               if (ret != Z_OK && ret != Z_STREAM_END)
-                       break;
-
-               buf_start = total_out;
-               total_out = workspace->strm.total_out;
-
-               if (total_out == buf_start) {
-                       ret = -EIO;
-                       break;
-               }
-
-               if (total_out <= start_byte)
-                       goto next;
-
-               if (total_out > start_byte && buf_start < start_byte)
-                       buf_offset = start_byte - buf_start;
-               else
-                       buf_offset = 0;
-
-               bytes = min(PAGE_SIZE - pg_offset,
-                           PAGE_SIZE - (buf_offset % PAGE_SIZE));
-               bytes = min(bytes, bytes_left);
+       /*
+        * Everything (in/out buf) should be at most one sector, so there is
+        * no need to switch any input/output buffer.
+        */
+       ret = zlib_inflate(&workspace->strm, Z_FINISH);
+       to_copy = min(workspace->strm.total_out, destlen);
+       if (ret != Z_STREAM_END)
+               goto out;
 
-               memcpy_to_page(dest_page, pg_offset,
-                              workspace->buf + buf_offset, bytes);
+       memcpy_to_page(dest_page, dest_pgoff, workspace->buf, to_copy);
 
-               pg_offset += bytes;
-               bytes_left -= bytes;
-next:
-               workspace->strm.next_out = workspace->buf;
-               workspace->strm.avail_out = workspace->buf_size;
-       }
-
-       if (ret != Z_STREAM_END && bytes_left != 0)
+out:
+       if (unlikely(to_copy != destlen)) {
+               pr_warn_ratelimited("BTRFS: inflate failed, decompressed=%lu expected=%zu\n",
+                                       to_copy, destlen);
                ret = -EIO;
-       else
+       } else {
                ret = 0;
+       }
 
        zlib_inflateEnd(&workspace->strm);
 
-       /*
-        * this should only happen if zlib returned fewer bytes than we
-        * expected.  btrfs_get_block is responsible for zeroing from the
-        * end of the inline extent (destlen) to the end of the page
-        */
-       if (pg_offset < destlen) {
-               memzero_page(dest_page, pg_offset, destlen - pg_offset);
-       }
+       if (unlikely(to_copy < destlen))
+               memzero_page(dest_page, dest_pgoff + to_copy, destlen - to_copy);
        return ret;
 }
 
index 5bd76813b23f065fdf670bf8fe3fbd59ee0c88d9..168af9d000d168324fcc8355781517ddeedeefd1 100644
@@ -2055,6 +2055,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 
        map = block_group->physical_map;
 
+       spin_lock(&fs_info->zone_active_bgs_lock);
        spin_lock(&block_group->lock);
        if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
                ret = true;
@@ -2067,7 +2068,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
                goto out_unlock;
        }
 
-       spin_lock(&fs_info->zone_active_bgs_lock);
        for (i = 0; i < map->num_stripes; i++) {
                struct btrfs_zoned_device_info *zinfo;
                int reserved = 0;
@@ -2087,20 +2087,17 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
                 */
                if (atomic_read(&zinfo->active_zones_left) <= reserved) {
                        ret = false;
-                       spin_unlock(&fs_info->zone_active_bgs_lock);
                        goto out_unlock;
                }
 
                if (!btrfs_dev_set_active_zone(device, physical)) {
                        /* Cannot activate the zone */
                        ret = false;
-                       spin_unlock(&fs_info->zone_active_bgs_lock);
                        goto out_unlock;
                }
                if (!is_data)
                        zinfo->reserved_active_zones--;
        }
-       spin_unlock(&fs_info->zone_active_bgs_lock);
 
        /* Successfully activated all the zones */
        set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
@@ -2108,8 +2105,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 
        /* For the active block group list */
        btrfs_get_block_group(block_group);
-
-       spin_lock(&fs_info->zone_active_bgs_lock);
        list_add_tail(&block_group->active_bg_list, &fs_info->zone_active_bgs);
        spin_unlock(&fs_info->zone_active_bgs_lock);
 
@@ -2117,6 +2112,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
 
 out_unlock:
        spin_unlock(&block_group->lock);
+       spin_unlock(&fs_info->zone_active_bgs_lock);
        return ret;
 }
 
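The reshuffle gives btrfs_zone_activate() one consistent order: the fs-wide zone_active_bgs_lock is taken before the per-block-group lock and released after it, rather than being acquired and dropped piecemeal inside the stripe loop. In outline:

    spin_lock(&fs_info->zone_active_bgs_lock);      /* outer, fs-wide */
    spin_lock(&block_group->lock);                  /* inner, per-group */
    /* ... check flags, reserve zones, extend the active list ... */
    spin_unlock(&block_group->lock);
    spin_unlock(&fs_info->zone_active_bgs_lock);
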
index 5fd74ec60befc6cb192e8102a14e87de2b45bb87..4ba42f1fa3b4077b04735282354de250e70fe87d 100644
@@ -539,6 +539,9 @@ int cachefiles_ondemand_init_object(struct cachefiles_object *object)
        struct fscache_volume *volume = object->volume->vcookie;
        size_t volume_key_size, cookie_key_size, data_len;
 
+       if (!object->ondemand)
+               return 0;
+
        /*
         * CacheFiles will firstly check the cache file under the root cache
         * directory. If the coherency check failed, it will fallback to
index 279933e007d21798549df035b4aa595597f225b6..7cc5841577b240f90f9a623e64adc87c3fb24982 100644
 struct z_erofs_decompress_req {
        struct super_block *sb;
        struct page **in, **out;
-
        unsigned short pageofs_in, pageofs_out;
        unsigned int inputsize, outputsize;
 
-       /* indicate the algorithm will be used for decompression */
-       unsigned int alg;
+       unsigned int alg;       /* the algorithm for decompression */
        bool inplace_io, partial_decoding, fillgaps;
+       gfp_t gfp;      /* allocation flags for extra temporary buffers */
 };
 
 struct z_erofs_decompressor {
index 072ef6a66823ef351923f2c0514c9ddec50e5d8f..d4cee95af14c7490e85706589853059b99b7e688 100644
@@ -111,8 +111,9 @@ static int z_erofs_lz4_prepare_dstpages(struct z_erofs_lz4_decompress_ctx *ctx,
                        victim = availables[--top];
                        get_page(victim);
                } else {
-                       victim = erofs_allocpage(pagepool,
-                                                GFP_KERNEL | __GFP_NOFAIL);
+                       victim = erofs_allocpage(pagepool, rq->gfp);
+                       if (!victim)
+                               return -ENOMEM;
                        set_page_private(victim, Z_EROFS_SHORTLIVED_PAGE);
                }
                rq->out[i] = victim;
index 4a64a9c91dd322379d2c4be2268f6c4c24f995ee..b98872058abe82d4034b84c1c93c46645b50968b 100644
@@ -95,7 +95,7 @@ int z_erofs_load_deflate_config(struct super_block *sb,
 }
 
 int z_erofs_deflate_decompress(struct z_erofs_decompress_req *rq,
-                              struct page **pagepool)
+                              struct page **pgpl)
 {
        const unsigned int nrpages_out =
                PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@@ -158,8 +158,12 @@ again:
                        strm->z.avail_out = min_t(u32, outsz, PAGE_SIZE - pofs);
                        outsz -= strm->z.avail_out;
                        if (!rq->out[no]) {
-                               rq->out[no] = erofs_allocpage(pagepool,
-                                               GFP_KERNEL | __GFP_NOFAIL);
+                               rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
+                               if (!rq->out[no]) {
+                                       kout = NULL;
+                                       err = -ENOMEM;
+                                       break;
+                               }
                                set_page_private(rq->out[no],
                                                 Z_EROFS_SHORTLIVED_PAGE);
                        }
@@ -211,8 +215,11 @@ again:
 
                        DBG_BUGON(erofs_page_is_managed(EROFS_SB(sb),
                                                        rq->in[j]));
-                       tmppage = erofs_allocpage(pagepool,
-                                                 GFP_KERNEL | __GFP_NOFAIL);
+                       tmppage = erofs_allocpage(pgpl, rq->gfp);
+                       if (!tmppage) {
+                               err = -ENOMEM;
+                               goto failed;
+                       }
                        set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
                        copy_highpage(tmppage, rq->in[j]);
                        rq->in[j] = tmppage;
@@ -230,7 +237,7 @@ again:
                        break;
                }
        }
-
+failed:
        if (zlib_inflateEnd(&strm->z) != Z_OK && !err)
                err = -EIO;
        if (kout)
index 2dd14f99c1dc10eeea57eedfccbb649bf184828f..6ca357d83cfa458225f20e2d6f6a45307fef2194 100644
@@ -148,7 +148,7 @@ again:
 }
 
 int z_erofs_lzma_decompress(struct z_erofs_decompress_req *rq,
-                           struct page **pagepool)
+                           struct page **pgpl)
 {
        const unsigned int nrpages_out =
                PAGE_ALIGN(rq->pageofs_out + rq->outputsize) >> PAGE_SHIFT;
@@ -215,8 +215,11 @@ again:
                                                   PAGE_SIZE - pageofs);
                        outlen -= strm->buf.out_size;
                        if (!rq->out[no] && rq->fillgaps) {     /* deduped */
-                               rq->out[no] = erofs_allocpage(pagepool,
-                                               GFP_KERNEL | __GFP_NOFAIL);
+                               rq->out[no] = erofs_allocpage(pgpl, rq->gfp);
+                               if (!rq->out[no]) {
+                                       err = -ENOMEM;
+                                       break;
+                               }
                                set_page_private(rq->out[no],
                                                 Z_EROFS_SHORTLIVED_PAGE);
                        }
@@ -258,8 +261,11 @@ again:
 
                        DBG_BUGON(erofs_page_is_managed(EROFS_SB(rq->sb),
                                                        rq->in[j]));
-                       tmppage = erofs_allocpage(pagepool,
-                                                 GFP_KERNEL | __GFP_NOFAIL);
+                       tmppage = erofs_allocpage(pgpl, rq->gfp);
+                       if (!tmppage) {
+                               err = -ENOMEM;
+                               goto failed;
+                       }
                        set_page_private(tmppage, Z_EROFS_SHORTLIVED_PAGE);
                        copy_highpage(tmppage, rq->in[j]);
                        rq->in[j] = tmppage;
@@ -277,6 +283,7 @@ again:
                        break;
                }
        }
+failed:
        if (no < nrpages_out && strm->buf.out)
                kunmap(rq->out[no]);
        if (ni < nrpages_in)
index bc12030393b24f26231fb363ac07e3150cd6babb..5ff90026fd43fe116e3a34178bf00b9f3303b411 100644
@@ -459,7 +459,7 @@ static struct erofs_fscache *erofs_fscache_acquire_cookie(struct super_block *sb
 
        inode->i_size = OFFSET_MAX;
        inode->i_mapping->a_ops = &erofs_fscache_meta_aops;
-       mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+       mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
        inode->i_blkbits = EROFS_SB(sb)->blkszbits;
        inode->i_private = ctx;
 
index 3d616dea55dc3dbccbac495988f865947b0d2a96..36e638e8b53a3d290fcb7ade23a40dc4805be9e6 100644
@@ -60,7 +60,7 @@ static void *erofs_read_inode(struct erofs_buf *buf,
                } else {
                        const unsigned int gotten = sb->s_blocksize - *ofs;
 
-                       copied = kmalloc(vi->inode_isize, GFP_NOFS);
+                       copied = kmalloc(vi->inode_isize, GFP_KERNEL);
                        if (!copied) {
                                err = -ENOMEM;
                                goto err_out;
index 5dea308764b45038f8236bf31b004067f0f297a6..e146d09151af4188efe4cb7bf2ad4a938b8596af 100644
@@ -81,7 +81,7 @@ struct erofs_workgroup *erofs_insert_workgroup(struct super_block *sb,
 repeat:
        xa_lock(&sbi->managed_pslots);
        pre = __xa_cmpxchg(&sbi->managed_pslots, grp->index,
-                          NULL, grp, GFP_NOFS);
+                          NULL, grp, GFP_KERNEL);
        if (pre) {
                if (xa_is_err(pre)) {
                        pre = ERR_PTR(xa_err(pre));
index 692c0c39be638dc4b2454b63968a0467043ddc7a..ff0aa72b0db342f10ed7c1b565d2cc7bd6a540ff 100644
@@ -82,6 +82,9 @@ struct z_erofs_pcluster {
        /* L: indicate several pageofs_outs or not */
        bool multibases;
 
+       /* L: whether extra buffer allocations are best-effort */
+       bool besteffort;
+
        /* A: compressed bvecs (can be cached or inplaced pages) */
        struct z_erofs_bvec compressed_bvecs[];
 };
@@ -230,7 +233,7 @@ static int z_erofs_bvec_enqueue(struct z_erofs_bvec_iter *iter,
                struct page *nextpage = *candidate_bvpage;
 
                if (!nextpage) {
-                       nextpage = erofs_allocpage(pagepool, GFP_NOFS);
+                       nextpage = erofs_allocpage(pagepool, GFP_KERNEL);
                        if (!nextpage)
                                return -ENOMEM;
                        set_page_private(nextpage, Z_EROFS_SHORTLIVED_PAGE);
@@ -302,7 +305,7 @@ static struct z_erofs_pcluster *z_erofs_alloc_pcluster(unsigned int size)
                if (nrpages > pcs->maxpages)
                        continue;
 
-               pcl = kmem_cache_zalloc(pcs->slab, GFP_NOFS);
+               pcl = kmem_cache_zalloc(pcs->slab, GFP_KERNEL);
                if (!pcl)
                        return ERR_PTR(-ENOMEM);
                pcl->pclustersize = size;
@@ -563,21 +566,19 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
                        __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
        unsigned int i;
 
-       if (i_blocksize(fe->inode) != PAGE_SIZE)
-               return;
-       if (fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
+       if (i_blocksize(fe->inode) != PAGE_SIZE ||
+           fe->mode < Z_EROFS_PCLUSTER_FOLLOWED)
                return;
 
        for (i = 0; i < pclusterpages; ++i) {
                struct page *page, *newpage;
                void *t;        /* mark pages just found for debugging */
 
-               /* the compressed page was loaded before */
+               /* Inaccurate check w/o locking to avoid unneeded lookups */
                if (READ_ONCE(pcl->compressed_bvecs[i].page))
                        continue;
 
                page = find_get_page(mc, pcl->obj.index + i);
-
                if (page) {
                        t = (void *)((unsigned long)page | 1);
                        newpage = NULL;
@@ -597,9 +598,13 @@ static void z_erofs_bind_cache(struct z_erofs_decompress_frontend *fe)
                        set_page_private(newpage, Z_EROFS_PREALLOCATED_PAGE);
                        t = (void *)((unsigned long)newpage | 1);
                }
-
-               if (!cmpxchg_relaxed(&pcl->compressed_bvecs[i].page, NULL, t))
+               spin_lock(&pcl->obj.lockref.lock);
+               if (!pcl->compressed_bvecs[i].page) {
+                       pcl->compressed_bvecs[i].page = t;
+                       spin_unlock(&pcl->obj.lockref.lock);
                        continue;
+               }
+               spin_unlock(&pcl->obj.lockref.lock);
 
                if (page)
                        put_page(page);
@@ -694,7 +699,7 @@ static void z_erofs_cache_invalidate_folio(struct folio *folio,
        DBG_BUGON(stop > folio_size(folio) || stop < length);
 
        if (offset == 0 && stop == folio_size(folio))
-               while (!z_erofs_cache_release_folio(folio, GFP_NOFS))
+               while (!z_erofs_cache_release_folio(folio, 0))
                        cond_resched();
 }
 
@@ -713,36 +718,30 @@ int erofs_init_managed_cache(struct super_block *sb)
        set_nlink(inode, 1);
        inode->i_size = OFFSET_MAX;
        inode->i_mapping->a_ops = &z_erofs_cache_aops;
-       mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
+       mapping_set_gfp_mask(inode->i_mapping, GFP_KERNEL);
        EROFS_SB(sb)->managed_cache = inode;
        return 0;
 }
 
-static bool z_erofs_try_inplace_io(struct z_erofs_decompress_frontend *fe,
-                                  struct z_erofs_bvec *bvec)
-{
-       struct z_erofs_pcluster *const pcl = fe->pcl;
-
-       while (fe->icur > 0) {
-               if (!cmpxchg(&pcl->compressed_bvecs[--fe->icur].page,
-                            NULL, bvec->page)) {
-                       pcl->compressed_bvecs[fe->icur] = *bvec;
-                       return true;
-               }
-       }
-       return false;
-}
-
 /* callers must be with pcluster lock held */
 static int z_erofs_attach_page(struct z_erofs_decompress_frontend *fe,
                               struct z_erofs_bvec *bvec, bool exclusive)
 {
+       struct z_erofs_pcluster *pcl = fe->pcl;
        int ret;
 
        if (exclusive) {
                /* give priority for inplaceio to use file pages first */
-               if (z_erofs_try_inplace_io(fe, bvec))
+               spin_lock(&pcl->obj.lockref.lock);
+               while (fe->icur > 0) {
+                       if (pcl->compressed_bvecs[--fe->icur].page)
+                               continue;
+                       pcl->compressed_bvecs[fe->icur] = *bvec;
+                       spin_unlock(&pcl->obj.lockref.lock);
                        return 0;
+               }
+               spin_unlock(&pcl->obj.lockref.lock);
+
                /* otherwise, check if it can be used as a bvpage */
                if (fe->mode >= Z_EROFS_PCLUSTER_FOLLOWED &&
                    !fe->candidate_bvpage)
@@ -964,7 +963,7 @@ static int z_erofs_read_fragment(struct super_block *sb, struct page *page,
 }
 
 static int z_erofs_do_read_page(struct z_erofs_decompress_frontend *fe,
-                               struct page *page)
+                               struct page *page, bool ra)
 {
        struct inode *const inode = fe->inode;
        struct erofs_map_blocks *const map = &fe->map;
@@ -1014,6 +1013,7 @@ repeat:
                err = z_erofs_pcluster_begin(fe);
                if (err)
                        goto out;
+               fe->pcl->besteffort |= !ra;
        }
 
        /*
@@ -1280,6 +1280,9 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
                                        .inplace_io = overlapped,
                                        .partial_decoding = pcl->partial,
                                        .fillgaps = pcl->multibases,
+                                       .gfp = pcl->besteffort ?
+                                               GFP_KERNEL | __GFP_NOFAIL :
+                                               GFP_NOWAIT | __GFP_NORETRY
                                 }, be->pagepool);
 
        /* must handle all compressed pages before actual file pages */
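This gfp selection is the point of the new besteffort flag: pclusters touched by a synchronous read may block and must not fail, since a reader is waiting on them, while allocations made purely for readahead fail fast so the readahead window can quietly shrink under memory pressure. Factored out (the helper itself is illustrative; the patch open-codes the expression):

    #include <linux/gfp.h>

    static gfp_t z_erofs_extra_gfp(bool besteffort)
    {
            return besteffort ? GFP_KERNEL | __GFP_NOFAIL
                              : GFP_NOWAIT | __GFP_NORETRY;
    }
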
@@ -1322,6 +1325,7 @@ static int z_erofs_decompress_pcluster(struct z_erofs_decompress_backend *be,
        pcl->length = 0;
        pcl->partial = true;
        pcl->multibases = false;
+       pcl->besteffort = false;
        pcl->bvset.nextpage = NULL;
        pcl->vcnt = 0;
 
@@ -1423,23 +1427,26 @@ static void z_erofs_fill_bio_vec(struct bio_vec *bvec,
 {
        gfp_t gfp = mapping_gfp_mask(mc);
        bool tocache = false;
-       struct z_erofs_bvec *zbv = pcl->compressed_bvecs + nr;
+       struct z_erofs_bvec zbv;
        struct address_space *mapping;
-       struct page *page, *oldpage;
+       struct page *page;
        int justfound, bs = i_blocksize(f->inode);
 
        /* Except for inplace pages, the entire page can be used for I/Os */
        bvec->bv_offset = 0;
        bvec->bv_len = PAGE_SIZE;
 repeat:
-       oldpage = READ_ONCE(zbv->page);
-       if (!oldpage)
+       spin_lock(&pcl->obj.lockref.lock);
+       zbv = pcl->compressed_bvecs[nr];
+       page = zbv.page;
+       justfound = (unsigned long)page & 1UL;
+       page = (struct page *)((unsigned long)page & ~1UL);
+       pcl->compressed_bvecs[nr].page = page;
+       spin_unlock(&pcl->obj.lockref.lock);
+       if (!page)
                goto out_allocpage;
 
-       justfound = (unsigned long)oldpage & 1UL;
-       page = (struct page *)((unsigned long)oldpage & ~1UL);
        bvec->bv_page = page;
-
        DBG_BUGON(z_erofs_is_shortlived_page(page));
        /*
         * Handle preallocated cached pages.  We tried to allocate such pages
@@ -1448,7 +1455,6 @@ repeat:
         */
        if (page->private == Z_EROFS_PREALLOCATED_PAGE) {
                set_page_private(page, 0);
-               WRITE_ONCE(zbv->page, page);
                tocache = true;
                goto out_tocache;
        }
@@ -1459,9 +1465,9 @@ repeat:
         * therefore it is impossible for `mapping` to be NULL.
         */
        if (mapping && mapping != mc) {
-               if (zbv->offset < 0)
-                       bvec->bv_offset = round_up(-zbv->offset, bs);
-               bvec->bv_len = round_up(zbv->end, bs) - bvec->bv_offset;
+               if (zbv.offset < 0)
+                       bvec->bv_offset = round_up(-zbv.offset, bs);
+               bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset;
                return;
        }
 
@@ -1471,7 +1477,6 @@ repeat:
 
        /* the cached page is still in managed cache */
        if (page->mapping == mc) {
-               WRITE_ONCE(zbv->page, page);
                /*
                 * The cached page is still available but without a valid
                 * `->private` pcluster hint.  Let's reconnect them.
@@ -1503,11 +1508,15 @@ repeat:
        put_page(page);
 out_allocpage:
        page = erofs_allocpage(&f->pagepool, gfp | __GFP_NOFAIL);
-       if (oldpage != cmpxchg(&zbv->page, oldpage, page)) {
+       spin_lock(&pcl->obj.lockref.lock);
+       if (pcl->compressed_bvecs[nr].page) {
                erofs_pagepool_add(&f->pagepool, page);
+               spin_unlock(&pcl->obj.lockref.lock);
                cond_resched();
                goto repeat;
        }
+       pcl->compressed_bvecs[nr].page = page;
+       spin_unlock(&pcl->obj.lockref.lock);
        bvec->bv_page = page;
 out_tocache:
        if (!tocache || bs != PAGE_SIZE ||
@@ -1685,6 +1694,7 @@ submit_bio_retry:
 
                        if (cur + bvec.bv_len > end)
                                bvec.bv_len = end - cur;
+                       DBG_BUGON(bvec.bv_len < sb->s_blocksize);
                        if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len,
                                          bvec.bv_offset))
                                goto submit_bio_retry;
@@ -1785,7 +1795,7 @@ static void z_erofs_pcluster_readmore(struct z_erofs_decompress_frontend *f,
                        if (PageUptodate(page))
                                unlock_page(page);
                        else
-                               (void)z_erofs_do_read_page(f, page);
+                               (void)z_erofs_do_read_page(f, page, !!rac);
                        put_page(page);
                }
 
@@ -1806,7 +1816,7 @@ static int z_erofs_read_folio(struct file *file, struct folio *folio)
        f.headoffset = (erofs_off_t)folio->index << PAGE_SHIFT;
 
        z_erofs_pcluster_readmore(&f, NULL, true);
-       err = z_erofs_do_read_page(&f, &folio->page);
+       err = z_erofs_do_read_page(&f, &folio->page, false);
        z_erofs_pcluster_readmore(&f, NULL, false);
        z_erofs_pcluster_end(&f);
 
@@ -1847,7 +1857,7 @@ static void z_erofs_readahead(struct readahead_control *rac)
                folio = head;
                head = folio_get_private(folio);
 
-               err = z_erofs_do_read_page(&f, &folio->page);
+               err = z_erofs_do_read_page(&f, &folio->page, true);
                if (err && err != -EINTR)
                        erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
                                  folio->index, EROFS_I(inode)->nid);
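The larger change in this file replaces the lock-free cmpxchg() claim of compressed_bvecs[nr].page with a snapshot-and-claim under pcl->obj.lockref.lock, so the tag bit and the pointer are read and cleared as one unit. A userspace sketch of the pattern, with pthread_mutex_t standing in for the kernel spinlock and an invented struct slot:

/*
 * Sketch (not the kernel code) of the slot-claim pattern: read and
 * claim the slot under a lock so the tag bit and the pointer are
 * observed together, instead of cmpxchg() on a tagged pointer.
 * The lock is assumed initialized with pthread_mutex_init().
 */
#include <pthread.h>
#include <stdbool.h>

struct slot {
        pthread_mutex_t lock;
        void *page;             /* low bit doubles as a "just found" tag */
};

static bool claim_slot(struct slot *s, void *newpage)
{
        bool claimed = false;

        pthread_mutex_lock(&s->lock);
        if (!s->page) {         /* still empty: install our page */
                s->page = newpage;
                claimed = true;
        }
        pthread_mutex_unlock(&s->lock);
        return claimed;         /* false: lost the race, free and retry */
}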
index 8cdd5b2dd09c2e8047d6bd360e14b060dd23fbf0..af4fbb61cd53e97c788387a0d8277d1ce5495d7d 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -128,7 +128,7 @@ SYSCALL_DEFINE1(uselib, const char __user *, library)
        struct filename *tmp = getname(library);
        int error = PTR_ERR(tmp);
        static const struct open_flags uselib_flags = {
-               .open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
+               .open_flag = O_LARGEFILE | O_RDONLY,
                .acc_mode = MAY_READ | MAY_EXEC,
                .intent = LOOKUP_OPEN,
                .lookup_flags = LOOKUP_FOLLOW,
@@ -904,6 +904,10 @@ EXPORT_SYMBOL(transfer_args_to_stack);
 
 #endif /* CONFIG_MMU */
 
+/*
+ * On success, caller must call do_close_execat() on the returned
+ * struct file to close it.
+ */
 static struct file *do_open_execat(int fd, struct filename *name, int flags)
 {
        struct file *file;
@@ -948,6 +952,17 @@ exit:
        return ERR_PTR(err);
 }
 
+/**
+ * open_exec - Open a path name for execution
+ *
+ * @name: path name to open with the intent of executing it.
+ *
+ * Returns ERR_PTR on failure or allocated struct file on success.
+ *
+ * As this is a wrapper for the internal do_open_execat(), callers
+ * must call allow_write_access() before fput() on release. Also see
+ * do_close_execat().
+ */
 struct file *open_exec(const char *name)
 {
        struct filename *filename = getname_kernel(name);
@@ -1409,6 +1424,9 @@ int begin_new_exec(struct linux_binprm * bprm)
 
 out_unlock:
        up_write(&me->signal->exec_update_lock);
+       if (!bprm->cred)
+               mutex_unlock(&me->signal->cred_guard_mutex);
+
 out:
        return retval;
 }
@@ -1484,6 +1502,15 @@ static int prepare_bprm_creds(struct linux_binprm *bprm)
        return -ENOMEM;
 }
 
+/* Matches do_open_execat() */
+static void do_close_execat(struct file *file)
+{
+       if (!file)
+               return;
+       allow_write_access(file);
+       fput(file);
+}
+
 static void free_bprm(struct linux_binprm *bprm)
 {
        if (bprm->mm) {
@@ -1495,10 +1522,7 @@ static void free_bprm(struct linux_binprm *bprm)
                mutex_unlock(&current->signal->cred_guard_mutex);
                abort_creds(bprm->cred);
        }
-       if (bprm->file) {
-               allow_write_access(bprm->file);
-               fput(bprm->file);
-       }
+       do_close_execat(bprm->file);
        if (bprm->executable)
                fput(bprm->executable);
        /* If a binfmt changed the interp, free it. */
@@ -1520,8 +1544,7 @@ static struct linux_binprm *alloc_bprm(int fd, struct filename *filename, int fl
 
        bprm = kzalloc(sizeof(*bprm), GFP_KERNEL);
        if (!bprm) {
-               allow_write_access(file);
-               fput(file);
+               do_close_execat(file);
                return ERR_PTR(-ENOMEM);
        }
 
@@ -1610,6 +1633,7 @@ static void check_unsafe_exec(struct linux_binprm *bprm)
        }
        rcu_read_unlock();
 
+       /* "users" and "in_exec" locked for copy_fs() */
        if (p->fs->users > n_fs)
                bprm->unsafe |= LSM_UNSAFE_SHARE;
        else
@@ -1826,9 +1850,6 @@ static int exec_binprm(struct linux_binprm *bprm)
        return 0;
 }
 
-/*
- * sys_execve() executes a new program.
- */
 static int bprm_execve(struct linux_binprm *bprm)
 {
        int retval;
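The do_open_execat()/do_close_execat() pairing introduced above is the classic symmetric acquire/release helper: the release undoes every side effect of the acquire (here allow_write_access() before fput()), so no caller has to remember the unwind order. A loose userspace analogue; the names are invented and flock() merely stands in for the write-deny side effect:

/*
 * Invented acquire/release pair mirroring the structure, not the
 * semantics, of do_open_execat()/do_close_execat().
 */
#include <fcntl.h>
#include <sys/file.h>
#include <unistd.h>

static int grab_exec_file(const char *path)
{
        int fd = open(path, O_RDONLY | O_CLOEXEC);

        if (fd < 0)
                return -1;
        if (flock(fd, LOCK_SH) < 0) {   /* "deny write" analogue */
                close(fd);
                return -1;
        }
        return fd;
}

static void drop_exec_file(int fd)      /* matches grab_exec_file() */
{
        if (fd < 0)                     /* tolerate "nothing to drop" */
                return;
        flock(fd, LOCK_UN);             /* undo the side effect first */
        close(fd);
}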
index ea5b8e57d904e20b964fb5e627c4bae894370401..671664fed3077f794de3a5707bd69b90cb328e78 100644 (file)
@@ -340,7 +340,7 @@ static ssize_t hugetlbfs_read_iter(struct kiocb *iocb, struct iov_iter *to)
                } else {
                        folio_unlock(folio);
 
-                       if (!folio_test_has_hwpoisoned(folio))
+                       if (!folio_test_hwpoison(folio))
                                want = nr;
                        else {
                                /*
index 8eec84c651bfba2da05af6a834c4ad3fe7a60f2b..cb3cda1390adb16e1ad8031783849ba59022db87 100644 (file)
@@ -2763,9 +2763,7 @@ static int dbBackSplit(dmtree_t *tp, int leafno, bool is_ctl)
  *     leafno  - the number of the leaf to be updated.
  *     newval  - the new value for the leaf.
  *
- * RETURN VALUES:
- *  0          - success
- *     -EIO    - i/o error
+ * RETURN VALUES: 0 - always succeeds

  */
 static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl)
 {
@@ -2792,10 +2790,6 @@ static int dbJoin(dmtree_t *tp, int leafno, int newval, bool is_ctl)
                 * get the buddy size (number of words covered) of
                 * the new value.
                 */
-
-               if ((newval - tp->dmt_budmin) > BUDMIN)
-                       return -EIO;
-
                budsz = BUDSIZE(newval, tp->dmt_budmin);
 
                /* try to join.
index a59e7b2edaacdcb251765793f14e87eb93a60bb3..3298c29b5548398c0026ccf6ab30c32cb290070d 100644 (file)
@@ -101,7 +101,7 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
                }
 
                if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
-                       if (folio_index(folio) == rreq->no_unlock_folio &&
+                       if (folio->index == rreq->no_unlock_folio &&
                            test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
                                _debug("no unlock");
                        else
@@ -246,13 +246,13 @@ EXPORT_SYMBOL(netfs_readahead);
  */
 int netfs_read_folio(struct file *file, struct folio *folio)
 {
-       struct address_space *mapping = folio_file_mapping(folio);
+       struct address_space *mapping = folio->mapping;
        struct netfs_io_request *rreq;
        struct netfs_inode *ctx = netfs_inode(mapping->host);
        struct folio *sink = NULL;
        int ret;
 
-       _enter("%lx", folio_index(folio));
+       _enter("%lx", folio->index);
 
        rreq = netfs_alloc_request(mapping, file,
                                   folio_file_pos(folio), folio_size(folio),
@@ -460,7 +460,7 @@ retry:
                ret = PTR_ERR(rreq);
                goto error;
        }
-       rreq->no_unlock_folio   = folio_index(folio);
+       rreq->no_unlock_folio   = folio->index;
        __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
 
        ret = netfs_begin_cache_read(rreq, ctx);
@@ -518,7 +518,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
                             size_t offset, size_t len)
 {
        struct netfs_io_request *rreq;
-       struct address_space *mapping = folio_file_mapping(folio);
+       struct address_space *mapping = folio->mapping;
        struct netfs_inode *ctx = netfs_inode(mapping->host);
        unsigned long long start = folio_pos(folio);
        size_t flen = folio_size(folio);
@@ -535,7 +535,7 @@ int netfs_prefetch_for_write(struct file *file, struct folio *folio,
                goto error;
        }
 
-       rreq->no_unlock_folio = folio_index(folio);
+       rreq->no_unlock_folio = folio->index;
        __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
        ret = netfs_begin_cache_read(rreq, ctx);
        if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
index 93dc76f34e39a077a82d235fe5ec69bbc5d6e13d..a3059b3168fd95756c7e57986ed999e205dfa8aa 100644 (file)
@@ -221,10 +221,11 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
                if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
                        break;
 
-               ret = -ENOMEM;
                folio = netfs_grab_folio_for_write(mapping, pos, part);
-               if (!folio)
+               if (IS_ERR(folio)) {
+                       ret = PTR_ERR(folio);
                        break;
+               }
 
                flen = folio_size(folio);
                offset = pos & (flen - 1);
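The netfs_grab_folio_for_write() hunk above works because that function reports failure via ERR_PTR() rather than NULL, so the old '!folio' test could never fire on error. For reference, a userspace rendering of the kernel's ERR_PTR convention (trimmed; MAX_ERRNO matches the kernel's bound):

/*
 * The error code lives in the pointer's top range, so IS_ERR() must
 * be used -- a plain NULL test never fires on failure.
 */
#include <stdbool.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)        /* e.g. ERR_PTR(-ENOMEM) */
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline bool IS_ERR(const void *ptr)
{
        return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}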
@@ -343,7 +344,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
                        break;
                default:
                        WARN(true, "Unexpected modify type %u ix=%lx\n",
-                            howto, folio_index(folio));
+                            howto, folio->index);
                        ret = -EIO;
                        goto error_folio_unlock;
                }
@@ -648,7 +649,7 @@ static void netfs_pages_written_back(struct netfs_io_request *wreq)
        xas_for_each(&xas, folio, last) {
                WARN(!folio_test_writeback(folio),
                     "bad %zx @%llx page %lx %lx\n",
-                    wreq->len, wreq->start, folio_index(folio), last);
+                    wreq->len, wreq->start, folio->index, last);
 
                if ((finfo = netfs_folio_info(folio))) {
                        /* Streaming writes cannot be redirtied whilst under
@@ -795,7 +796,7 @@ static void netfs_extend_writeback(struct address_space *mapping,
                                continue;
                        if (xa_is_value(folio))
                                break;
-                       if (folio_index(folio) != index) {
+                       if (folio->index != index) {
                                xas_reset(xas);
                                break;
                        }
@@ -901,7 +902,7 @@ static ssize_t netfs_write_back_from_locked_folio(struct address_space *mapping,
        long count = wbc->nr_to_write;
        int ret;
 
-       _enter(",%lx,%llx-%llx,%u", folio_index(folio), start, end, caching);
+       _enter(",%lx,%llx-%llx,%u", folio->index, start, end, caching);
 
        wreq = netfs_alloc_request(mapping, NULL, start, folio_size(folio),
                                   NETFS_WRITEBACK);
@@ -1047,7 +1048,7 @@ search_again:
 
        start = folio_pos(folio); /* May regress with THPs */
 
-       _debug("wback %lx", folio_index(folio));
+       _debug("wback %lx", folio->index);
 
        /* At this point we hold neither the i_pages lock nor the page lock:
         * the page may be truncated or invalidated (changing page->mapping to
index d645f8b302a27882c86c3c46e134dd5bcbc35cef..9397ed39b0b4ecbdd9c9b5860887162990c2f66d 100644 (file)
@@ -179,13 +179,14 @@ EXPORT_SYMBOL(fscache_acquire_cache);
 void fscache_put_cache(struct fscache_cache *cache,
                       enum fscache_cache_trace where)
 {
-       unsigned int debug_id = cache->debug_id;
+       unsigned int debug_id;
        bool zero;
        int ref;
 
        if (IS_ERR_OR_NULL(cache))
                return;
 
+       debug_id = cache->debug_id;
        zero = __refcount_dec_and_test(&cache->ref, &ref);
        trace_fscache_cache(debug_id, ref - 1, where);
 
index 4309edf338627eee2963e1520ab6485a483e1c5d..e8ff1e61ce79b7f67e1252f4b66aa461bfe1d4b8 100644 (file)
@@ -124,7 +124,7 @@ static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
                        /* We might have multiple writes from the same huge
                         * folio, but we mustn't unlock a folio more than once.
                         */
-                       if (have_unlocked && folio_index(folio) <= unlocked)
+                       if (have_unlocked && folio->index <= unlocked)
                                continue;
                        unlocked = folio_next_index(folio) - 1;
                        trace_netfs_folio(folio, netfs_folio_trace_end_copy);
index 0e3af37fc9243f7a0d351840904aa0ce5d91ee59..90051ced8e2a879827e54d4b50c976bc11f6b759 100644 (file)
@@ -180,7 +180,7 @@ void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
        struct netfs_folio *finfo = NULL;
        size_t flen = folio_size(folio);
 
-       _enter("{%lx},%zx,%zx", folio_index(folio), offset, length);
+       _enter("{%lx},%zx,%zx", folio->index, offset, length);
 
        folio_wait_fscache(folio);
 
index 2fa54cfd4882307e87e9c109070ecdc25a3db401..6dc6340e28529d8efe2a9bb99144ca297909e9d3 100644 (file)
@@ -7911,14 +7911,16 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
 {
        struct file_lock *fl;
        int status = false;
-       struct nfsd_file *nf = find_any_file(fp);
+       struct nfsd_file *nf;
        struct inode *inode;
        struct file_lock_context *flctx;
 
+       spin_lock(&fp->fi_lock);
+       nf = find_any_file_locked(fp);
        if (!nf) {
                /* Any valid lock stateid should have some sort of access */
                WARN_ON_ONCE(1);
-               return status;
+               goto out;
        }
 
        inode = file_inode(nf->nf_file);
@@ -7934,7 +7936,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
                }
                spin_unlock(&flctx->flc_lock);
        }
-       nfsd_file_put(nf);
+out:
+       spin_unlock(&fp->fi_lock);
        return status;
 }
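Note how check_for_locks() now resolves the nfsd_file entirely under fp->fi_lock via find_any_file_locked(), instead of taking a reference with find_any_file() and putting it at the end. A userspace sketch of that lookup-under-lock shape (types and names invented; the mutex stands in for fp->fi_lock):

/*
 * The object is found and inspected entirely under its owner's lock
 * rather than referenced, unlocked, and put later.
 */
#include <pthread.h>
#include <stdbool.h>

struct file_obj { int nlocks; };

struct fp_obj {
        pthread_mutex_t lock;
        struct file_obj *nf;    /* torn down only under lock */
};

static bool fp_has_locks(struct fp_obj *fp)
{
        bool ret = false;

        pthread_mutex_lock(&fp->lock);
        if (fp->nf)             /* find_any_file_locked() analogue */
                ret = fp->nf->nlocks > 0;
        pthread_mutex_unlock(&fp->lock);
        return ret;
}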
 
@@ -7944,10 +7947,8 @@ check_for_locks(struct nfs4_file *fp, struct nfs4_lockowner *lowner)
  * @cstate: NFSv4 COMPOUND state
  * @u: RELEASE_LOCKOWNER arguments
  *
- * The lockowner's so_count is bumped when a lock record is added
- * or when copying a conflicting lock. The latter case is brief,
- * but can lead to fleeting false positives when looking for
- * locks-in-use.
+ * Check if there are any locks still held and, if not, free the
+ * lockowner and any lock state it owns.
  *
  * Return values:
  *   %nfs_ok: lockowner released or not found
@@ -7983,10 +7984,13 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
                spin_unlock(&clp->cl_lock);
                return nfs_ok;
        }
-       if (atomic_read(&lo->lo_owner.so_count) != 2) {
-               spin_unlock(&clp->cl_lock);
-               nfs4_put_stateowner(&lo->lo_owner);
-               return nfserr_locks_held;
+
+       list_for_each_entry(stp, &lo->lo_owner.so_stateids, st_perstateowner) {
+               if (check_for_locks(stp->st_stid.sc_file, lo)) {
+                       spin_unlock(&clp->cl_lock);
+                       nfs4_put_stateowner(&lo->lo_owner);
+                       return nfserr_locks_held;
+               }
        }
        unhash_lockowner_locked(lo);
        while (!list_empty(&lo->lo_owner.so_stateids)) {
index 984ffdaeed6ca8efcf8acb7852a481628ee3c380..5764f91d283e7027e2ca075057242968c1016455 100644 (file)
 
 struct ovl_lookup_data {
        struct super_block *sb;
-       struct vfsmount *mnt;
+       const struct ovl_layer *layer;
        struct qstr name;
        bool is_dir;
        bool opaque;
+       bool xwhiteouts;
        bool stop;
        bool last;
        char *redirect;
@@ -201,17 +202,13 @@ struct dentry *ovl_decode_real_fh(struct ovl_fs *ofs, struct ovl_fh *fh,
        return real;
 }
 
-static bool ovl_is_opaquedir(struct ovl_fs *ofs, const struct path *path)
-{
-       return ovl_path_check_dir_xattr(ofs, path, OVL_XATTR_OPAQUE);
-}
-
 static struct dentry *ovl_lookup_positive_unlocked(struct ovl_lookup_data *d,
                                                   const char *name,
                                                   struct dentry *base, int len,
                                                   bool drop_negative)
 {
-       struct dentry *ret = lookup_one_unlocked(mnt_idmap(d->mnt), name, base, len);
+       struct dentry *ret = lookup_one_unlocked(mnt_idmap(d->layer->mnt), name,
+                                                base, len);
 
        if (!IS_ERR(ret) && d_flags_negative(smp_load_acquire(&ret->d_flags))) {
                if (drop_negative && ret->d_lockref.count == 1) {
@@ -232,10 +229,13 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
                             size_t prelen, const char *post,
                             struct dentry **ret, bool drop_negative)
 {
+       struct ovl_fs *ofs = OVL_FS(d->sb);
        struct dentry *this;
        struct path path;
        int err;
        bool last_element = !post[0];
+       bool is_upper = d->layer->idx == 0;
+       char val;
 
        this = ovl_lookup_positive_unlocked(d, name, base, namelen, drop_negative);
        if (IS_ERR(this)) {
@@ -253,8 +253,8 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
        }
 
        path.dentry = this;
-       path.mnt = d->mnt;
-       if (ovl_path_is_whiteout(OVL_FS(d->sb), &path)) {
+       path.mnt = d->layer->mnt;
+       if (ovl_path_is_whiteout(ofs, &path)) {
                d->stop = d->opaque = true;
                goto put_and_out;
        }
@@ -272,7 +272,7 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
                        d->stop = true;
                        goto put_and_out;
                }
-               err = ovl_check_metacopy_xattr(OVL_FS(d->sb), &path, NULL);
+               err = ovl_check_metacopy_xattr(ofs, &path, NULL);
                if (err < 0)
                        goto out_err;
 
@@ -292,7 +292,12 @@ static int ovl_lookup_single(struct dentry *base, struct ovl_lookup_data *d,
                if (d->last)
                        goto out;
 
-               if (ovl_is_opaquedir(OVL_FS(d->sb), &path)) {
+               /* overlay.opaque=x means xwhiteouts directory */
+               val = ovl_get_opaquedir_val(ofs, &path);
+               if (last_element && !is_upper && val == 'x') {
+                       d->xwhiteouts = true;
+                       ovl_layer_set_xwhiteouts(ofs, d->layer);
+               } else if (val == 'y') {
                        d->stop = true;
                        if (last_element)
                                d->opaque = true;
@@ -863,7 +868,8 @@ fail:
  * Returns next layer in stack starting from top.
  * Returns -1 if this is the last layer.
  */
-int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path,
+                 const struct ovl_layer **layer)
 {
        struct ovl_entry *oe = OVL_E(dentry);
        struct ovl_path *lowerstack = ovl_lowerstack(oe);
@@ -871,13 +877,16 @@ int ovl_path_next(int idx, struct dentry *dentry, struct path *path)
        BUG_ON(idx < 0);
        if (idx == 0) {
                ovl_path_upper(dentry, path);
-               if (path->dentry)
+               if (path->dentry) {
+                       *layer = &OVL_FS(dentry->d_sb)->layers[0];
                        return ovl_numlower(oe) ? 1 : -1;
+               }
                idx++;
        }
        BUG_ON(idx > ovl_numlower(oe));
        path->dentry = lowerstack[idx - 1].dentry;
-       path->mnt = lowerstack[idx - 1].layer->mnt;
+       *layer = lowerstack[idx - 1].layer;
+       path->mnt = (*layer)->mnt;
 
        return (idx < ovl_numlower(oe)) ? idx + 1 : -1;
 }
@@ -1055,7 +1064,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
        old_cred = ovl_override_creds(dentry->d_sb);
        upperdir = ovl_dentry_upper(dentry->d_parent);
        if (upperdir) {
-               d.mnt = ovl_upper_mnt(ofs);
+               d.layer = &ofs->layers[0];
                err = ovl_lookup_layer(upperdir, &d, &upperdentry, true);
                if (err)
                        goto out;
@@ -1111,7 +1120,7 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                else if (d.is_dir || !ofs->numdatalayer)
                        d.last = lower.layer->idx == ovl_numlower(roe);
 
-               d.mnt = lower.layer->mnt;
+               d.layer = lower.layer;
                err = ovl_lookup_layer(lower.dentry, &d, &this, false);
                if (err)
                        goto out_put;
@@ -1278,6 +1287,8 @@ struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
 
        if (upperopaque)
                ovl_dentry_set_opaque(dentry);
+       if (d.xwhiteouts)
+               ovl_dentry_set_xwhiteouts(dentry);
 
        if (upperdentry)
                ovl_dentry_set_upper_alias(dentry);
index 5ba11eb4376792f3047bb683913557f79f1fa53e..ee949f3e7c77839e999cb6f5344cb167fd4e43b5 100644 (file)
@@ -50,7 +50,6 @@ enum ovl_xattr {
        OVL_XATTR_METACOPY,
        OVL_XATTR_PROTATTR,
        OVL_XATTR_XWHITEOUT,
-       OVL_XATTR_XWHITEOUTS,
 };
 
 enum ovl_inode_flag {
@@ -70,6 +69,8 @@ enum ovl_entry_flag {
        OVL_E_UPPER_ALIAS,
        OVL_E_OPAQUE,
        OVL_E_CONNECTED,
+       /* Lower stack may contain xwhiteout entries */
+       OVL_E_XWHITEOUTS,
 };
 
 enum {
@@ -477,6 +478,10 @@ bool ovl_dentry_test_flag(unsigned long flag, struct dentry *dentry);
 bool ovl_dentry_is_opaque(struct dentry *dentry);
 bool ovl_dentry_is_whiteout(struct dentry *dentry);
 void ovl_dentry_set_opaque(struct dentry *dentry);
+bool ovl_dentry_has_xwhiteouts(struct dentry *dentry);
+void ovl_dentry_set_xwhiteouts(struct dentry *dentry);
+void ovl_layer_set_xwhiteouts(struct ovl_fs *ofs,
+                             const struct ovl_layer *layer);
 bool ovl_dentry_has_upper_alias(struct dentry *dentry);
 void ovl_dentry_set_upper_alias(struct dentry *dentry);
 bool ovl_dentry_needs_data_copy_up(struct dentry *dentry, int flags);
@@ -494,11 +499,10 @@ struct file *ovl_path_open(const struct path *path, int flags);
 int ovl_copy_up_start(struct dentry *dentry, int flags);
 void ovl_copy_up_end(struct dentry *dentry);
 bool ovl_already_copied_up(struct dentry *dentry, int flags);
-bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
-                             enum ovl_xattr ox);
+char ovl_get_dir_xattr_val(struct ovl_fs *ofs, const struct path *path,
+                          enum ovl_xattr ox);
 bool ovl_path_check_origin_xattr(struct ovl_fs *ofs, const struct path *path);
 bool ovl_path_check_xwhiteout_xattr(struct ovl_fs *ofs, const struct path *path);
-bool ovl_path_check_xwhiteouts_xattr(struct ovl_fs *ofs, const struct path *path);
 bool ovl_init_uuid_xattr(struct super_block *sb, struct ovl_fs *ofs,
                         const struct path *upperpath);
 
@@ -573,7 +577,13 @@ static inline bool ovl_is_impuredir(struct super_block *sb,
                .mnt = ovl_upper_mnt(ofs),
        };
 
-       return ovl_path_check_dir_xattr(ofs, &upperpath, OVL_XATTR_IMPURE);
+       return ovl_get_dir_xattr_val(ofs, &upperpath, OVL_XATTR_IMPURE) == 'y';
+}
+
+static inline char ovl_get_opaquedir_val(struct ovl_fs *ofs,
+                                        const struct path *path)
+{
+       return ovl_get_dir_xattr_val(ofs, path, OVL_XATTR_OPAQUE);
 }
 
 static inline bool ovl_redirect_follow(struct ovl_fs *ofs)
@@ -680,7 +690,8 @@ int ovl_get_index_name(struct ovl_fs *ofs, struct dentry *origin,
 struct dentry *ovl_get_index_fh(struct ovl_fs *ofs, struct ovl_fh *fh);
 struct dentry *ovl_lookup_index(struct ovl_fs *ofs, struct dentry *upper,
                                struct dentry *origin, bool verify);
-int ovl_path_next(int idx, struct dentry *dentry, struct path *path);
+int ovl_path_next(int idx, struct dentry *dentry, struct path *path,
+                 const struct ovl_layer **layer);
 int ovl_verify_lowerdata(struct dentry *dentry);
 struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
                          unsigned int flags);
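ovl_get_dir_xattr_val() (replacing the boolean ovl_path_check_dir_xattr()) hands back the raw one-byte xattr value so callers can distinguish 'y' (opaque) from the repurposed 'x' (xwhiteouts marker). A userspace analogue built on getxattr(2); the helper name is invented, the xattr name is the real overlay one:

/*
 * Return the raw single-byte xattr value, or 0 when absent or
 * oversized, so the caller can compare against 'y' or 'x'.
 */
#include <sys/types.h>
#include <sys/xattr.h>

static char get_dir_xattr_val(const char *path, const char *name)
{
        char val;
        ssize_t res = getxattr(path, name, &val, 1);

        return res == 1 ? val : 0;
}

/* e.g.: get_dir_xattr_val(dir, "trusted.overlay.opaque") == 'x' */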
index 5fa9c58af65f2107c19524fc58808a3140820f5c..cb449ab310a7a89aafa0ee04ee7ff6c8141dd7d5 100644 (file)
@@ -40,6 +40,8 @@ struct ovl_layer {
        int idx;
        /* One fsid per unique underlying sb (upper fsid == 0) */
        int fsid;
+       /* xwhiteouts were found on this layer */
+       bool has_xwhiteouts;
 };
 
 struct ovl_path {
@@ -59,7 +61,7 @@ struct ovl_fs {
        unsigned int numfs;
        /* Number of data-only lower layers */
        unsigned int numdatalayer;
-       const struct ovl_layer *layers;
+       struct ovl_layer *layers;
        struct ovl_sb *fs;
        /* workbasedir is the path at workdir= mount option */
        struct dentry *workbasedir;
index e71156baa7bccae2d15c1830938d62116f546b74..0ca8af060b0c194e5824e59b59d9d2dc8b051355 100644 (file)
@@ -305,8 +305,6 @@ static inline int ovl_dir_read(const struct path *realpath,
        if (IS_ERR(realfile))
                return PTR_ERR(realfile);
 
-       rdd->in_xwhiteouts_dir = rdd->dentry &&
-               ovl_path_check_xwhiteouts_xattr(OVL_FS(rdd->dentry->d_sb), realpath);
        rdd->first_maybe_whiteout = NULL;
        rdd->ctx.pos = 0;
        do {
@@ -359,10 +357,13 @@ static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list,
                .is_lowest = false,
        };
        int idx, next;
+       const struct ovl_layer *layer;
 
        for (idx = 0; idx != -1; idx = next) {
-               next = ovl_path_next(idx, dentry, &realpath);
+               next = ovl_path_next(idx, dentry, &realpath, &layer);
                rdd.is_upper = ovl_dentry_upper(dentry) == realpath.dentry;
+               rdd.in_xwhiteouts_dir = layer->has_xwhiteouts &&
+                                       ovl_dentry_has_xwhiteouts(dentry);
 
                if (next != -1) {
                        err = ovl_dir_read(&realpath, &rdd);
index 4ab66e3d4cff9854a99bcc1505963927476bf1d5..2eef6c70b2aed54027b9ec2b1b544101ea32aefc 100644 (file)
@@ -1249,6 +1249,7 @@ static struct dentry *ovl_get_root(struct super_block *sb,
                                   struct ovl_entry *oe)
 {
        struct dentry *root;
+       struct ovl_fs *ofs = OVL_FS(sb);
        struct ovl_path *lowerpath = ovl_lowerstack(oe);
        unsigned long ino = d_inode(lowerpath->dentry)->i_ino;
        int fsid = lowerpath->layer->fsid;
@@ -1270,6 +1271,20 @@ static struct dentry *ovl_get_root(struct super_block *sb,
                        ovl_set_flag(OVL_IMPURE, d_inode(root));
        }
 
+       /* Look for xwhiteouts marker except in the lowermost layer */
+       for (int i = 0; i < ovl_numlower(oe) - 1; i++, lowerpath++) {
+               struct path path = {
+                       .mnt = lowerpath->layer->mnt,
+                       .dentry = lowerpath->dentry,
+               };
+
+               /* overlay.opaque=x means xwhiteouts directory */
+               if (ovl_get_opaquedir_val(ofs, &path) == 'x') {
+                       ovl_layer_set_xwhiteouts(ofs, lowerpath->layer);
+                       ovl_dentry_set_xwhiteouts(root);
+               }
+       }
+
        /* Root is always merge -> can have whiteouts */
        ovl_set_flag(OVL_WHITEOUTS, d_inode(root));
        ovl_dentry_set_flag(OVL_E_CONNECTED, root);
index 0217094c23ea6ae8905c7cb0c44c3ba969345200..a8e17f14d7a219aafada9e174ef50c6f67f56ff7 100644 (file)
@@ -461,6 +461,33 @@ void ovl_dentry_set_opaque(struct dentry *dentry)
        ovl_dentry_set_flag(OVL_E_OPAQUE, dentry);
 }
 
+bool ovl_dentry_has_xwhiteouts(struct dentry *dentry)
+{
+       return ovl_dentry_test_flag(OVL_E_XWHITEOUTS, dentry);
+}
+
+void ovl_dentry_set_xwhiteouts(struct dentry *dentry)
+{
+       ovl_dentry_set_flag(OVL_E_XWHITEOUTS, dentry);
+}
+
+/*
+ * ovl_layer_set_xwhiteouts() is called before adding the overlay dir
+ * dentry to dcache, while readdir of that same directory happens after
+ * the overlay dir dentry is in dcache, so if a CPU observes
+ * ovl_dentry_has_xwhiteouts(), it will also observe layer->has_xwhiteouts
+ * for the layers where xwhiteouts marker was found in that merge dir.
+ */
+void ovl_layer_set_xwhiteouts(struct ovl_fs *ofs,
+                             const struct ovl_layer *layer)
+{
+       if (layer->has_xwhiteouts)
+               return;
+
+       /* Write once to read-mostly layer properties */
+       ofs->layers[layer->idx].has_xwhiteouts = true;
+}
+
 /*
  * For hard links and decoded file handles, it's possible for ovl_dentry_upper()
  * to return positive, while there's no actual upper alias for the inode.
@@ -739,19 +766,6 @@ bool ovl_path_check_xwhiteout_xattr(struct ovl_fs *ofs, const struct path *path)
        return res >= 0;
 }
 
-bool ovl_path_check_xwhiteouts_xattr(struct ovl_fs *ofs, const struct path *path)
-{
-       struct dentry *dentry = path->dentry;
-       int res;
-
-       /* xattr.whiteouts must be a directory */
-       if (!d_is_dir(dentry))
-               return false;
-
-       res = ovl_path_getxattr(ofs, path, OVL_XATTR_XWHITEOUTS, NULL, 0);
-       return res >= 0;
-}
-
 /*
  * Load persistent uuid from xattr into s_uuid if found, or store a new
  * random generated value in s_uuid and in xattr.
@@ -811,20 +825,17 @@ fail:
        return false;
 }
 
-bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
-                              enum ovl_xattr ox)
+char ovl_get_dir_xattr_val(struct ovl_fs *ofs, const struct path *path,
+                          enum ovl_xattr ox)
 {
        int res;
        char val;
 
        if (!d_is_dir(path->dentry))
-               return false;
+               return 0;
 
        res = ovl_path_getxattr(ofs, path, ox, &val, 1);
-       if (res == 1 && val == 'y')
-               return true;
-
-       return false;
+       return res == 1 ? val : 0;
 }
 
 #define OVL_XATTR_OPAQUE_POSTFIX       "opaque"
@@ -837,7 +848,6 @@ bool ovl_path_check_dir_xattr(struct ovl_fs *ofs, const struct path *path,
 #define OVL_XATTR_METACOPY_POSTFIX     "metacopy"
 #define OVL_XATTR_PROTATTR_POSTFIX     "protattr"
 #define OVL_XATTR_XWHITEOUT_POSTFIX    "whiteout"
-#define OVL_XATTR_XWHITEOUTS_POSTFIX   "whiteouts"
 
 #define OVL_XATTR_TAB_ENTRY(x) \
        [x] = { [false] = OVL_XATTR_TRUSTED_PREFIX x ## _POSTFIX, \
@@ -854,7 +864,6 @@ const char *const ovl_xattr_table[][2] = {
        OVL_XATTR_TAB_ENTRY(OVL_XATTR_METACOPY),
        OVL_XATTR_TAB_ENTRY(OVL_XATTR_PROTATTR),
        OVL_XATTR_TAB_ENTRY(OVL_XATTR_XWHITEOUT),
-       OVL_XATTR_TAB_ENTRY(OVL_XATTR_XWHITEOUTS),
 };
 
 int ovl_check_setxattr(struct ovl_fs *ofs, struct dentry *upperdentry,
index 971892620504730e6e2265f50c54874f3d676eac..1daeb5714faad14c24c49a5efd5d118aaf04b54c 100644 (file)
@@ -145,21 +145,27 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
        struct cached_fid *cfid;
        struct cached_fids *cfids;
        const char *npath;
+       int retries = 0, cur_sleep = 1;
 
        if (tcon == NULL || tcon->cfids == NULL || tcon->nohandlecache ||
            is_smb1_server(tcon->ses->server) || (dir_cache_timeout == 0))
                return -EOPNOTSUPP;
 
        ses = tcon->ses;
-       server = cifs_pick_channel(ses);
        cfids = tcon->cfids;
 
-       if (!server->ops->new_lease_key)
-               return -EIO;
-
        if (cifs_sb->root == NULL)
                return -ENOENT;
 
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       oplock = SMB2_OPLOCK_LEVEL_II;
+       server = cifs_pick_channel(ses);
+
+       if (!server->ops->new_lease_key)
+               return -EIO;
+
        utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
        if (!utf16_path)
                return -ENOMEM;
@@ -268,6 +274,11 @@ int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,
         */
        cfid->has_lease = true;
 
+       if (retries) {
+               smb2_set_replay(server, &rqst[0]);
+               smb2_set_replay(server, &rqst[1]);
+       }
+
        rc = compound_send_recv(xid, ses, server,
                                flags, 2, rqst,
                                resp_buftype, rsp_iov);
@@ -367,6 +378,11 @@ out:
                atomic_inc(&tcon->num_remote_opens);
        }
        kfree(utf16_path);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
index ef4c2e3c9fa6130b129be94d4a15c4724b952ed9..6322f0f68a176b177c943b074fe414c4905bf9bb 100644 (file)
@@ -572,7 +572,7 @@ static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash,
                len = cifs_strtoUTF16(user, ses->user_name, len, nls_cp);
                UniStrupr(user);
        } else {
-               memset(user, '\0', 2);
+               *(u16 *)user = 0;
        }
 
        rc = crypto_shash_update(ses->server->secmech.hmacmd5,
index e902de4e475af9cc3483fba922a1b11cbb068cd9..2a4a4e3a8751f2ce8f0409ce79dc5024e02bb883 100644 (file)
@@ -396,7 +396,7 @@ cifs_alloc_inode(struct super_block *sb)
        spin_lock_init(&cifs_inode->writers_lock);
        cifs_inode->writers = 0;
        cifs_inode->netfs.inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
-       cifs_inode->server_eof = 0;
+       cifs_inode->netfs.remote_i_size = 0;
        cifs_inode->uniqueid = 0;
        cifs_inode->createtime = 0;
        cifs_inode->epoch = 0;
@@ -1380,6 +1380,7 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
        struct inode *src_inode = file_inode(src_file);
        struct inode *target_inode = file_inode(dst_file);
        struct cifsInodeInfo *src_cifsi = CIFS_I(src_inode);
+       struct cifsInodeInfo *target_cifsi = CIFS_I(target_inode);
        struct cifsFileInfo *smb_file_src;
        struct cifsFileInfo *smb_file_target;
        struct cifs_tcon *src_tcon;
@@ -1428,7 +1429,7 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
         * Advance the EOF marker after the flush above to the end of the range
         * if it's short of that.
         */
-       if (src_cifsi->server_eof < off + len) {
+       if (src_cifsi->netfs.remote_i_size < off + len) {
                rc = cifs_precopy_set_eof(src_inode, src_cifsi, src_tcon, xid, off + len);
                if (rc < 0)
                        goto unlock;
@@ -1452,12 +1453,22 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
        /* Discard all the folios that overlap the destination region. */
        truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
 
+       fscache_invalidate(cifs_inode_cookie(target_inode), NULL,
+                          i_size_read(target_inode), 0);
+
        rc = file_modified(dst_file);
        if (!rc) {
                rc = target_tcon->ses->server->ops->copychunk_range(xid,
                        smb_file_src, smb_file_target, off, len, destoff);
-               if (rc > 0 && destoff + rc > i_size_read(target_inode))
+               if (rc > 0 && destoff + rc > i_size_read(target_inode)) {
                        truncate_setsize(target_inode, destoff + rc);
+                       netfs_resize_file(&target_cifsi->netfs,
+                                         i_size_read(target_inode), true);
+                       fscache_resize_cookie(cifs_inode_cookie(target_inode),
+                                             i_size_read(target_inode));
+               }
+               if (rc > 0 && destoff + rc > target_cifsi->netfs.zero_point)
+                       target_cifsi->netfs.zero_point = destoff + rc;
        }
 
        file_accessed(src_file);
index 20036fb16cececeaa3acffb78d81691ac86b1ec3..16befff4cbb47c9ac104b052401a490398d0fac9 100644 (file)
  */
 #define CIFS_DEF_ACTIMEO (1 * HZ)
 
+/*
+ * max sleep time (in ms) before retrying the server
+ */
+#define CIFS_MAX_SLEEP 2000
+
 /*
  * max attribute cache timeout (jiffies) - 2^30
  */
@@ -1501,6 +1506,7 @@ struct cifs_writedata {
        struct smbd_mr                  *mr;
 #endif
        struct cifs_credits             credits;
+       bool                            replay;
 };
 
 /*
@@ -1561,7 +1567,6 @@ struct cifsInodeInfo {
        spinlock_t writers_lock;
        unsigned int writers;           /* Number of writers on this inode */
        unsigned long time;             /* jiffies of last update of inode */
-       u64  server_eof;                /* current file size on server -- protected by i_lock */
        u64  uniqueid;                  /* server inode number */
        u64  createtime;                /* creation time on server */
        __u8 lease_key[SMB2_LEASE_KEY_SIZE];    /* lease key for this inode */
@@ -1831,6 +1836,13 @@ static inline bool is_retryable_error(int error)
        return false;
 }
 
+static inline bool is_replayable_error(int error)
+{
+       if (error == -EAGAIN || error == -ECONNABORTED)
+               return true;
+       return false;
+}
+
 
 /* cifs_get_writable_file() flags */
 #define FIND_WR_ANY         0
index 3a213432775b167dfc844df81de1aba42c5fcfb9..b75282c204dadff986f9711d38e775834c30ddd2 100644 (file)
@@ -87,7 +87,7 @@ void cifs_pages_written_back(struct inode *inode, loff_t start, unsigned int len
                        continue;
                if (!folio_test_writeback(folio)) {
                        WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
-                                 len, start, folio_index(folio), end);
+                                 len, start, folio->index, end);
                        continue;
                }
 
@@ -120,7 +120,7 @@ void cifs_pages_write_failed(struct inode *inode, loff_t start, unsigned int len
                        continue;
                if (!folio_test_writeback(folio)) {
                        WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
-                                 len, start, folio_index(folio), end);
+                                 len, start, folio->index, end);
                        continue;
                }
 
@@ -151,7 +151,7 @@ void cifs_pages_write_redirty(struct inode *inode, loff_t start, unsigned int le
        xas_for_each(&xas, folio, end) {
                if (!folio_test_writeback(folio)) {
                        WARN_ONCE(1, "bad %x @%llx page %lx %lx\n",
-                                 len, start, folio_index(folio), end);
+                                 len, start, folio->index, end);
                        continue;
                }
 
@@ -2120,8 +2120,8 @@ cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
 {
        loff_t end_of_write = offset + bytes_written;
 
-       if (end_of_write > cifsi->server_eof)
-               cifsi->server_eof = end_of_write;
+       if (end_of_write > cifsi->netfs.remote_i_size)
+               netfs_resize_file(&cifsi->netfs, end_of_write, true);
 }
 
 static ssize_t
@@ -2651,7 +2651,7 @@ static void cifs_extend_writeback(struct address_space *mapping,
                                continue;
                        if (xa_is_value(folio))
                                break;
-                       if (folio_index(folio) != index)
+                       if (folio->index != index)
                                break;
                        if (!folio_try_get_rcu(folio)) {
                                xas_reset(&xas);
@@ -2899,7 +2899,7 @@ redo_folio:
                                        goto skip_write;
                        }
 
-                       if (folio_mapping(folio) != mapping ||
+                       if (folio->mapping != mapping ||
                            !folio_test_dirty(folio)) {
                                start += folio_size(folio);
                                folio_unlock(folio);
@@ -3247,8 +3247,8 @@ cifs_uncached_writev_complete(struct work_struct *work)
 
        spin_lock(&inode->i_lock);
        cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
-       if (cifsi->server_eof > inode->i_size)
-               i_size_write(inode, cifsi->server_eof);
+       if (cifsi->netfs.remote_i_size > inode->i_size)
+               i_size_write(inode, cifsi->netfs.remote_i_size);
        spin_unlock(&inode->i_lock);
 
        complete(&wdata->done);
@@ -3300,6 +3300,7 @@ cifs_resend_wdata(struct cifs_writedata *wdata, struct list_head *wdata_list,
                        if (wdata->cfile->invalidHandle)
                                rc = -EAGAIN;
                        else {
+                               wdata->replay = true;
 #ifdef CONFIG_CIFS_SMB_DIRECT
                                if (wdata->mr) {
                                        wdata->mr->need_invalidate = true;
index f0989484f2c648796d923fcd3f998b150b1f92cf..d02f8ba29cb5bf22f1dcdcc3932f20afc3094f22 100644 (file)
@@ -104,7 +104,7 @@ cifs_revalidate_cache(struct inode *inode, struct cifs_fattr *fattr)
        fattr->cf_mtime = timestamp_truncate(fattr->cf_mtime, inode);
        mtime = inode_get_mtime(inode);
        if (timespec64_equal(&mtime, &fattr->cf_mtime) &&
-           cifs_i->server_eof == fattr->cf_eof) {
+           cifs_i->netfs.remote_i_size == fattr->cf_eof) {
                cifs_dbg(FYI, "%s: inode %llu is unchanged\n",
                         __func__, cifs_i->uniqueid);
                return;
@@ -194,7 +194,7 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr)
        else
                clear_bit(CIFS_INO_DELETE_PENDING, &cifs_i->flags);
 
-       cifs_i->server_eof = fattr->cf_eof;
+       cifs_i->netfs.remote_i_size = fattr->cf_eof;
        /*
         * Can't safely change the file size here if the client is writing to
         * it due to potential races.
@@ -2858,7 +2858,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
 
 set_size_out:
        if (rc == 0) {
-               cifsInode->server_eof = attrs->ia_size;
+               netfs_resize_file(&cifsInode->netfs, attrs->ia_size, true);
                cifs_setsize(inode, attrs->ia_size);
                /*
                 * i_blocks is not related to (i_size / i_blksize), but instead
@@ -3011,6 +3011,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
        if ((attrs->ia_valid & ATTR_SIZE) &&
            attrs->ia_size != i_size_read(inode)) {
                truncate_setsize(inode, attrs->ia_size);
+               netfs_resize_file(&cifsInode->netfs, attrs->ia_size, true);
                fscache_resize_cookie(cifs_inode_cookie(inode), attrs->ia_size);
        }
 
@@ -3210,6 +3211,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
        if ((attrs->ia_valid & ATTR_SIZE) &&
            attrs->ia_size != i_size_read(inode)) {
                truncate_setsize(inode, attrs->ia_size);
+               netfs_resize_file(&cifsInode->netfs, attrs->ia_size, true);
                fscache_resize_cookie(cifs_inode_cookie(inode), attrs->ia_size);
        }
 
index 94255401b38dcb24c705f255731db2791e171c8d..3b1b01d10f7d7a2f1d12b158ded96d58203f80d0 100644 (file)
@@ -141,7 +141,7 @@ retry:
                                        if (likely(reparse_inode_match(inode, fattr))) {
                                                fattr->cf_mode = inode->i_mode;
                                                fattr->cf_rdev = inode->i_rdev;
-                                               fattr->cf_eof = CIFS_I(inode)->server_eof;
+                                               fattr->cf_eof = CIFS_I(inode)->netfs.remote_i_size;
                                                fattr->cf_symlink_target = NULL;
                                        } else {
                                                CIFS_I(inode)->time = 0;
index a652200540c8aa5d2aa0ecd68ed50cc66f587d05..05818cd6d932e91792ecc65d764eba0a942cb28d 100644 (file)
@@ -120,6 +120,14 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
        unsigned int size[2];
        void *data[2];
        int len;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       oplock = SMB2_OPLOCK_LEVEL_NONE;
+       num_rqst = 0;
+       server = cifs_pick_channel(ses);
 
        vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
        if (vars == NULL)
@@ -127,8 +135,6 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
        rqst = &vars->rqst[0];
        rsp_iov = &vars->rsp_iov[0];
 
-       server = cifs_pick_channel(ses);
-
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
@@ -463,15 +469,24 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
        num_rqst++;
 
        if (cfile) {
+               if (retries)
+                       for (i = 1; i < num_rqst - 2; i++)
+                               smb2_set_replay(server, &rqst[i]);
+
                rc = compound_send_recv(xid, ses, server,
                                        flags, num_rqst - 2,
                                        &rqst[1], &resp_buftype[1],
                                        &rsp_iov[1]);
-       } else
+       } else {
+               if (retries)
+                       for (i = 0; i < num_rqst; i++)
+                               smb2_set_replay(server, &rqst[i]);
+
                rc = compound_send_recv(xid, ses, server,
                                        flags, num_rqst,
                                        rqst, resp_buftype,
                                        rsp_iov);
+       }
 
 finished:
        num_rqst = 0;
@@ -620,9 +635,6 @@ finished:
        }
        SMB2_close_free(&rqst[num_rqst]);
 
-       if (cfile)
-               cifsFileInfo_put(cfile);
-
        num_cmds += 2;
        if (out_iov && out_buftype) {
                memcpy(out_iov, rsp_iov, num_cmds * sizeof(*out_iov));
@@ -632,7 +644,16 @@ finished:
                for (i = 0; i < num_cmds; i++)
                        free_rsp_buf(resp_buftype[i], rsp_iov[i].iov_base);
        }
+       num_cmds -= 2; /* correct num_cmds as there could be a retry */
        kfree(vars);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
+       if (cfile)
+               cifsFileInfo_put(cfile);
+
        return rc;
 }
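Also worth noting above: cifsFileInfo_put(cfile) moved below the replay check, since a retry pass still needs the open file reference; it is now dropped exactly once, after the loop can no longer restart. The lifetime rule in miniature (types invented):

/*
 * The reference must outlive every retry pass and be dropped exactly
 * once, after the loop can no longer restart.
 */
#include <stdatomic.h>
#include <stdlib.h>

struct ref { atomic_int count; };

static void ref_put(struct ref *r)
{
        if (atomic_fetch_sub(&r->count, 1) == 1)
                free(r);        /* last reference gone */
}

static int op_with_retries(struct ref *r, int max_tries)
{
        int rc, tries = 0;

retry:
        rc = -1;                /* the attempt that uses *r goes here */
        if (rc && ++tries < max_tries)
                goto retry;     /* *r must still be alive for this pass */

        ref_put(r);             /* single release, after the last pass */
        return rc;
}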
 
index d9553c2556a290dcea14434e00df9d854e713aa3..83c898afc8354bf04c7a86ee57e4343ad3618319 100644 (file)
@@ -1108,7 +1108,7 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
 {
        struct smb2_compound_vars *vars;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        struct smb_rqst *rqst;
        struct kvec *rsp_iov;
        __le16 *utf16_path = NULL;
@@ -1124,6 +1124,13 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
        struct smb2_file_full_ea_info *ea = NULL;
        struct smb2_query_info_rsp *rsp;
        int rc, used_len = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = CIFS_CP_CREATE_CLOSE_OP;
+       oplock = SMB2_OPLOCK_LEVEL_NONE;
+       server = cifs_pick_channel(ses);
 
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
@@ -1244,6 +1251,12 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
                goto sea_exit;
        smb2_set_related(&rqst[2]);
 
+       if (retries) {
+               smb2_set_replay(server, &rqst[0]);
+               smb2_set_replay(server, &rqst[1]);
+               smb2_set_replay(server, &rqst[2]);
+       }
+
        rc = compound_send_recv(xid, ses, server,
                                flags, 3, rqst,
                                resp_buftype, rsp_iov);
@@ -1260,6 +1273,11 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
        kfree(vars);
 out_free_path:
        kfree(utf16_path);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 #endif
@@ -1484,7 +1502,7 @@ smb2_ioctl_query_info(const unsigned int xid,
        struct smb_rqst *rqst;
        struct kvec *rsp_iov;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        char __user *arg = (char __user *)p;
        struct smb_query_info qi;
        struct smb_query_info __user *pqi;
@@ -1501,6 +1519,13 @@ smb2_ioctl_query_info(const unsigned int xid,
        void *data[2];
        int create_options = is_dir ? CREATE_NOT_FILE : CREATE_NOT_DIR;
        void (*free_req1_func)(struct smb_rqst *r);
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = CIFS_CP_CREATE_CLOSE_OP;
+       oplock = SMB2_OPLOCK_LEVEL_NONE;
+       server = cifs_pick_channel(ses);
 
        vars = kzalloc(sizeof(*vars), GFP_ATOMIC);
        if (vars == NULL)
@@ -1641,6 +1666,12 @@ smb2_ioctl_query_info(const unsigned int xid,
                goto free_req_1;
        smb2_set_related(&rqst[2]);
 
+       if (retries) {
+               smb2_set_replay(server, &rqst[0]);
+               smb2_set_replay(server, &rqst[1]);
+               smb2_set_replay(server, &rqst[2]);
+       }
+
        rc = compound_send_recv(xid, ses, server,
                                flags, 3, rqst,
                                resp_buftype, rsp_iov);
@@ -1701,6 +1732,11 @@ free_output_buffer:
        kfree(buffer);
 free_vars:
        kfree(vars);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -2227,8 +2263,14 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
        struct cifs_open_parms oparms;
        struct smb2_query_directory_rsp *qd_rsp = NULL;
        struct smb2_create_rsp *op_rsp = NULL;
-       struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
-       int retry_count = 0;
+       struct TCP_Server_Info *server;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       oplock = SMB2_OPLOCK_LEVEL_NONE;
+       server = cifs_pick_channel(tcon->ses);
 
        utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
        if (!utf16_path)
@@ -2278,14 +2320,15 @@ smb2_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon,
 
        smb2_set_related(&rqst[1]);
 
-again:
+       if (retries) {
+               smb2_set_replay(server, &rqst[0]);
+               smb2_set_replay(server, &rqst[1]);
+       }
+
        rc = compound_send_recv(xid, tcon->ses, server,
                                flags, 2, rqst,
                                resp_buftype, rsp_iov);
 
-       if (rc == -EAGAIN && retry_count++ < 10)
-               goto again;
-
        /* If the open failed there is nothing to do */
        op_rsp = (struct smb2_create_rsp *)rsp_iov[0].iov_base;
        if (op_rsp == NULL || op_rsp->hdr.Status != STATUS_SUCCESS) {
@@ -2333,6 +2376,11 @@ again:
        SMB2_query_directory_free(&rqst[1]);
        free_rsp_buf(resp_buftype[0], rsp_iov[0].iov_base);
        free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -2457,6 +2505,22 @@ smb2_oplock_response(struct cifs_tcon *tcon, __u64 persistent_fid,
                                 CIFS_CACHE_READ(cinode) ? 1 : 0);
 }
 
+void
+smb2_set_replay(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+{
+       struct smb2_hdr *shdr;
+
+       if (server->dialect < SMB30_PROT_ID)
+               return;
+
+       shdr = (struct smb2_hdr *)(rqst->rq_iov[0].iov_base);
+       if (shdr == NULL) {
+               cifs_dbg(FYI, "shdr NULL in smb2_set_replay\n");
+               return;
+       }
+       shdr->Flags |= SMB2_FLAGS_REPLAY_OPERATION;
+}
+
 void
 smb2_set_related(struct smb_rqst *rqst)
 {
@@ -2529,6 +2593,27 @@ smb2_set_next_command(struct cifs_tcon *tcon, struct smb_rqst *rqst)
        shdr->NextCommand = cpu_to_le32(len);
 }
 
+/*
+ * Helper for exponential backoff; decides whether a failed request is replayable.
+ */
+bool smb2_should_replay(struct cifs_tcon *tcon,
+                               int *pretries,
+                               int *pcur_sleep)
+{
+       if (!pretries || !pcur_sleep)
+               return false;
+
+       if (tcon->retry || (*pretries)++ < tcon->ses->server->retrans) {
+               msleep(*pcur_sleep);
+               (*pcur_sleep) = ((*pcur_sleep) << 1);
+               if ((*pcur_sleep) > CIFS_MAX_SLEEP)
+                       (*pcur_sleep) = CIFS_MAX_SLEEP;
+               return true;
+       }
+
+       return false;
+}
+
 /*
  * Passes the query info response back to the caller on success.
  * Caller need to free this with free_rsp_buf().
@@ -2542,7 +2627,7 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
 {
        struct smb2_compound_vars *vars;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        int flags = CIFS_CP_CREATE_CLOSE_OP;
        struct smb_rqst *rqst;
        int resp_buftype[3];
@@ -2553,6 +2638,13 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
        int rc;
        __le16 *utf16_path;
        struct cached_fid *cfid = NULL;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = CIFS_CP_CREATE_CLOSE_OP;
+       oplock = SMB2_OPLOCK_LEVEL_NONE;
+       server = cifs_pick_channel(ses);
 
        if (!path)
                path = "";
@@ -2633,6 +2725,14 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
                goto qic_exit;
        smb2_set_related(&rqst[2]);
 
+       if (retries) {
+               if (!cfid) {
+                       smb2_set_replay(server, &rqst[0]);
+                       smb2_set_replay(server, &rqst[2]);
+               }
+               smb2_set_replay(server, &rqst[1]);
+       }
+
        if (cfid) {
                rc = compound_send_recv(xid, ses, server,
                                        flags, 1, &rqst[1],
@@ -2665,6 +2765,11 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
        kfree(vars);
 out_free_path:
        kfree(utf16_path);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -3213,6 +3318,9 @@ static long smb3_zero_range(struct file *file, struct cifs_tcon *tcon,
                                  cfile->fid.volatile_fid, cfile->pid, new_size);
                if (rc >= 0) {
                        truncate_setsize(inode, new_size);
+                       netfs_resize_file(&cifsi->netfs, new_size, true);
+                       if (offset < cifsi->netfs.zero_point)
+                               cifsi->netfs.zero_point = offset;
                        fscache_resize_cookie(cifs_inode_cookie(inode), new_size);
                }
        }
@@ -3436,7 +3544,7 @@ static long smb3_simple_falloc(struct file *file, struct cifs_tcon *tcon,
                rc = SMB2_set_eof(xid, tcon, cfile->fid.persistent_fid,
                                  cfile->fid.volatile_fid, cfile->pid, new_eof);
                if (rc == 0) {
-                       cifsi->server_eof = new_eof;
+                       netfs_resize_file(&cifsi->netfs, new_eof, true);
                        cifs_setsize(inode, new_eof);
                        cifs_truncate_page(inode->i_mapping, inode->i_size);
                        truncate_setsize(inode, new_eof);
@@ -3528,8 +3636,9 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
        int rc;
        unsigned int xid;
        struct inode *inode = file_inode(file);
-       struct cifsFileInfo *cfile = file->private_data;
        struct cifsInodeInfo *cifsi = CIFS_I(inode);
+       struct cifsFileInfo *cfile = file->private_data;
+       struct netfs_inode *ictx = &cifsi->netfs;
        loff_t old_eof, new_eof;
 
        xid = get_xid();
@@ -3549,6 +3658,7 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
                goto out_2;
 
        truncate_pagecache_range(inode, off, old_eof);
+       ictx->zero_point = old_eof;
 
        rc = smb2_copychunk_range(xid, cfile, cfile, off + len,
                                  old_eof - off - len, off);
@@ -3563,9 +3673,10 @@ static long smb3_collapse_range(struct file *file, struct cifs_tcon *tcon,
 
        rc = 0;
 
-       cifsi->server_eof = i_size_read(inode) - len;
-       truncate_setsize(inode, cifsi->server_eof);
-       fscache_resize_cookie(cifs_inode_cookie(inode), cifsi->server_eof);
+       truncate_setsize(inode, new_eof);
+       netfs_resize_file(&cifsi->netfs, new_eof, true);
+       ictx->zero_point = new_eof;
+       fscache_resize_cookie(cifs_inode_cookie(inode), new_eof);
 out_2:
        filemap_invalidate_unlock(inode->i_mapping);
  out:
@@ -3581,6 +3692,7 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
        unsigned int xid;
        struct cifsFileInfo *cfile = file->private_data;
        struct inode *inode = file_inode(file);
+       struct cifsInodeInfo *cifsi = CIFS_I(inode);
        __u64 count, old_eof, new_eof;
 
        xid = get_xid();
@@ -3608,6 +3720,7 @@ static long smb3_insert_range(struct file *file, struct cifs_tcon *tcon,
                goto out_2;
 
        truncate_setsize(inode, new_eof);
+       netfs_resize_file(&cifsi->netfs, i_size_read(inode), true);
        fscache_resize_cookie(cifs_inode_cookie(inode), i_size_read(inode));
 
        rc = smb2_copychunk_range(xid, cfile, cfile, off, count, off + len);
index 288199f0b987df98ba3fab9320523bc16e73092d..86f6f35b7f32e8498e2628350abf43daa0d97f96 100644 (file)
@@ -195,7 +195,6 @@ cifs_chan_skip_or_disable(struct cifs_ses *ses,
                pserver = server->primary_server;
                cifs_signal_cifsd_for_reconnect(pserver, false);
 skip_terminate:
-               mutex_unlock(&ses->session_mutex);
                return -EHOSTDOWN;
        }
 
@@ -2765,7 +2764,14 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
        int flags = 0;
        unsigned int total_len;
        __le16 *utf16_path = NULL;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       n_iov = 2;
+       server = cifs_pick_channel(ses);
 
        cifs_dbg(FYI, "mkdir\n");
 
@@ -2869,6 +2875,10 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
        /* no need to inc num_remote_opens because we close it just below */
        trace_smb3_posix_mkdir_enter(xid, tcon->tid, ses->Suid, full_path, CREATE_NOT_FILE,
                                    FILE_WRITE_ATTRIBUTES);
+
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        /* resource #4: response buffer */
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
@@ -2906,6 +2916,11 @@ err_free_req:
        cifs_small_buf_release(req);
 err_free_path:
        kfree(utf16_path);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -3101,12 +3116,18 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
        struct smb2_create_rsp *rsp = NULL;
        struct cifs_tcon *tcon = oparms->tcon;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        struct kvec iov[SMB2_CREATE_IOV_SIZE];
        struct kvec rsp_iov = {NULL, 0};
        int resp_buftype = CIFS_NO_BUFFER;
        int rc = 0;
        int flags = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        cifs_dbg(FYI, "create/open\n");
        if (!ses || !server)
@@ -3128,6 +3149,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
        trace_smb3_open_enter(xid, tcon->tid, tcon->ses->Suid, oparms->path,
                oparms->create_options, oparms->desired_access);
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags,
                            &rsp_iov);
@@ -3181,6 +3205,11 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
 creat_exit:
        SMB2_open_free(&rqst);
        free_rsp_buf(resp_buftype, rsp);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -3305,15 +3334,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        int resp_buftype = CIFS_NO_BUFFER;
        int rc = 0;
        int flags = 0;
-
-       cifs_dbg(FYI, "SMB2 IOCTL\n");
-
-       if (out_data != NULL)
-               *out_data = NULL;
-
-       /* zero out returned data len, in case of error */
-       if (plen)
-               *plen = 0;
+       int retries = 0, cur_sleep = 1;
 
        if (!tcon)
                return -EIO;
@@ -3322,10 +3343,23 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        if (!ses)
                return -EIO;
 
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
        server = cifs_pick_channel(ses);
+
        if (!server)
                return -EIO;
 
+       cifs_dbg(FYI, "SMB2 IOCTL\n");
+
+       if (out_data != NULL)
+               *out_data = NULL;
+
+       /* zero out returned data len, in case of error */
+       if (plen)
+               *plen = 0;
+
        if (smb3_encryption_required(tcon))
                flags |= CIFS_TRANSFORM_REQ;
 
@@ -3340,6 +3374,9 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        if (rc)
                goto ioctl_exit;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags,
                            &rsp_iov);
@@ -3409,6 +3446,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
 ioctl_exit:
        SMB2_ioctl_free(&rqst);
        free_rsp_buf(resp_buftype, rsp);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -3480,13 +3522,20 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
        struct smb_rqst rqst;
        struct smb2_close_rsp *rsp = NULL;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buftype = CIFS_NO_BUFFER;
        int rc = 0;
        int flags = 0;
        bool query_attrs = false;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       query_attrs = false;
+       server = cifs_pick_channel(ses);
 
        cifs_dbg(FYI, "Close\n");
 
@@ -3512,6 +3561,9 @@ __SMB2_close(const unsigned int xid, struct cifs_tcon *tcon,
        if (rc)
                goto close_exit;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
        rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
@@ -3545,6 +3597,11 @@ close_exit:
                        cifs_dbg(VFS, "handle cancelled close fid 0x%llx returned error %d\n",
                                 persistent_fid, tmp_rc);
        }
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -3675,12 +3732,19 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
        struct TCP_Server_Info *server;
        int flags = 0;
        bool allocated = false;
+       int retries = 0, cur_sleep = 1;
 
        cifs_dbg(FYI, "Query Info\n");
 
        if (!ses)
                return -EIO;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       allocated = false;
        server = cifs_pick_channel(ses);
+
        if (!server)
                return -EIO;
 
@@ -3702,6 +3766,9 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
        trace_smb3_query_info_enter(xid, persistent_fid, tcon->tid,
                                    ses->Suid, info_class, (__u32)info_type);
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
        rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
@@ -3744,6 +3811,11 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
 qinf_exit:
        SMB2_query_info_free(&rqst);
        free_rsp_buf(resp_buftype, rsp);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -3844,7 +3916,7 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
                u32 *plen /* returned data len */)
 {
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        struct smb_rqst rqst;
        struct smb2_change_notify_rsp *smb_rsp;
        struct kvec iov[1];
@@ -3852,6 +3924,12 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
        int resp_buftype = CIFS_NO_BUFFER;
        int flags = 0;
        int rc = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        cifs_dbg(FYI, "change notify\n");
        if (!ses || !server)
@@ -3876,6 +3954,10 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
 
        trace_smb3_notify_enter(xid, persistent_fid, tcon->tid, ses->Suid,
                                (u8)watch_tree, completion_filter);
+
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
 
@@ -3910,6 +3992,11 @@ SMB2_change_notify(const unsigned int xid, struct cifs_tcon *tcon,
        if (rqst.rq_iov)
                cifs_small_buf_release(rqst.rq_iov[0].iov_base); /* request */
        free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -4152,10 +4239,16 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
        struct smb_rqst rqst;
        struct kvec iov[1];
        struct kvec rsp_iov = {NULL, 0};
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        int resp_buftype = CIFS_NO_BUFFER;
        int flags = 0;
        int rc = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        cifs_dbg(FYI, "flush\n");
        if (!ses || !(ses->server))
@@ -4175,6 +4268,10 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
                goto flush_exit;
 
        trace_smb3_flush_enter(xid, persistent_fid, tcon->tid, ses->Suid);
+
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
 
@@ -4189,6 +4286,11 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
  flush_exit:
        SMB2_flush_free(&rqst);
        free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -4668,7 +4770,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
        struct cifs_io_parms *io_parms = NULL;
        int credit_request;
 
-       if (!wdata->server)
+       if (!wdata->server || wdata->replay)
                server = wdata->server = cifs_pick_channel(tcon->ses);
 
        /*
@@ -4753,6 +4855,8 @@ smb2_async_writev(struct cifs_writedata *wdata,
        rqst.rq_nvec = 1;
        rqst.rq_iter = wdata->iter;
        rqst.rq_iter_size = iov_iter_count(&rqst.rq_iter);
+       if (wdata->replay)
+               smb2_set_replay(server, &rqst);
 #ifdef CONFIG_CIFS_SMB_DIRECT
        if (wdata->mr)
                iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
@@ -4826,18 +4930,21 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
        int flags = 0;
        unsigned int total_len;
        struct TCP_Server_Info *server;
+       int retries = 0, cur_sleep = 1;
 
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
        *nbytes = 0;
-
-       if (n_vec < 1)
-               return rc;
-
        if (!io_parms->server)
                io_parms->server = cifs_pick_channel(io_parms->tcon->ses);
        server = io_parms->server;
        if (server == NULL)
                return -ECONNABORTED;
 
+       if (n_vec < 1)
+               return rc;
+
        rc = smb2_plain_req_init(SMB2_WRITE, io_parms->tcon, server,
                                 (void **) &req, &total_len);
        if (rc)
@@ -4871,6 +4978,9 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
        rqst.rq_iov = iov;
        rqst.rq_nvec = n_vec + 1;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, io_parms->tcon->ses, server,
                            &rqst,
                            &resp_buftype, flags, &rsp_iov);
@@ -4895,6 +5005,11 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
 
        cifs_small_buf_release(req);
        free_rsp_buf(resp_buftype, rsp);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(io_parms->tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -5206,8 +5321,14 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        struct kvec rsp_iov;
        int rc = 0;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        int flags = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        if (!ses || !(ses->server))
                return -EIO;
@@ -5227,6 +5348,9 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
        if (rc)
                goto qdir_exit;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
        rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
@@ -5261,6 +5385,11 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
 qdir_exit:
        SMB2_query_directory_free(&rqst);
        free_rsp_buf(resp_buftype, rsp);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -5327,8 +5456,14 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
        int rc = 0;
        int resp_buftype;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        int flags = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        if (!ses || !server)
                return -EIO;
@@ -5356,6 +5491,8 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
                return rc;
        }
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
 
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags,
@@ -5371,6 +5508,11 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
 
        free_rsp_buf(resp_buftype, rsp);
        kfree(iov);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -5423,12 +5565,18 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
        int rc;
        struct smb2_oplock_break *req = NULL;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        int flags = CIFS_OBREAK_OP;
        unsigned int total_len;
        struct kvec iov[1];
        struct kvec rsp_iov;
        int resp_buf_type;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = CIFS_OBREAK_OP;
+       server = cifs_pick_channel(ses);
 
        cifs_dbg(FYI, "SMB2_oplock_break\n");
        rc = smb2_plain_req_init(SMB2_OPLOCK_BREAK, tcon, server,
@@ -5453,15 +5601,21 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
        rqst.rq_iov = iov;
        rqst.rq_nvec = 1;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buf_type, flags, &rsp_iov);
        cifs_small_buf_release(req);
-
        if (rc) {
                cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
                cifs_dbg(FYI, "Send error in Oplock Break = %d\n", rc);
        }
 
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -5547,9 +5701,15 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
        int rc = 0;
        int resp_buftype;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        FILE_SYSTEM_POSIX_INFO *info = NULL;
        int flags = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        rc = build_qfs_info_req(&iov, tcon, server,
                                FS_POSIX_INFORMATION,
@@ -5565,6 +5725,9 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
        rqst.rq_iov = &iov;
        rqst.rq_nvec = 1;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
        free_qfs_info_req(&iov);
@@ -5584,6 +5747,11 @@ SMB311_posix_qfs_info(const unsigned int xid, struct cifs_tcon *tcon,
 
 posix_qfsinf_exit:
        free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -5598,9 +5766,15 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
        int rc = 0;
        int resp_buftype;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        struct smb2_fs_full_size_info *info = NULL;
        int flags = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        rc = build_qfs_info_req(&iov, tcon, server,
                                FS_FULL_SIZE_INFORMATION,
@@ -5616,6 +5790,9 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
        rqst.rq_iov = &iov;
        rqst.rq_nvec = 1;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
        free_qfs_info_req(&iov);
@@ -5635,6 +5812,11 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
 
 qfsinf_exit:
        free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -5649,9 +5831,15 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
        int rc = 0;
        int resp_buftype, max_len, min_len;
        struct cifs_ses *ses = tcon->ses;
-       struct TCP_Server_Info *server = cifs_pick_channel(ses);
+       struct TCP_Server_Info *server;
        unsigned int rsp_len, offset;
        int flags = 0;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = 0;
+       server = cifs_pick_channel(ses);
 
        if (level == FS_DEVICE_INFORMATION) {
                max_len = sizeof(FILE_SYSTEM_DEVICE_INFO);
@@ -5683,6 +5871,9 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
        rqst.rq_iov = &iov;
        rqst.rq_nvec = 1;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, ses, server,
                            &rqst, &resp_buftype, flags, &rsp_iov);
        free_qfs_info_req(&iov);
@@ -5720,6 +5911,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
 
 qfsattr_exit:
        free_rsp_buf(resp_buftype, rsp_iov.iov_base);
+
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
@@ -5737,7 +5933,13 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
        unsigned int count;
        int flags = CIFS_NO_RSP_BUF;
        unsigned int total_len;
-       struct TCP_Server_Info *server = cifs_pick_channel(tcon->ses);
+       struct TCP_Server_Info *server;
+       int retries = 0, cur_sleep = 1;
+
+replay_again:
+       /* reinitialize for possible replay */
+       flags = CIFS_NO_RSP_BUF;
+       server = cifs_pick_channel(tcon->ses);
 
        cifs_dbg(FYI, "smb2_lockv num lock %d\n", num_lock);
 
@@ -5768,6 +5970,9 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
        rqst.rq_iov = iov;
        rqst.rq_nvec = 2;
 
+       if (retries)
+               smb2_set_replay(server, &rqst);
+
        rc = cifs_send_recv(xid, tcon->ses, server,
                            &rqst, &resp_buf_type, flags,
                            &rsp_iov);
@@ -5779,6 +5984,10 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
                                    tcon->ses->Suid, rc);
        }
 
+       if (is_replayable_error(rc) &&
+           smb2_should_replay(tcon, &retries, &cur_sleep))
+               goto replay_again;
+
        return rc;
 }
 
index 0034b537b0b3f9dd057ce5183f05b6fedb147f77..b3069911e9dd8f51ea38ea54da740049696d18e6 100644 (file)
@@ -122,6 +122,11 @@ extern unsigned long smb_rqst_len(struct TCP_Server_Info *server,
 extern void smb2_set_next_command(struct cifs_tcon *tcon,
                                  struct smb_rqst *rqst);
 extern void smb2_set_related(struct smb_rqst *rqst);
+extern void smb2_set_replay(struct TCP_Server_Info *server,
+                           struct smb_rqst *rqst);
+extern bool smb2_should_replay(struct cifs_tcon *tcon,
+                         int *pretries,
+                         int *pcur_sleep);
 
 /*
  * SMB2 Worker functions - most of protocol specific implementation details
index f0ce26414f17377365ed0201f21dd4e9cdf06b59..1d1ee9f18f373501f781447f82b494857dd8e9f3 100644 (file)
 #include "cifsproto.h"
 #include "../common/md4.h"
 
-#ifndef false
-#define false 0
-#endif
-#ifndef true
-#define true 1
-#endif
-
 /* following came from the other byteorder.h to avoid include conflicts */
 #define CVAL(buf,pos) (((unsigned char *)(buf))[pos])
 #define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8)
index 4f717ad7c21b424d45f785fdbb94be941c1d7f14..e00278fcfa4fa65f063430001c5506e3a2906358 100644 (file)
@@ -400,10 +400,17 @@ unmask:
                                                  server->conn_id, server->hostname);
        }
 smbd_done:
-       if (rc < 0 && rc != -EINTR)
+       /*
+        * there's hardly any use for the layers above to know the
+        * actual error code here. All they should do at this point is
+        * to retry the connection and hope it goes away.
+        */
+       if (rc < 0 && rc != -EINTR && rc != -EAGAIN) {
                cifs_server_dbg(VFS, "Error %d sending data on socket to server\n",
                         rc);
-       else if (rc > 0)
+               rc = -ECONNABORTED;
+               cifs_signal_cifsd_for_reconnect(server, false);
+       } else if (rc > 0)
                rc = 0;
 out:
        cifs_in_send_dec(server);
@@ -1026,6 +1033,9 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
                if (!server || server->terminate)
                        continue;
 
+               if (CIFS_CHAN_NEEDS_RECONNECT(ses, i))
+                       continue;
+
                /*
                 * strictly speaking, we should pick up req_lock to read
                 * server->in_flight. But it shouldn't matter much here if we
index b7521e41402e003a3fd7e121c6003cf7edf23539..0ebf91ffa2361c0940aba0fc301d1a65bf1612e5 100644 (file)
@@ -304,7 +304,8 @@ enum ksmbd_event {
        KSMBD_EVENT_SPNEGO_AUTHEN_REQUEST,
        KSMBD_EVENT_SPNEGO_AUTHEN_RESPONSE      = 15,
 
-       KSMBD_EVENT_MAX
+       __KSMBD_EVENT_MAX,
+       KSMBD_EVENT_MAX = __KSMBD_EVENT_MAX - 1
 };
 
 /*
index b49d47bdafc945e31bdfa8d7b9f9931752c4d17c..f29bb03f0dc47bfcb0fe3fc5c5acff16d5a314a8 100644 (file)
@@ -74,7 +74,7 @@ static int handle_unsupported_event(struct sk_buff *skb, struct genl_info *info)
 static int handle_generic_event(struct sk_buff *skb, struct genl_info *info);
 static int ksmbd_ipc_heartbeat_request(void);
 
-static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX] = {
+static const struct nla_policy ksmbd_nl_policy[KSMBD_EVENT_MAX + 1] = {
        [KSMBD_EVENT_UNSPEC] = {
                .len = 0,
        },
@@ -403,7 +403,7 @@ static int handle_generic_event(struct sk_buff *skb, struct genl_info *info)
                return -EPERM;
 #endif
 
-       if (type >= KSMBD_EVENT_MAX) {
+       if (type > KSMBD_EVENT_MAX) {
                WARN_ON(1);
                return -EINVAL;
        }
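The fix replaces an inclusive KSMBD_EVENT_MAX with a __KSMBD_EVENT_MAX sentinel, so KSMBD_EVENT_MAX names the highest valid event, the nla_policy array is sized KSMBD_EVENT_MAX + 1, and the range check becomes `type > KSMBD_EVENT_MAX`. The sentinel idiom in a stand-alone sketch (hypothetical enum):

#include <assert.h>
#include <stdio.h>

enum demo_event {
        DEMO_EVENT_UNSPEC,
        DEMO_EVENT_PING,
        DEMO_EVENT_PONG,

        __DEMO_EVENT_MAX,
        DEMO_EVENT_MAX = __DEMO_EVENT_MAX - 1   /* highest valid value */
};

/* One policy slot per valid event, including DEMO_EVENT_MAX itself. */
static const char *demo_policy[DEMO_EVENT_MAX + 1] = {
        [DEMO_EVENT_UNSPEC] = "unspec",
        [DEMO_EVENT_PING]   = "ping",
        [DEMO_EVENT_PONG]   = "pong",
};

static int handle(unsigned int type)
{
        if (type > DEMO_EVENT_MAX)      /* was `>=` with the inclusive MAX */
                return -1;
        printf("%s\n", demo_policy[type]);
        return 0;
}

int main(void)
{
        assert(handle(DEMO_EVENT_PONG) == 0);
        assert(handle(DEMO_EVENT_MAX + 1) == -1);
        return 0;
}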
index 9d4222154dcc0c92201a0d7a6e4dac77e0eea37b..002a3f0dc7c5880b61045cf7f10f7e078b85d6a9 100644 (file)
@@ -365,6 +365,7 @@ static int ksmbd_tcp_readv(struct tcp_transport *t, struct kvec *iov_orig,
  * @t:         TCP transport instance
  * @buf:       buffer to store read data from socket
  * @to_read:   number of bytes to read from socket
+ * @max_retries: number of retries if reading from socket fails
  *
  * Return:     on success return number of bytes read from socket,
  *             otherwise return error number
@@ -416,6 +417,7 @@ static void tcp_destroy_socket(struct socket *ksmbd_socket)
 
 /**
  * create_socket - create socket for ksmbd/0
+ * @iface:      interface to bind the created socket to
  *
  * Return:     0 on success, error number otherwise
  */
index 6795fda2af191ac7e5d9cacec4220bc0feba2a2c..1c3dd0ad4660eff5127ae8fc1ddecc7631977982 100644 (file)
@@ -34,7 +34,15 @@ static DEFINE_MUTEX(eventfs_mutex);
 
 /* Choose something "unique" ;-) */
 #define EVENTFS_FILE_INODE_INO         0x12c4e37
-#define EVENTFS_DIR_INODE_INO          0x134b2f5
+
+/* Just try to make something consistent and unique */
+static int eventfs_dir_ino(struct eventfs_inode *ei)
+{
+       if (!ei->ino)
+               ei->ino = get_next_ino();
+
+       return ei->ino;
+}
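Rather than giving every eventfs directory the same constant inode number, eventfs_dir_ino() assigns each directory a unique number on first use and caches it in the eventfs_inode. The lazy-assignment idiom in miniature (a plain counter stands in for get_next_ino()):

#include <stdio.h>

struct node {
        unsigned int ino;       /* 0 means "not assigned yet" */
};

/* Stand-in for get_next_ino(): a simple monotonically increasing counter. */
static unsigned int next_ino(void)
{
        static unsigned int counter = 1;
        return counter++;
}

/* Assign on first use, then return the cached value ever after. */
static unsigned int node_ino(struct node *n)
{
        if (!n->ino)
                n->ino = next_ino();
        return n->ino;
}

int main(void)
{
        struct node a = {0}, b = {0};

        printf("a first:  %u\n", node_ino(&a));
        printf("a cached: %u\n", node_ino(&a));
        printf("b first:  %u\n", node_ino(&b));
        return 0;
}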
 
 /*
  * The eventfs_inode (ei) itself is protected by SRCU. It is released from
@@ -273,44 +281,6 @@ static void update_inode_attr(struct dentry *dentry, struct inode *inode,
                inode->i_gid = attr->gid;
 }
 
-static void update_gid(struct eventfs_inode *ei, kgid_t gid, int level)
-{
-       struct eventfs_inode *ei_child;
-
-       /* at most we have events/system/event */
-       if (WARN_ON_ONCE(level > 3))
-               return;
-
-       ei->attr.gid = gid;
-
-       if (ei->entry_attrs) {
-               for (int i = 0; i < ei->nr_entries; i++) {
-                       ei->entry_attrs[i].gid = gid;
-               }
-       }
-
-       /*
-        * Only eventfs_inode with dentries are updated, make sure
-        * all eventfs_inodes are updated. If one of the children
-        * do not have a dentry, this function must traverse it.
-        */
-       list_for_each_entry_srcu(ei_child, &ei->children, list,
-                                srcu_read_lock_held(&eventfs_srcu)) {
-               if (!ei_child->dentry)
-                       update_gid(ei_child, gid, level + 1);
-       }
-}
-
-void eventfs_update_gid(struct dentry *dentry, kgid_t gid)
-{
-       struct eventfs_inode *ei = dentry->d_fsdata;
-       int idx;
-
-       idx = srcu_read_lock(&eventfs_srcu);
-       update_gid(ei, gid, 0);
-       srcu_read_unlock(&eventfs_srcu, idx);
-}
-
 /**
  * create_file - create a file in the tracefs filesystem
  * @name: the name of the file to create.
@@ -396,7 +366,7 @@ static struct dentry *create_dir(struct eventfs_inode *ei, struct dentry *parent
        inode->i_fop = &eventfs_file_operations;
 
        /* All directories will have the same inode number */
-       inode->i_ino = EVENTFS_DIR_INODE_INO;
+       inode->i_ino = eventfs_dir_ino(ei);
 
        ti = get_tracefs(inode);
        ti->flags |= TRACEFS_EVENT_INODE;
@@ -802,7 +772,7 @@ static int eventfs_iterate(struct file *file, struct dir_context *ctx)
 
                name = ei_child->name;
 
-               ino = EVENTFS_DIR_INODE_INO;
+               ino = eventfs_dir_ino(ei_child);
 
                if (!dir_emit(ctx, name, strlen(name), ino, DT_DIR))
                        goto out_dec;
index 12b7d0150ae9efeab86e21fd7e15c8e1a397a69e..91c2bf0b91d9c53d83b6325161787b090f2ba574 100644 (file)
@@ -55,6 +55,10 @@ struct eventfs_inode {
        struct eventfs_attr             *entry_attrs;
        struct eventfs_attr             attr;
        void                            *data;
+       unsigned int                    is_freed:1;
+       unsigned int                    is_events:1;
+       unsigned int                    nr_entries:30;
+       unsigned int                    ino;
        /*
         * Union - used for deletion
         * @llist:      for calling dput() if needed after RCU
@@ -64,9 +68,6 @@ struct eventfs_inode {
                struct llist_node       llist;
                struct rcu_head         rcu;
        };
-       unsigned int                    is_freed:1;
-       unsigned int                    is_events:1;
-       unsigned int                    nr_entries:30;
 };
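The struct reorder groups the three bitfields with the new `ino` member so they pack into adjacent words ahead of the 8-byte-aligned union instead of leaving padded holes. A compilable sketch of the general effect (illustrative layout; sizes assume a typical LP64 ABI):

#include <stdio.h>

/* Bitfields and the int split around an 8-byte-aligned member: padding twice. */
struct split_layout {
        void *data;
        unsigned int is_freed:1;
        unsigned int is_events:1;
        unsigned int nr_entries:30;
        long long u;            /* stands in for the llist/rcu union */
        unsigned int ino;
};                              /* typically 32 bytes */

/* Bitfields packed together with the int before the aligned member: no holes. */
struct packed_layout {
        void *data;
        unsigned int is_freed:1;
        unsigned int is_events:1;
        unsigned int nr_entries:30;
        unsigned int ino;
        long long u;
};                              /* typically 24 bytes */

int main(void)
{
        printf("split:  %zu bytes\n", sizeof(struct split_layout));
        printf("packed: %zu bytes\n", sizeof(struct packed_layout));
        return 0;
}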
 
 static inline struct tracefs_inode *get_tracefs(const struct inode *inode)
@@ -81,7 +82,6 @@ struct inode *tracefs_get_inode(struct super_block *sb);
 struct dentry *eventfs_start_creating(const char *name, struct dentry *parent);
 struct dentry *eventfs_failed_creating(struct dentry *dentry);
 struct dentry *eventfs_end_creating(struct dentry *dentry);
-void eventfs_update_gid(struct dentry *dentry, kgid_t gid);
 void eventfs_set_ei_status_free(struct tracefs_inode *ti, struct dentry *dentry);
 
 #endif /* _TRACEFS_INTERNAL_H */
index aff20ddd4a9f9cdeeeca1f54f210d19462773a5b..5a2512d20bd07473a872592911ede7246b8c11b7 100644 (file)
@@ -1496,6 +1496,18 @@ xfs_fs_fill_super(
 
        mp->m_super = sb;
 
+       /*
+        * Copy VFS mount flags from the context now that all parameter parsing
+        * is guaranteed to have been completed by either the old mount API or
+        * the newer fsopen/fsconfig API.
+        */
+       if (fc->sb_flags & SB_RDONLY)
+               set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
+       if (fc->sb_flags & SB_DIRSYNC)
+               mp->m_features |= XFS_FEAT_DIRSYNC;
+       if (fc->sb_flags & SB_SYNCHRONOUS)
+               mp->m_features |= XFS_FEAT_WSYNC;
+
        error = xfs_fs_validate_params(mp);
        if (error)
                return error;
@@ -1965,6 +1977,11 @@ static const struct fs_context_operations xfs_context_ops = {
        .free        = xfs_fs_free,
 };
 
+/*
+ * WARNING: do not initialise any parameters in this function that depend on
+ * mount option parsing having already been performed, as this can be called from
+ * fsopen() before any parameters have been set.
+ */
 static int xfs_init_fs_context(
        struct fs_context       *fc)
 {
@@ -1996,16 +2013,6 @@ static int xfs_init_fs_context(
        mp->m_logbsize = -1;
        mp->m_allocsize_log = 16; /* 64k */
 
-       /*
-        * Copy binary VFS mount flags we are interested in.
-        */
-       if (fc->sb_flags & SB_RDONLY)
-               set_bit(XFS_OPSTATE_READONLY, &mp->m_opstate);
-       if (fc->sb_flags & SB_DIRSYNC)
-               mp->m_features |= XFS_FEAT_DIRSYNC;
-       if (fc->sb_flags & SB_SYNCHRONOUS)
-               mp->m_features |= XFS_FEAT_WSYNC;
-
        fc->s_fs_info = mp;
        fc->ops = &xfs_context_ops;
 
index 1dbb14daccfaf326af0c54c89ff61afb50e07982..26d68115afb826b65a9fd11ce329635161e39cca 100644 (file)
@@ -471,7 +471,7 @@ enum ata_completion_errors {
 
 /*
  * Link power management policy: If you alter this, you also need to
- * alter libata-scsi.c (for the ascii descriptions)
+ * alter libata-sata.c (for the ascii descriptions)
  */
 enum ata_lpm_policy {
        ATA_LPM_UNKNOWN,
index 8c55ff351e5f2eed3416b0b59dd7e193f06bec02..41f03b352401e7556ddf92f0b9a53da4918a291a 100644 (file)
@@ -681,6 +681,7 @@ struct mlx5e_resources {
                struct mlx5_sq_bfreg       bfreg;
 #define MLX5_MAX_NUM_TC 8
                u32                        tisn[MLX5_MAX_PORTS][MLX5_MAX_NUM_TC];
+               bool                       tisn_valid;
        } hw_objs;
        struct net_device *uplink_netdev;
        struct mutex uplink_netdev_lock;
index 6f7725238abc2fcfeaf471e988b0035df25b9b87..3fb428ce7d1c7c0dd57969e8b82e227a4efb5d41 100644 (file)
@@ -132,6 +132,7 @@ struct mlx5_flow_handle;
 
 enum {
        FLOW_CONTEXT_HAS_TAG = BIT(0),
+       FLOW_CONTEXT_UPLINK_HAIRPIN_EN = BIT(1),
 };
 
 struct mlx5_flow_context {
index bf5320b28b8bf045f7ab3492eb7f050e027df29d..c726f90ab752452cbe9726462ecedd72b21656f6 100644 (file)
@@ -3576,7 +3576,7 @@ struct mlx5_ifc_flow_context_bits {
        u8         action[0x10];
 
        u8         extended_destination[0x1];
-       u8         reserved_at_81[0x1];
+       u8         uplink_hairpin_en[0x1];
        u8         flow_source[0x2];
        u8         encrypt_decrypt_type[0x4];
        u8         destination_list_size[0x18];
@@ -4036,8 +4036,13 @@ struct mlx5_ifc_nic_vport_context_bits {
        u8         affiliation_criteria[0x4];
        u8         affiliated_vhca_id[0x10];
 
-       u8         reserved_at_60[0xd0];
+       u8         reserved_at_60[0xa0];
+
+       u8         reserved_at_100[0x1];
+       u8         sd_group[0x3];
+       u8         reserved_at_104[0x1c];
 
+       u8         reserved_at_120[0x10];
        u8         mtu[0x10];
 
        u8         system_image_guid[0x40];
@@ -10122,8 +10127,7 @@ struct mlx5_ifc_mpir_reg_bits {
        u8         reserved_at_20[0x20];
 
        u8         local_port[0x8];
-       u8         reserved_at_28[0x15];
-       u8         sd_group[0x3];
+       u8         reserved_at_28[0x18];
 
        u8         reserved_at_60[0x20];
 };
index fbb9bf4478894c72e0e4a3f4d6404008611cbca0..c36cc6d829267e8b795c5c1ea7f71c1e28dcdaed 100644 (file)
@@ -72,6 +72,7 @@ int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
 int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
 int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
                                           u64 *system_image_guid);
+int mlx5_query_nic_vport_sd_group(struct mlx5_core_dev *mdev, u8 *sd_group);
 int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
 int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
                                    u16 vport, u64 node_guid);
index 40d94411d49204e7276a6ad9554eb17335fd4577..dc7048824be81d628ca12f0874c1a7508da0d5c1 100644 (file)
@@ -156,6 +156,7 @@ calc_vm_flag_bits(unsigned long flags)
        return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
               _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
               _calc_vm_trans(flags, MAP_SYNC,       VM_SYNC      ) |
+              _calc_vm_trans(flags, MAP_STACK,      VM_NOHUGEPAGE) |
               arch_calc_vm_flag_bits(flags);
 }
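calc_vm_flag_bits() translates mmap() flag bits into VM flag bits, and the added line maps MAP_STACK onto VM_NOHUGEPAGE. A simplified user-space version of the translation idiom (made-up flag values; the kernel's _calc_vm_trans is shift-based rather than conditional):

#include <stdio.h>

/* Illustrative flag values, not the real uapi/kernel ones. */
#define MAP_LOCKED_X    0x01UL
#define MAP_STACK_X     0x02UL
#define VM_LOCKED_X     0x10UL
#define VM_NOHUGEPAGE_X 0x20UL

/* If `bit` is set in x, contribute `vmbit` to the result. */
#define calc_trans(x, bit, vmbit) (((x) & (bit)) ? (vmbit) : 0UL)

static unsigned long calc_vm_flag_bits(unsigned long flags)
{
        return calc_trans(flags, MAP_LOCKED_X, VM_LOCKED_X) |
               calc_trans(flags, MAP_STACK_X,  VM_NOHUGEPAGE_X);
}

int main(void)
{
        printf("0x%lx\n", calc_vm_flag_bits(MAP_STACK_X));                 /* 0x20 */
        printf("0x%lx\n", calc_vm_flag_bits(MAP_LOCKED_X | MAP_STACK_X)); /* 0x30 */
        return 0;
}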
 
index 4ed33b12782151632e36aa114039cb4a0916fe06..a497f189d98818bcda37458746ebb2bded7826e4 100644 (file)
@@ -2013,9 +2013,9 @@ static inline int pfn_valid(unsigned long pfn)
        if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
                return 0;
        ms = __pfn_to_section(pfn);
-       rcu_read_lock();
+       rcu_read_lock_sched();
        if (!valid_section(ms)) {
-               rcu_read_unlock();
+               rcu_read_unlock_sched();
                return 0;
        }
        /*
@@ -2023,7 +2023,7 @@ static inline int pfn_valid(unsigned long pfn)
         * the entire section-sized span.
         */
        ret = early_section(ms) || pfn_section_valid(ms, pfn);
-       rcu_read_unlock();
+       rcu_read_unlock_sched();
 
        return ret;
 }
index cdb8ea53c365ba45be4041c887de4c9d1c22afcd..ffe8f618ab869729bd6c888a8a05e45d46a6c7c2 100644 (file)
@@ -920,7 +920,7 @@ struct task_struct {
        unsigned                        sched_rt_mutex:1;
 #endif
 
-       /* Bit to tell LSMs we're in execve(): */
+       /* Bit to tell TOMOYO we're in execve(): */
        unsigned                        in_execve:1;
        unsigned                        in_iowait:1;
 #ifndef TIF_RESTORE_SIGMASK
index 888a4b217829fd4d6baf52f784ce35e9ad6bd0ed..e65ec3fd27998a5b82fc2c4597c575125e653056 100644 (file)
@@ -505,12 +505,6 @@ static inline bool sk_psock_strp_enabled(struct sk_psock *psock)
        return !!psock->saved_data_ready;
 }
 
-static inline bool sk_is_udp(const struct sock *sk)
-{
-       return sk->sk_type == SOCK_DGRAM &&
-              sk->sk_protocol == IPPROTO_UDP;
-}
-
 #if IS_ENABLED(CONFIG_NET_SOCK_MSG)
 
 #define BPF_F_STRPARSER        (1UL << 1)
index 471fe2ff9066b75e82795b92972905bbae3cc48c..600fbd5daf683d4d93536a569ef5e52248d7a851 100644 (file)
@@ -21,7 +21,7 @@
 #include <uapi/linux/spi/spi.h>
 
 /* Max no. of CS supported per spi device */
-#define SPI_CS_CNT_MAX 4
+#define SPI_CS_CNT_MAX 16
 
 struct dma_chan;
 struct software_node;
index cdba4d0c6d4a88dd19db34faa425addb8cd3f744..77eb9b0e768504daa57af63b2eb7c8debd00dfda 100644 (file)
@@ -128,6 +128,7 @@ struct mnt_id_req;
 #define __TYPE_IS_LL(t) (__TYPE_AS(t, 0LL) || __TYPE_AS(t, 0ULL))
 #define __SC_LONG(t, a) __typeof(__builtin_choose_expr(__TYPE_IS_LL(t), 0LL, 0L)) a
 #define __SC_CAST(t, a)        (__force t) a
+#define __SC_TYPE(t, a)        t
 #define __SC_ARGS(t, a)        a
 #define __SC_TEST(t, a) (void)BUILD_BUG_ON_ZERO(!__TYPE_IS_LL(t) && sizeof(t) > sizeof(long))
 
index d0a2f827d5f20f3fed3c177d9b64d9dac373a26f..9ab4bf704e864358215d2370d33d3d9668681923 100644 (file)
@@ -357,4 +357,12 @@ static inline bool inet_csk_has_ulp(const struct sock *sk)
        return inet_test_bit(IS_ICSK, sk) && !!inet_csk(sk)->icsk_ulp_ops;
 }
 
+static inline void inet_init_csk_locks(struct sock *sk)
+{
+       struct inet_connection_sock *icsk = inet_csk(sk);
+
+       spin_lock_init(&icsk->icsk_accept_queue.rskq_lock);
+       spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock);
+}
+
 #endif /* _INET_CONNECTION_SOCK_H */
index aa86453f6b9ba367f772570a7b783bb098be6236..d94c242eb3ed20b2c5b2e5ceea3953cf96341fb7 100644 (file)
@@ -307,11 +307,6 @@ static inline unsigned long inet_cmsg_flags(const struct inet_sock *inet)
 #define inet_assign_bit(nr, sk, val)           \
        assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
 
-static inline bool sk_is_inet(struct sock *sk)
-{
-       return sk->sk_family == AF_INET || sk->sk_family == AF_INET6;
-}
-
 /**
  * sk_to_full_sk - Access to a full socket
  * @sk: pointer to a socket
index 7e73f8e5e4970d4d12b89bf6a1a3988f88e2b635..1d55ba7c45be16356e4144e09cdfeb7da99a7971 100644 (file)
@@ -262,8 +262,7 @@ static inline void llc_pdu_header_init(struct sk_buff *skb, u8 type,
  */
 static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
 {
-       if (skb->protocol == htons(ETH_P_802_2))
-               memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
+       memcpy(sa, eth_hdr(skb)->h_source, ETH_ALEN);
 }
 
 /**
@@ -275,8 +274,7 @@ static inline void llc_pdu_decode_sa(struct sk_buff *skb, u8 *sa)
  */
 static inline void llc_pdu_decode_da(struct sk_buff *skb, u8 *da)
 {
-       if (skb->protocol == htons(ETH_P_802_2))
-               memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
+       memcpy(da, eth_hdr(skb)->h_dest, ETH_ALEN);
 }
 
 /**
index b157c5cafd14cfe307f3d36ad533d528f142eea6..4e1ea18eb5f05f763429ed2f75c0c1c9aee20d44 100644 (file)
@@ -205,6 +205,7 @@ static inline void nft_data_copy(u32 *dst, const struct nft_data *src,
  *     @nla: netlink attributes
  *     @portid: netlink portID of the original message
  *     @seq: netlink sequence number
+ *     @flags: modifiers to the new request
  *     @family: protocol family
  *     @level: depth of the chains
  *     @report: notify via unicast netlink message
@@ -282,6 +283,7 @@ struct nft_elem_priv { };
  *
  *     @key: element key
  *     @key_end: closing element key
+ *     @data: element data
  *     @priv: element private data and extensions
  */
 struct nft_set_elem {
@@ -325,10 +327,10 @@ struct nft_set_iter {
  *     @dtype: data type
  *     @dlen: data length
  *     @objtype: object type
- *     @flags: flags
  *     @size: number of set elements
  *     @policy: set policy
  *     @gc_int: garbage collector interval
+ *     @timeout: element timeout
  *     @field_len: length of each field in concatenation, bytes
  *     @field_count: number of concatenated fields in element
  *     @expr: set must support for expressions
@@ -351,9 +353,9 @@ struct nft_set_desc {
 /**
  *     enum nft_set_class - performance class
  *
- *     @NFT_LOOKUP_O_1: constant, O(1)
- *     @NFT_LOOKUP_O_LOG_N: logarithmic, O(log N)
- *     @NFT_LOOKUP_O_N: linear, O(N)
+ *     @NFT_SET_CLASS_O_1: constant, O(1)
+ *     @NFT_SET_CLASS_O_LOG_N: logarithmic, O(log N)
+ *     @NFT_SET_CLASS_O_N: linear, O(N)
  */
 enum nft_set_class {
        NFT_SET_CLASS_O_1,
@@ -422,9 +424,13 @@ struct nft_set_ext;
  *     @remove: remove element from set
  *     @walk: iterate over all set elements
  *     @get: get set elements
+ *     @commit: commit set elements
+ *     @abort: abort set elements
  *     @privsize: function to return size of set private data
+ *     @estimate: estimate the required memory size and the lookup complexity class
  *     @init: initialize private data of new set instance
  *     @destroy: destroy private data of set instance
+ *     @gc_init: initialize garbage collection
  *     @elemsize: element private size
  *
  *     Operations lookup, update and delete have simpler interfaces, are faster
@@ -540,13 +546,16 @@ struct nft_set_elem_expr {
  *     @policy: set parameterization (see enum nft_set_policies)
  *     @udlen: user data length
  *     @udata: user data
- *     @expr: stateful expression
+ *     @pending_update: list of set elements with pending updates
  *     @ops: set ops
  *     @flags: set flags
  *     @dead: set will be freed, never cleared
  *     @genmask: generation mask
  *     @klen: key length
  *     @dlen: data length
+ *     @num_exprs: number of expressions
+ *     @exprs: stateful expressions
+ *     @catchall_list: list of catch-all set elements
  *     @data: private set data
  */
 struct nft_set {
@@ -692,6 +701,7 @@ extern const struct nft_set_ext_type nft_set_ext_types[];
  *
  *     @len: length of extension area
  *     @offset: offsets of individual extension types
+ *     @ext_len: length of the expected extension (used for sanity checks)
  */
 struct nft_set_ext_tmpl {
        u16     len;
@@ -840,6 +850,7 @@ struct nft_expr_ops;
  *     @select_ops: function to select nft_expr_ops
  *     @release_ops: release nft_expr_ops
  *     @ops: default ops, used when no select_ops functions is present
+ *     @inner_ops: inner ops, used for inner packet operations
  *     @list: used internally
  *     @name: Identifier
  *     @owner: module reference
@@ -881,14 +892,22 @@ struct nft_offload_ctx;
  *     struct nft_expr_ops - nf_tables expression operations
  *
  *     @eval: Expression evaluation function
+ *     @clone: Expression clone function
  *     @size: full expression size, including private data size
  *     @init: initialization function
  *     @activate: activate expression in the next generation
  *     @deactivate: deactivate expression in next generation
  *     @destroy: destruction function, called after synchronize_rcu
+ *     @destroy_clone: destruction clone function
  *     @dump: function to dump parameters
- *     @type: expression type
  *     @validate: validate expression, called during loop detection
+ *     @reduce: reduce expression
+ *     @gc: garbage collection expression
+ *     @offload: hardware offload expression
+ *     @offload_action: function to report whether to allocate a slot in the flow
+ *                      offload array
+ *     @offload_stats: function to synchronize hardware stats by updating the counter expression
+ *     @type: expression type
  *     @data: extra data to attach to this expression operation
  */
 struct nft_expr_ops {
@@ -1041,14 +1060,21 @@ struct nft_rule_blob {
 /**
  *     struct nft_chain - nf_tables chain
  *
+ *     @blob_gen_0: rule blob pointer to the current generation
+ *     @blob_gen_1: rule blob pointer to the future generation
  *     @rules: list of rules in the chain
  *     @list: used internally
  *     @rhlhead: used internally
  *     @table: table that this chain belongs to
  *     @handle: chain handle
  *     @use: number of jump references to this chain
- *     @flags: bitmask of enum nft_chain_flags
+ *     @flags: bitmask of enum NFTA_CHAIN_FLAGS
+ *     @bound: whether the chain is bound
+ *     @genmask: generation mask
  *     @name: name of the chain
+ *     @udlen: user data length
+ *     @udata: user data in the chain
+ *     @blob_next: pointer to the next rule blob in the chain
  */
 struct nft_chain {
        struct nft_rule_blob            __rcu *blob_gen_0;
@@ -1146,6 +1172,7 @@ struct nft_hook {
  *     @hook_list: list of netfilter hooks (for NFPROTO_NETDEV family)
  *     @type: chain type
  *     @policy: default policy
+ *     @flags: indicates whether the base chain is disabled
  *     @stats: per-cpu chain stats
  *     @chain: the chain
  *     @flow_block: flow block (for hardware offload)
@@ -1274,11 +1301,13 @@ struct nft_object_hash_key {
  *     struct nft_object - nf_tables stateful object
  *
  *     @list: table stateful object list node
- *     @key:  keys that identify this object
  *     @rhlhead: nft_objname_ht node
+ *     @key: keys that identify this object
  *     @genmask: generation mask
  *     @use: number of references to this stateful object
  *     @handle: unique object handle
+ *     @udlen: length of user data
+ *     @udata: user data
  *     @ops: object operations
  *     @data: object data, layout depends on type
  */
@@ -1344,6 +1373,7 @@ struct nft_object_type {
  *     @destroy: release existing stateful object
  *     @dump: netlink dump stateful object
  *     @update: update stateful object
+ *     @type: pointer to object type
  */
 struct nft_object_ops {
        void                            (*eval)(struct nft_object *obj,
@@ -1379,9 +1409,8 @@ void nft_unregister_obj(struct nft_object_type *obj_type);
  *     @genmask: generation mask
  *     @use: number of references to this flow table
  *     @handle: unique object handle
- *     @dev_name: array of device names
+ *     @hook_list: list of hooks, one per net_device, for this flowtable
  *     @data: rhashtable and garbage collector
- *     @ops: array of hooks
  */
 struct nft_flowtable {
        struct list_head                list;
index ba3e1b315de838f9696ad7948ae474552c288e73..934fdb9775519ff45d9455e74a8695bf8a1e4bce 100644 (file)
@@ -375,6 +375,10 @@ struct tcf_proto_ops {
                                                struct nlattr **tca,
                                                struct netlink_ext_ack *extack);
        void                    (*tmplt_destroy)(void *tmplt_priv);
+       void                    (*tmplt_reoffload)(struct tcf_chain *chain,
+                                                  bool add,
+                                                  flow_setup_cb_t *cb,
+                                                  void *cb_priv);
        struct tcf_exts *       (*get_exts)(const struct tcf_proto *tp,
                                            u32 handle);
 
index a7f815c7cfdfdf1296be2967fd100efdb10cdd63..54ca8dcbfb4335d657b5cea323aa7d8c4316d49e 100644 (file)
@@ -2765,9 +2765,25 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
                           &skb_shinfo(skb)->tskey);
 }
 
+static inline bool sk_is_inet(const struct sock *sk)
+{
+       int family = READ_ONCE(sk->sk_family);
+
+       return family == AF_INET || family == AF_INET6;
+}
+
 static inline bool sk_is_tcp(const struct sock *sk)
 {
-       return sk->sk_type == SOCK_STREAM && sk->sk_protocol == IPPROTO_TCP;
+       return sk_is_inet(sk) &&
+              sk->sk_type == SOCK_STREAM &&
+              sk->sk_protocol == IPPROTO_TCP;
+}
+
+static inline bool sk_is_udp(const struct sock *sk)
+{
+       return sk_is_inet(sk) &&
+              sk->sk_type == SOCK_DGRAM &&
+              sk->sk_protocol == IPPROTO_UDP;
 }
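The relocated predicates check the address family first, so a SOCK_STREAM/IPPROTO_TCP match alone can no longer misclassify non-inet sockets, and sk_is_inet() reads the family once (READ_ONCE in the kernel) for lockless callers. The predicate structure on a toy struct (not the kernel's struct sock; made-up constants):

#include <stdbool.h>
#include <stdio.h>

enum { AF_INET_X = 2, AF_INET6_X = 10, AF_UNIX_X = 1 };
enum { SOCK_STREAM_X = 1, SOCK_DGRAM_X = 2 };
enum { IPPROTO_TCP_X = 6, IPPROTO_UDP_X = 17 };

struct toy_sock {
        int family, type, protocol;
};

static bool sk_is_inet(const struct toy_sock *sk)
{
        /* the kernel version uses READ_ONCE(sk->sk_family) for lockless readers */
        int family = sk->family;

        return family == AF_INET_X || family == AF_INET6_X;
}

static bool sk_is_tcp(const struct toy_sock *sk)
{
        return sk_is_inet(sk) && sk->type == SOCK_STREAM_X &&
               sk->protocol == IPPROTO_TCP_X;
}

int main(void)
{
        struct toy_sock unix_stream = { AF_UNIX_X, SOCK_STREAM_X, 0 };
        struct toy_sock tcp4 = { AF_INET_X, SOCK_STREAM_X, IPPROTO_TCP_X };

        printf("unix stream is tcp? %d\n", sk_is_tcp(&unix_stream)); /* 0 */
        printf("inet stream is tcp? %d\n", sk_is_tcp(&tcp4));        /* 1 */
        return 0;
}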
 
 static inline bool sk_is_stream_unix(const struct sock *sk)
index 526c1e7f505e4d9633bfb6da058ea25b9f2b9cfa..c9aec9ab6191205c7c6f8d3f0f5c136cae520750 100644 (file)
@@ -159,11 +159,29 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
        return ret;
 }
 
+static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+{
+       struct xdp_buff_xsk *xskb = container_of(tail, struct xdp_buff_xsk, xdp);
+
+       list_del(&xskb->xskb_list_node);
+}
+
+static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+{
+       struct xdp_buff_xsk *xskb = container_of(first, struct xdp_buff_xsk, xdp);
+       struct xdp_buff_xsk *frag;
+
+       frag = list_last_entry(&xskb->pool->xskb_list, struct xdp_buff_xsk,
+                              xskb_list_node);
+       return &frag->xdp;
+}
+
 static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
 {
        xdp->data = xdp->data_hard_start + XDP_PACKET_HEADROOM;
        xdp->data_meta = xdp->data;
        xdp->data_end = xdp->data + size;
+       xdp->flags = 0;
 }
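The added `xdp->flags = 0;` clears per-use state left over from the buffer's previous trip through the pool, e.g. a stale fragments flag. The bug class in miniature (hypothetical buffer type):

#include <stdio.h>

#define BUF_F_FRAGS 0x1         /* stand-in for a per-use flag */

struct buf {
        unsigned int size;
        unsigned int flags;
};

/* Reinitialize a recycled buffer: without `flags = 0`, old flags leak through. */
static void buf_set_size(struct buf *b, unsigned int size)
{
        b->size = size;
        b->flags = 0;
}

int main(void)
{
        struct buf b = { 0, 0 };

        b.flags |= BUF_F_FRAGS;         /* first use marks it multi-fragment */
        buf_set_size(&b, 128);          /* recycled for a single-buffer packet */
        printf("flags after reuse: 0x%x\n", b.flags);   /* 0x0, not 0x1 */
        return 0;
}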
 
 static inline dma_addr_t xsk_buff_raw_get_dma(struct xsk_buff_pool *pool,
@@ -350,6 +368,15 @@ static inline struct xdp_buff *xsk_buff_get_frag(struct xdp_buff *first)
        return NULL;
 }
 
+static inline void xsk_buff_del_tail(struct xdp_buff *tail)
+{
+}
+
+static inline struct xdp_buff *xsk_buff_get_tail(struct xdp_buff *first)
+{
+       return NULL;
+}
+
 static inline void xsk_buff_set_size(struct xdp_buff *xdp, u32 size)
 {
 }
index 8d73171cb9f0d78672355d4e822176d95aa13cb2..08f2c93d6b1607939fb47c510153d544f496febe 100644 (file)
@@ -1071,6 +1071,31 @@ TRACE_EVENT(afs_file_error,
                      __print_symbolic(__entry->where, afs_file_errors))
            );
 
+TRACE_EVENT(afs_bulkstat_error,
+           TP_PROTO(struct afs_operation *op, struct afs_fid *fid, unsigned int index, s32 abort),
+
+           TP_ARGS(op, fid, index, abort),
+
+           TP_STRUCT__entry(
+                   __field_struct(struct afs_fid,      fid)
+                   __field(unsigned int,               op)
+                   __field(unsigned int,               index)
+                   __field(s32,                        abort)
+                            ),
+
+           TP_fast_assign(
+                   __entry->op = op->debug_id;
+                   __entry->fid = *fid;
+                   __entry->index = index;
+                   __entry->abort = abort;
+                          ),
+
+           TP_printk("OP=%08x[%02x] %llx:%llx:%x a=%d",
+                     __entry->op, __entry->index,
+                     __entry->fid.vid, __entry->fid.vnode, __entry->fid.unique,
+                     __entry->abort)
+           );
+
 TRACE_EVENT(afs_cm_no_server,
            TP_PROTO(struct afs_call *call, struct sockaddr_rxrpc *srx),
 
index de1944e42c6556a46a8a87855189f34b86859e99..63c49318a863076b0861b945c1511d98f9ffffe3 100644 (file)
@@ -53,7 +53,7 @@ extern "C" {
 #define DRM_IVPU_PARAM_CORE_CLOCK_RATE     3
 #define DRM_IVPU_PARAM_NUM_CONTEXTS        4
 #define DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS 5
-#define DRM_IVPU_PARAM_CONTEXT_PRIORITY            6
+#define DRM_IVPU_PARAM_CONTEXT_PRIORITY            6 /* Deprecated */
 #define DRM_IVPU_PARAM_CONTEXT_ID          7
 #define DRM_IVPU_PARAM_FW_API_VERSION      8
 #define DRM_IVPU_PARAM_ENGINE_HEARTBEAT            9
@@ -64,11 +64,18 @@ extern "C" {
 
 #define DRM_IVPU_PLATFORM_TYPE_SILICON     0
 
+/* Deprecated, use DRM_IVPU_JOB_PRIORITY */
 #define DRM_IVPU_CONTEXT_PRIORITY_IDLE     0
 #define DRM_IVPU_CONTEXT_PRIORITY_NORMAL    1
 #define DRM_IVPU_CONTEXT_PRIORITY_FOCUS            2
 #define DRM_IVPU_CONTEXT_PRIORITY_REALTIME  3
 
+#define DRM_IVPU_JOB_PRIORITY_DEFAULT  0
+#define DRM_IVPU_JOB_PRIORITY_IDLE     1
+#define DRM_IVPU_JOB_PRIORITY_NORMAL   2
+#define DRM_IVPU_JOB_PRIORITY_FOCUS    3
+#define DRM_IVPU_JOB_PRIORITY_REALTIME 4
+
 /**
  * DRM_IVPU_CAP_METRIC_STREAMER
  *
@@ -112,10 +119,6 @@ struct drm_ivpu_param {
         * %DRM_IVPU_PARAM_CONTEXT_BASE_ADDRESS:
         * Lowest VPU virtual address available in the current context (read-only)
         *
-        * %DRM_IVPU_PARAM_CONTEXT_PRIORITY:
-        * Value of current context scheduling priority (read-write).
-        * See DRM_IVPU_CONTEXT_PRIORITY_* for possible values.
-        *
         * %DRM_IVPU_PARAM_CONTEXT_ID:
         * Current context ID, always greater than 0 (read-only)
         *
@@ -286,6 +289,18 @@ struct drm_ivpu_submit {
         * to be executed. The offset has to be 8-byte aligned.
         */
        __u32 commands_offset;
+
+       /**
+        * @priority:
+        *
+        * Priority to be set for related job command queue, can be one of the following:
+        * %DRM_IVPU_JOB_PRIORITY_DEFAULT
+        * %DRM_IVPU_JOB_PRIORITY_IDLE
+        * %DRM_IVPU_JOB_PRIORITY_NORMAL
+        * %DRM_IVPU_JOB_PRIORITY_FOCUS
+        * %DRM_IVPU_JOB_PRIORITY_REALTIME
+        */
+       __u32 priority;
 };
 
 /* drm_ivpu_bo_wait job status codes */
index 7c29d82db9ee0dcb5ce770b384149c9734a50f30..f8bc34a6bcfa2f7313f2e9eac38e2df6a25aafca 100644 (file)
@@ -614,6 +614,9 @@ struct btrfs_ioctl_clone_range_args {
  */
 #define BTRFS_DEFRAG_RANGE_COMPRESS 1
 #define BTRFS_DEFRAG_RANGE_START_IO 2
+#define BTRFS_DEFRAG_RANGE_FLAGS_SUPP  (BTRFS_DEFRAG_RANGE_COMPRESS |          \
+                                        BTRFS_DEFRAG_RANGE_START_IO)
+
 struct btrfs_ioctl_defrag_range_args {
        /* start of the defrag operation */
        __u64 start;
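Note: BTRFS_DEFRAG_RANGE_FLAGS_SUPP collects every flag the defrag ioctl understands, letting the handler reject unknown bits in a single test. A sketch of the intended check (the exact error code is an assumption):

    /* Refuse btrfs_ioctl_defrag_range_args carrying unsupported bits. */
    if (range->flags & ~BTRFS_DEFRAG_RANGE_FLAGS_SUPP)
            return -EOPNOTSUPP;
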
index 8df18f3a974846b48e41b2a8dcbc2f2f2f90128e..8d4e836e1b6b15c1846fa2f6148111e63f4b4aa9 100644 (file)
@@ -876,6 +876,18 @@ config CC_NO_ARRAY_BOUNDS
        bool
        default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC11_NO_ARRAY_BOUNDS
 
+# Currently, disable -Wstringop-overflow for GCC 11, globally.
+config GCC11_NO_STRINGOP_OVERFLOW
+       def_bool y
+
+config CC_NO_STRINGOP_OVERFLOW
+       bool
+       default y if CC_IS_GCC && GCC_VERSION >= 110000 && GCC_VERSION < 120000 && GCC11_NO_STRINGOP_OVERFLOW
+
+config CC_STRINGOP_OVERFLOW
+       bool
+       default y if CC_IS_GCC && !CC_NO_STRINGOP_OVERFLOW
+
 #
 # For architectures that know their GCC __int128 support is sound
 #
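Note: the three symbols reduce to one predicate: -Wstringop-overflow is masked exactly for GCC 11 releases, using the kernel's GCC_VERSION encoding of major*10000 + minor*100 + patchlevel. The same condition expressed in C (function name illustrative):

    /* True for any GCC 11.x, i.e. GCC_VERSION in [110000, 120000). */
    static bool gcc11_stringop_overflow_masked(int gcc_version)
    {
            return gcc_version >= 110000 && gcc_version < 120000;
    }
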
index 6705634e5f52aa625b797f3c7931903c55130359..b1ee3a9c38072933dd020848b7f3586af257d912 100644 (file)
@@ -471,7 +471,6 @@ const struct io_issue_def io_issue_defs[] = {
        },
        [IORING_OP_FIXED_FD_INSTALL] = {
                .needs_file             = 1,
-               .audit_skip             = 1,
                .prep                   = io_install_fixed_fd_prep,
                .issue                  = io_install_fixed_fd,
        },
index 0fe0dd30554623edb87cd159b4ffe0c52288211a..e3357dfa14ca42dd5b25e6cf9ce4a4be8b7ee0f4 100644 (file)
@@ -277,6 +277,10 @@ int io_install_fixed_fd_prep(struct io_kiocb *req, const struct io_uring_sqe *sq
        if (flags & ~IORING_FIXED_FD_NO_CLOEXEC)
                return -EINVAL;
 
+       /* ensure the task's creds are used when installing/receiving fds */
+       if (req->flags & REQ_F_CREDS)
+               return -EPERM;
+
        /* default to O_CLOEXEC, disable if IORING_FIXED_FD_NO_CLOEXEC is set */
        ifi = io_kiocb_to_cmd(req, struct io_fixed_install);
        ifi->o_flags = O_CLOEXEC;
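Note: with this check, a fixed-fd-install request that carries a personality (sqe->personality set, which marks the request with REQ_F_CREDS) is refused outright, so a file can never be installed under assumed credentials. From userspace the observable effect would be (a sketch, raw SQE fields, not verified against liburing):

    /* An SQE like this now completes with cqe->res == -EPERM. */
    sqe->opcode = IORING_OP_FIXED_FD_INSTALL;
    sqe->personality = some_registered_personality;  /* hypothetical id */
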
index 485bb0389b488d28a4efb23901b514d93b3834f6..929e98c629652a0fef1b71e6c002cca41936c4b4 100644 (file)
@@ -537,7 +537,7 @@ retry:
                }
        }
 
-       ret = __replace_page(vma, vaddr, old_page, new_page);
+       ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
        if (new_page)
                put_page(new_page);
 put_old:
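Note: __replace_page() expects a page-aligned address, and masking with PAGE_MASK clears the in-page offset bits. Worked example with 4 KiB pages:

    /* PAGE_MASK == ~0xfffUL on a 4 KiB-page system:
     * vaddr             = 0x7f1234567abc
     * vaddr & PAGE_MASK = 0x7f1234567000   (start of the containing page) */
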
index 47ff3b35352e0bb4ec040b77241d9cbdcb986ef2..0d944e92a43ffa13bdbcce6c6a28c44bab29ca19 100644 (file)
@@ -1748,6 +1748,7 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
        if (clone_flags & CLONE_FS) {
                /* tsk->fs is already what we want */
                spin_lock(&fs->lock);
+               /* "users" and "in_exec" locked for check_unsafe_exec() */
                if (fs->in_exec) {
                        spin_unlock(&fs->lock);
                        return -EAGAIN;
index e0e853412c158e1277ea4c63a40576093b0bc673..1e78ef24321e82dbfaf0c07941a0c41ad3438aaa 100644 (file)
@@ -627,12 +627,21 @@ retry:
 }
 
 /*
- * PI futexes can not be requeued and must remove themselves from the
- * hash bucket. The hash bucket lock (i.e. lock_ptr) is held.
+ * PI futexes can not be requeued and must remove themselves from the hash
+ * bucket. The hash bucket lock (i.e. lock_ptr) is held.
  */
 void futex_unqueue_pi(struct futex_q *q)
 {
-       __futex_unqueue(q);
+       /*
+        * If the lock was not acquired (due to timeout or signal) then the
+        * rt_waiter is removed before futex_q is. If this is observed by
+        * an unlocker after dropping the rtmutex wait lock and before
+        * acquiring the hash bucket lock, then the unlocker dequeues the
+        * futex_q from the hash bucket list to guarantee consistent state
+        * vs. userspace. Therefore the dequeue here must be conditional.
+        */
+       if (!plist_node_empty(&q->list))
+               __futex_unqueue(q);
 
        BUG_ON(!q->pi_state);
        put_pi_state(q->pi_state);
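Note: the plist_node_empty() guard makes the unqueue idempotent: if the unlocker already removed this futex_q from the hash bucket, unlinking it a second time would corrupt the list. The same discipline in generic list terms (a sketch; it relies on list_del_init() being used at every unlink site so the emptiness test on the node stays meaningful):

    /* Idempotent removal: only unlink if the node is still queued. */
    if (!list_empty(&q->node))
            list_del_init(&q->node);
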
index 90e5197f4e5696dbd5a79fd0033ed95e4bd32fac..5722467f273794ec314870fc76d0ba04a8617f7e 100644 (file)
@@ -1135,6 +1135,7 @@ retry:
 
        hb = futex_hash(&key);
        spin_lock(&hb->lock);
+retry_hb:
 
        /*
         * Check waiters first. We do not trust user space values at
@@ -1177,12 +1178,17 @@ retry:
                /*
                 * Futex vs rt_mutex waiter state -- if there are no rt_mutex
                 * waiters even though futex thinks there are, then the waiter
-                * is leaving and the uncontended path is safe to take.
+                * is leaving. The entry needs to be removed from the list so a
+                * new futex_lock_pi() is not using this stale PI-state while
+                * the futex is available in user space again.
+                * There can be more than one task on its way out so it needs
+                * to retry.
                 */
                rt_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
                if (!rt_waiter) {
+                       __futex_unqueue(top_waiter);
                        raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
-                       goto do_uncontended;
+                       goto retry_hb;
                }
 
                get_pi_state(pi_state);
@@ -1217,7 +1223,6 @@ retry:
                return ret;
        }
 
-do_uncontended:
        /*
         * We have no kernel internal state, i.e. no waiters in the
         * kernel. Waiters which are about to queue themselves are stuck
index 27ca1c866f298bf9d8876bce68e418881702aabf..371eb1711d3467baf596c477411c1d3ac554cedd 100644 (file)
@@ -600,7 +600,7 @@ int __init early_irq_init(void)
                mutex_init(&desc[i].request_mutex);
                init_waitqueue_head(&desc[i].wait_for_threads);
                desc_set_defaults(i, &desc[i], node, NULL, NULL);
-               irq_resend_init(desc);
+               irq_resend_init(&desc[i]);
        }
        return arch_early_irq_init();
 }
index 1ae8517778066284be5c7c15111b09a0f726f164..b2bccfd37c383d04692fb6a7a72eb71a1f62798b 100644 (file)
@@ -1013,6 +1013,38 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
        return needmore;
 }
 
+static void swake_up_one_online_ipi(void *arg)
+{
+       struct swait_queue_head *wqh = arg;
+
+       swake_up_one(wqh);
+}
+
+static void swake_up_one_online(struct swait_queue_head *wqh)
+{
+       int cpu = get_cpu();
+
+       /*
+        * If called from rcutree_report_cpu_starting(), wake up
+        * is dangerous that late in the CPU-down hotplug process. The
+        * scheduler might queue an ignored hrtimer. Defer the wake up
+        * to an online CPU instead.
+        */
+       if (unlikely(cpu_is_offline(cpu))) {
+               int target;
+
+               target = cpumask_any_and(housekeeping_cpumask(HK_TYPE_RCU),
+                                        cpu_online_mask);
+
+               smp_call_function_single(target, swake_up_one_online_ipi,
+                                        wqh, 0);
+               put_cpu();
+       } else {
+               put_cpu();
+               swake_up_one(wqh);
+       }
+}
+
 /*
  * Awaken the grace-period kthread.  Don't do a self-awaken (unless in an
  * interrupt or softirq handler, in which case we just might immediately
@@ -1037,7 +1069,7 @@ static void rcu_gp_kthread_wake(void)
                return;
        WRITE_ONCE(rcu_state.gp_wake_time, jiffies);
        WRITE_ONCE(rcu_state.gp_wake_seq, READ_ONCE(rcu_state.gp_seq));
-       swake_up_one(&rcu_state.gp_wq);
+       swake_up_one_online(&rcu_state.gp_wq);
 }
 
 /*
index 6d7cea5d591f95d823b63972da899dded9e369d1..2ac440bc7e10bc8e1248eae47a661eb017768cee 100644 (file)
@@ -173,7 +173,6 @@ static bool sync_rcu_exp_done_unlocked(struct rcu_node *rnp)
        return ret;
 }
 
-
 /*
  * Report the exit from RCU read-side critical section for the last task
  * that queued itself during or before the current expedited preemptible-RCU
@@ -201,7 +200,7 @@ static void __rcu_report_exp_rnp(struct rcu_node *rnp,
                        raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
                        if (wake) {
                                smp_mb(); /* EGP done before wake_up(). */
-                               swake_up_one(&rcu_state.expedited_wq);
+                               swake_up_one_online(&rcu_state.expedited_wq);
                        }
                        break;
                }
index c108ed8a9804ada919575c97b42dd663e33c1a16..3052b1f1168e29c4432ba3b068488af11029018d 100644 (file)
@@ -99,6 +99,7 @@ static u64 suspend_start;
  * Interval: 0.5sec.
  */
 #define WATCHDOG_INTERVAL (HZ >> 1)
+#define WATCHDOG_INTERVAL_MAX_NS ((2 * WATCHDOG_INTERVAL) * (NSEC_PER_SEC / HZ))
 
 /*
  * Threshold: 0.0312s, when doubled: 0.0625s.
@@ -134,6 +135,7 @@ static DECLARE_WORK(watchdog_work, clocksource_watchdog_work);
 static DEFINE_SPINLOCK(watchdog_lock);
 static int watchdog_running;
 static atomic_t watchdog_reset_pending;
+static int64_t watchdog_max_interval;
 
 static inline void clocksource_watchdog_lock(unsigned long *flags)
 {
@@ -399,8 +401,8 @@ static inline void clocksource_reset_watchdog(void)
 static void clocksource_watchdog(struct timer_list *unused)
 {
        u64 csnow, wdnow, cslast, wdlast, delta;
+       int64_t wd_nsec, cs_nsec, interval;
        int next_cpu, reset_pending;
-       int64_t wd_nsec, cs_nsec;
        struct clocksource *cs;
        enum wd_read_status read_ret;
        unsigned long extra_wait = 0;
@@ -470,6 +472,27 @@ static void clocksource_watchdog(struct timer_list *unused)
                if (atomic_read(&watchdog_reset_pending))
                        continue;
 
+               /*
+                * The processing of timer softirqs can get delayed (usually
+                * on account of ksoftirqd not getting to run in a timely
+                * manner), which causes the watchdog interval to stretch.
+                * Skew detection may fail for longer watchdog intervals
+                * on account of fixed margins being used.
+                * Some clocksources, e.g. acpi_pm, cannot tolerate
+                * watchdog intervals longer than a few seconds.
+                */
+               interval = max(cs_nsec, wd_nsec);
+               if (unlikely(interval > WATCHDOG_INTERVAL_MAX_NS)) {
+                       if (system_state > SYSTEM_SCHEDULING &&
+                           interval > 2 * watchdog_max_interval) {
+                               watchdog_max_interval = interval;
+                               pr_warn("Long readout interval, skipping watchdog check: cs_nsec: %lld wd_nsec: %lld\n",
+                                       cs_nsec, wd_nsec);
+                       }
+                       watchdog_timer.expires = jiffies;
+                       continue;
+               }
+
                /* Check the deviation from the watchdog clocksource. */
                md = cs->uncertainty_margin + watchdog->uncertainty_margin;
                if (abs(cs_nsec - wd_nsec) > md) {
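Note: the cap works out to two nominal watchdog periods: WATCHDOG_INTERVAL is HZ >> 1 (0.5 s), so WATCHDOG_INTERVAL_MAX_NS = (2 * (HZ >> 1)) * (NSEC_PER_SEC / HZ) = NSEC_PER_SEC for any even HZ, about one second. A measured interval beyond that is attributed to delayed timer softirqs, and the skew check is skipped rather than risking a false positive against the fixed margins.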
index d2501673028da5e627aa08b51a103c181295cad3..01fb50c1b17e4f1b33285ae2ce2690f0747f8ee8 100644 (file)
@@ -1577,6 +1577,7 @@ void tick_cancel_sched_timer(int cpu)
 {
        struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
        ktime_t idle_sleeptime, iowait_sleeptime;
+       unsigned long idle_calls, idle_sleeps;
 
 # ifdef CONFIG_HIGH_RES_TIMERS
        if (ts->sched_timer.base)
@@ -1585,9 +1586,13 @@ void tick_cancel_sched_timer(int cpu)
 
        idle_sleeptime = ts->idle_sleeptime;
        iowait_sleeptime = ts->iowait_sleeptime;
+       idle_calls = ts->idle_calls;
+       idle_sleeps = ts->idle_sleeps;
        memset(ts, 0, sizeof(*ts));
        ts->idle_sleeptime = idle_sleeptime;
        ts->iowait_sleeptime = iowait_sleeptime;
+       ts->idle_calls = idle_calls;
+       ts->idle_sleeps = idle_sleeps;
 }
 #endif
 
index 46439e3bcec4d20b45ae8202d7a68888778fa208..b33c3861fbbbf303e78f740a0fcc41caa2a77d77 100644 (file)
@@ -1470,8 +1470,10 @@ register_snapshot_trigger(char *glob,
                          struct event_trigger_data *data,
                          struct trace_event_file *file)
 {
-       if (tracing_alloc_snapshot_instance(file->tr) != 0)
-               return 0;
+       int ret = tracing_alloc_snapshot_instance(file->tr);
+
+       if (ret < 0)
+               return ret;
 
        return register_trigger(glob, data, file);
 }
index c774e560f2f957127c7e41b825164a0d102b6fd0..a4dcf0f2435213bc2b2b91d677ec18290aa53859 100644 (file)
@@ -574,7 +574,12 @@ __tracing_map_insert(struct tracing_map *map, void *key, bool lookup_only)
                                }
 
                                memcpy(elt->key, key, map->key_size);
-                               entry->val = elt;
+                               /*
+                                * Ensure the initialization is visible and
+                                * publish the elt.
+                                */
+                               smp_wmb();
+                               WRITE_ONCE(entry->val, elt);
                                atomic64_inc(&map->hits);
 
                                return entry->val;
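Note: the smp_wmb()/WRITE_ONCE() pair is the classic publication pattern: complete the object's initialization, then make the pointer visible so no reader can observe a half-built elt. A userspace analogue using C11 atomics (a sketch; the kernel reader side pairs with this via READ_ONCE() and address dependencies rather than an explicit acquire):

    #include <stdatomic.h>

    struct elt { int key; };
    struct entry { _Atomic(struct elt *) val; };

    /* Writer: initialize fully, then publish with a release store. */
    static void publish(struct entry *entry, struct elt *e, int key)
    {
            e->key = key;            /* complete initialization first */
            atomic_store_explicit(&entry->val, e, memory_order_release);
    }

    /* Reader: the acquire load pairs with the release store above. */
    static struct elt *lookup(struct entry *entry)
    {
            return atomic_load_explicit(&entry->val, memory_order_acquire);
    }
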
index f5371287b3750f0cefdc423748846b4257d8d14a..074c6dd2e36a7d9e8154a604db7c2a8e4f669921 100644 (file)
@@ -45,8 +45,8 @@ int kunit_bus_init(void)
        int error;
 
        kunit_bus_device = root_device_register("kunit");
-       if (!kunit_bus_device)
-               return -ENOMEM;
+       if (IS_ERR(kunit_bus_device))
+               return PTR_ERR(kunit_bus_device);
 
        error = bus_register(&kunit_bus_type);
        if (error)
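Note: root_device_register() reports failure through ERR_PTR(), never NULL, so the old NULL test could not fire. Error pointers pack a negative errno into the top of the address space:

    /* ERR_PTR(-ENOMEM) == (void *)-12; IS_ERR() tests whether the value
     * falls in the last-4095 range (MAX_ERRNO), and PTR_ERR() recovers
     * the errno. NULL is not in that range, which is why the old
     * !kunit_bus_device check missed real failures. */
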
index 717b9599036ba0bccf1ffe846b7b051c591109f2..689fff2b2b106a597bbf9b2e473d37e193537b03 100644 (file)
@@ -146,6 +146,10 @@ void kunit_free_suite_set(struct kunit_suite_set suite_set)
        kfree(suite_set.start);
 }
 
+/*
+ * Filter and reallocate test suites. Must return the filtered test suites set
+ * allocated at a valid virtual address or NULL in case of error.
+ */
 struct kunit_suite_set
 kunit_filter_suites(const struct kunit_suite_set *suite_set,
                    const char *filter_glob,
index c4259d910356ba7e8f24847cd347eb5861071cb4..f7980ef236a38bdefd8e0e7b53915f6057348617 100644 (file)
@@ -720,7 +720,7 @@ static void kunit_device_cleanup_test(struct kunit *test)
        long action_was_run = 0;
 
        test_device = kunit_device_register(test, "my_device");
-       KUNIT_ASSERT_NOT_NULL(test, test_device);
+       KUNIT_ASSERT_NOT_ERR_OR_NULL(test, test_device);
 
        /* Add an action to verify cleanup. */
        devm_add_action(test_device, test_dev_action, &action_was_run);
index f95d2093a0aa3359c0cb08462ea62e76ab0f2ecf..31a5a992e64670f35a9717659f99ca1b385d5f2e 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/panic.h>
 #include <linux/sched/debug.h>
 #include <linux/sched.h>
+#include <linux/mm.h>
 
 #include "debugfs.h"
 #include "device-impl.h"
@@ -801,12 +802,19 @@ static void kunit_module_exit(struct module *mod)
        };
        const char *action = kunit_action();
 
+       /*
+        * Check if the start address is a valid virtual address to detect
+        * if the module load sequence has failed and the suite set has not
+        * been initialized and filtered.
+        */
+       if (!suite_set.start || !virt_addr_valid(suite_set.start))
+               return;
+
        if (!action)
                __kunit_test_suites_exit(mod->kunit_suites,
                                         mod->num_kunit_suites);
 
-       if (suite_set.start)
-               kunit_free_suite_set(suite_set);
+       kunit_free_suite_set(suite_set);
 }
 
 static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
@@ -816,12 +824,12 @@ static int kunit_module_notify(struct notifier_block *nb, unsigned long val,
 
        switch (val) {
        case MODULE_STATE_LIVE:
+               kunit_module_init(mod);
                break;
        case MODULE_STATE_GOING:
                kunit_module_exit(mod);
                break;
        case MODULE_STATE_COMING:
-               kunit_module_init(mod);
                break;
        case MODULE_STATE_UNFORMED:
                break;
index a0be5d05c7f08187667c91c7d0886843df52225c..5caa1f566553843911ffdf2edafd32ee70277ea8 100644 (file)
@@ -14,6 +14,7 @@
 
 #define pr_fmt(fmt) "stackdepot: " fmt
 
+#include <linux/debugfs.h>
 #include <linux/gfp.h>
 #include <linux/jhash.h>
 #include <linux/kernel.h>
@@ -21,8 +22,9 @@
 #include <linux/list.h>
 #include <linux/mm.h>
 #include <linux/mutex.h>
-#include <linux/percpu.h>
 #include <linux/printk.h>
+#include <linux/rculist.h>
+#include <linux/rcupdate.h>
 #include <linux/refcount.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
@@ -67,12 +69,28 @@ union handle_parts {
 };
 
 struct stack_record {
-       struct list_head list;          /* Links in hash table or freelist */
+       struct list_head hash_list;     /* Links in the hash table */
        u32 hash;                       /* Hash in hash table */
        u32 size;                       /* Number of stored frames */
-       union handle_parts handle;
+       union handle_parts handle;      /* Constant after initialization */
        refcount_t count;
-       unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES];    /* Frames */
+       union {
+               unsigned long entries[CONFIG_STACKDEPOT_MAX_FRAMES];    /* Frames */
+               struct {
+                       /*
+                        * An important invariant of the implementation is to
+                        * only place a stack record onto the freelist iff its
+                        * refcount is zero. Because stack records with a zero
+                        * refcount are never considered as valid, it is safe to
+                        * union @entries and freelist management state below.
+                        * Conversely, as soon as an entry is off the freelist
+                        * and its refcount becomes non-zero, the below must not
+                        * be accessed until being placed back on the freelist.
+                        */
+                       struct list_head free_list;     /* Links in the freelist */
+                       unsigned long rcu_state;        /* RCU cookie */
+               };
+       };
 };
 
 #define DEPOT_STACK_RECORD_SIZE \
@@ -112,8 +130,25 @@ static LIST_HEAD(free_stacks);
  * yet allocated or if the limit on the number of pools is reached.
  */
 static bool new_pool_required = true;
-/* Lock that protects the variables above. */
-static DEFINE_RWLOCK(pool_rwlock);
+/* The lock must be held when performing pool or freelist modifications. */
+static DEFINE_RAW_SPINLOCK(pool_lock);
+
+/* Statistics counters for debugfs. */
+enum depot_counter_id {
+       DEPOT_COUNTER_ALLOCS,
+       DEPOT_COUNTER_FREES,
+       DEPOT_COUNTER_INUSE,
+       DEPOT_COUNTER_FREELIST_SIZE,
+       DEPOT_COUNTER_COUNT,
+};
+static long counters[DEPOT_COUNTER_COUNT];
+static const char *const counter_names[] = {
+       [DEPOT_COUNTER_ALLOCS]          = "allocations",
+       [DEPOT_COUNTER_FREES]           = "frees",
+       [DEPOT_COUNTER_INUSE]           = "in_use",
+       [DEPOT_COUNTER_FREELIST_SIZE]   = "freelist_size",
+};
+static_assert(ARRAY_SIZE(counter_names) == DEPOT_COUNTER_COUNT);
 
 static int __init disable_stack_depot(char *str)
 {
@@ -258,14 +293,15 @@ out_unlock:
 }
 EXPORT_SYMBOL_GPL(stack_depot_init);
 
-/* Initializes a stack depol pool. */
+/*
+ * Initializes new stack depot @pool, release all its entries to the freelist,
+ * and update the list of pools.
+ */
 static void depot_init_pool(void *pool)
 {
        int offset;
 
-       lockdep_assert_held_write(&pool_rwlock);
-
-       WARN_ON(!list_empty(&free_stacks));
+       lockdep_assert_held(&pool_lock);
 
        /* Initialize handles and link stack records into the freelist. */
        for (offset = 0; offset <= DEPOT_POOL_SIZE - DEPOT_STACK_RECORD_SIZE;
@@ -276,18 +312,36 @@ static void depot_init_pool(void *pool)
                stack->handle.offset = offset >> DEPOT_STACK_ALIGN;
                stack->handle.extra = 0;
 
-               list_add(&stack->list, &free_stacks);
+               /*
+                * Stack traces of size 0 are never saved, and we can simply use
+                * the size field as an indicator if this is a new unused stack
+                * record in the freelist.
+                */
+               stack->size = 0;
+
+               INIT_LIST_HEAD(&stack->hash_list);
+               /*
+                * Add to the freelist front to prioritize never-used entries:
+                * required in case there are entries in the freelist, but their
+                * RCU cookie still belongs to the current RCU grace period
+                * (there can still be concurrent readers).
+                */
+               list_add(&stack->free_list, &free_stacks);
+               counters[DEPOT_COUNTER_FREELIST_SIZE]++;
        }
 
        /* Save reference to the pool to be used by depot_fetch_stack(). */
        stack_pools[pools_num] = pool;
-       pools_num++;
+
+       /* Pairs with concurrent READ_ONCE() in depot_fetch_stack(). */
+       WRITE_ONCE(pools_num, pools_num + 1);
+       ASSERT_EXCLUSIVE_WRITER(pools_num);
 }
 
 /* Keeps the preallocated memory to be used for a new stack depot pool. */
 static void depot_keep_new_pool(void **prealloc)
 {
-       lockdep_assert_held_write(&pool_rwlock);
+       lockdep_assert_held(&pool_lock);
 
        /*
         * If a new pool is already saved or the maximum number of
@@ -310,17 +364,16 @@ static void depot_keep_new_pool(void **prealloc)
         * number of pools is reached. In either case, take note that
         * keeping another pool is not required.
         */
-       new_pool_required = false;
+       WRITE_ONCE(new_pool_required, false);
 }
 
-/* Updates references to the current and the next stack depot pools. */
-static bool depot_update_pools(void **prealloc)
+/*
+ * Try to initialize a new stack depot pool from either a previous or the
+ * current pre-allocation, and release all its entries to the freelist.
+ */
+static bool depot_try_init_pool(void **prealloc)
 {
-       lockdep_assert_held_write(&pool_rwlock);
-
-       /* Check if we still have objects in the freelist. */
-       if (!list_empty(&free_stacks))
-               goto out_keep_prealloc;
+       lockdep_assert_held(&pool_lock);
 
        /* Check if we have a new pool saved and use it. */
        if (new_pool) {
@@ -329,10 +382,9 @@ static bool depot_update_pools(void **prealloc)
 
                /* Take note that we might need a new new_pool. */
                if (pools_num < DEPOT_MAX_POOLS)
-                       new_pool_required = true;
+                       WRITE_ONCE(new_pool_required, true);
 
-               /* Try keeping the preallocated memory for new_pool. */
-               goto out_keep_prealloc;
+               return true;
        }
 
        /* Bail out if we reached the pool limit. */
@@ -349,12 +401,32 @@ static bool depot_update_pools(void **prealloc)
        }
 
        return false;
+}
+
+/* Try to find next free usable entry. */
+static struct stack_record *depot_pop_free(void)
+{
+       struct stack_record *stack;
+
+       lockdep_assert_held(&pool_lock);
+
+       if (list_empty(&free_stacks))
+               return NULL;
+
+       /*
+        * We maintain the invariant that the elements in front are least
+        * recently used, and are therefore more likely to be associated with an
+        * RCU grace period in the past. Consequently it is sufficient to only
+        * check the first entry.
+        */
+       stack = list_first_entry(&free_stacks, struct stack_record, free_list);
+       if (stack->size && !poll_state_synchronize_rcu(stack->rcu_state))
+               return NULL;
+
+       list_del(&stack->free_list);
+       counters[DEPOT_COUNTER_FREELIST_SIZE]--;
 
-out_keep_prealloc:
-       /* Keep the preallocated memory for a new pool if required. */
-       if (*prealloc)
-               depot_keep_new_pool(prealloc);
-       return true;
+       return stack;
 }
 
 /* Allocates a new stack in a stack depot pool. */
@@ -363,19 +435,22 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 {
        struct stack_record *stack;
 
-       lockdep_assert_held_write(&pool_rwlock);
+       lockdep_assert_held(&pool_lock);
 
-       /* Update current and new pools if required and possible. */
-       if (!depot_update_pools(prealloc))
+       /* This should already be checked by public API entry points. */
+       if (WARN_ON_ONCE(!size))
                return NULL;
 
        /* Check if we have a stack record to save the stack trace. */
-       if (list_empty(&free_stacks))
-               return NULL;
-
-       /* Get and unlink the first entry from the freelist. */
-       stack = list_first_entry(&free_stacks, struct stack_record, list);
-       list_del(&stack->list);
+       stack = depot_pop_free();
+       if (!stack) {
+               /* No usable entries on the freelist - try to refill the freelist. */
+               if (!depot_try_init_pool(prealloc))
+                       return NULL;
+               stack = depot_pop_free();
+               if (WARN_ON(!stack))
+                       return NULL;
+       }
 
        /* Limit number of saved frames to CONFIG_STACKDEPOT_MAX_FRAMES. */
        if (size > CONFIG_STACKDEPOT_MAX_FRAMES)
@@ -394,38 +469,80 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
         */
        kmsan_unpoison_memory(stack, DEPOT_STACK_RECORD_SIZE);
 
+       counters[DEPOT_COUNTER_ALLOCS]++;
+       counters[DEPOT_COUNTER_INUSE]++;
        return stack;
 }
 
 static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
 {
+       const int pools_num_cached = READ_ONCE(pools_num);
        union handle_parts parts = { .handle = handle };
        void *pool;
        size_t offset = parts.offset << DEPOT_STACK_ALIGN;
        struct stack_record *stack;
 
-       lockdep_assert_held(&pool_rwlock);
+       lockdep_assert_not_held(&pool_lock);
 
-       if (parts.pool_index > pools_num) {
+       if (parts.pool_index > pools_num_cached) {
                WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
-                    parts.pool_index, pools_num, handle);
+                    parts.pool_index, pools_num_cached, handle);
                return NULL;
        }
 
        pool = stack_pools[parts.pool_index];
-       if (!pool)
+       if (WARN_ON(!pool))
                return NULL;
 
        stack = pool + offset;
+       if (WARN_ON(!refcount_read(&stack->count)))
+               return NULL;
+
        return stack;
 }
 
 /* Links stack into the freelist. */
 static void depot_free_stack(struct stack_record *stack)
 {
-       lockdep_assert_held_write(&pool_rwlock);
+       unsigned long flags;
+
+       lockdep_assert_not_held(&pool_lock);
 
-       list_add(&stack->list, &free_stacks);
+       raw_spin_lock_irqsave(&pool_lock, flags);
+       printk_deferred_enter();
+
+       /*
+        * Remove the entry from the hash list. Concurrent list traversal may
+        * still observe the entry, but since the refcount is zero, this entry
+        * will no longer be considered as valid.
+        */
+       list_del_rcu(&stack->hash_list);
+
+       /*
+        * Due to being used from constrained contexts such as the allocators,
+        * NMI, or even RCU itself, stack depot cannot rely on primitives that
+        * would sleep (such as synchronize_rcu()) or recursively call into
+        * stack depot again (such as call_rcu()).
+        *
+        * Instead, get an RCU cookie, so that we can ensure this entry isn't
+        * moved onto another list until the next grace period, and concurrent
+        * RCU list traversal remains safe.
+        */
+       stack->rcu_state = get_state_synchronize_rcu();
+
+       /*
+        * Add the entry to the freelist tail, so that older entries are
+        * considered first - their RCU cookie is more likely to no longer be
+        * associated with the current grace period.
+        */
+       list_add_tail(&stack->free_list, &free_stacks);
+
+       counters[DEPOT_COUNTER_FREELIST_SIZE]++;
+       counters[DEPOT_COUNTER_FREES]++;
+       counters[DEPOT_COUNTER_INUSE]--;
+
+       printk_deferred_exit();
+       raw_spin_unlock_irqrestore(&pool_lock, flags);
 }
 
 /* Calculates the hash for a stack. */
@@ -453,22 +570,52 @@ int stackdepot_memcmp(const unsigned long *u1, const unsigned long *u2,
 
 /* Finds a stack in a bucket of the hash table. */
 static inline struct stack_record *find_stack(struct list_head *bucket,
-                                            unsigned long *entries, int size,
-                                            u32 hash)
+                                             unsigned long *entries, int size,
+                                             u32 hash, depot_flags_t flags)
 {
-       struct list_head *pos;
-       struct stack_record *found;
+       struct stack_record *stack, *ret = NULL;
+
+       /*
+        * Stack depot may be used from instrumentation that instruments RCU or
+        * tracing itself; use variant that does not call into RCU and cannot be
+        * traced.
+        *
+        * Note: Such use cases must take care when using refcounting to evict
+        * unused entries, because the stack record free-then-reuse code paths
+        * do call into RCU.
+        */
+       rcu_read_lock_sched_notrace();
 
-       lockdep_assert_held(&pool_rwlock);
+       list_for_each_entry_rcu(stack, bucket, hash_list) {
+               if (stack->hash != hash || stack->size != size)
+                       continue;
+
+               /*
+                * This may race with depot_free_stack() accessing the freelist
+                * management state unioned with @entries. The refcount is zero
+                * in that case and the below refcount_inc_not_zero() will fail.
+                */
+               if (data_race(stackdepot_memcmp(entries, stack->entries, size)))
+                       continue;
+
+               /*
+                * Try to increment refcount. If this succeeds, the stack record
+                * is valid and has not yet been freed.
+                *
+                * If STACK_DEPOT_FLAG_GET is not used, it is undefined behavior
+                * to then call stack_depot_put() later, and we can assume that
+                * a stack record is never placed back on the freelist.
+                */
+               if ((flags & STACK_DEPOT_FLAG_GET) && !refcount_inc_not_zero(&stack->count))
+                       continue;
 
-       list_for_each(pos, bucket) {
-               found = list_entry(pos, struct stack_record, list);
-               if (found->hash == hash &&
-                   found->size == size &&
-                   !stackdepot_memcmp(entries, found->entries, size))
-                       return found;
+               ret = stack;
+               break;
        }
-       return NULL;
+
+       rcu_read_unlock_sched_notrace();
+
+       return ret;
 }
 
 depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
@@ -482,7 +629,6 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
        struct page *page = NULL;
        void *prealloc = NULL;
        bool can_alloc = depot_flags & STACK_DEPOT_FLAG_CAN_ALLOC;
-       bool need_alloc = false;
        unsigned long flags;
        u32 hash;
 
@@ -505,31 +651,16 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
        hash = hash_stack(entries, nr_entries);
        bucket = &stack_table[hash & stack_hash_mask];
 
-       read_lock_irqsave(&pool_rwlock, flags);
-       printk_deferred_enter();
-
-       /* Fast path: look the stack trace up without full locking. */
-       found = find_stack(bucket, entries, nr_entries, hash);
-       if (found) {
-               if (depot_flags & STACK_DEPOT_FLAG_GET)
-                       refcount_inc(&found->count);
-               printk_deferred_exit();
-               read_unlock_irqrestore(&pool_rwlock, flags);
+       /* Fast path: look the stack trace up without locking. */
+       found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
+       if (found)
                goto exit;
-       }
-
-       /* Take note if another stack pool needs to be allocated. */
-       if (new_pool_required)
-               need_alloc = true;
-
-       printk_deferred_exit();
-       read_unlock_irqrestore(&pool_rwlock, flags);
 
        /*
         * Allocate memory for a new pool if required now:
         * we won't be able to do that under the lock.
         */
-       if (unlikely(can_alloc && need_alloc)) {
+       if (unlikely(can_alloc && READ_ONCE(new_pool_required))) {
                /*
                 * Zero out zone modifiers, as we don't have specific zone
                 * requirements. Keep the flags related to allocation in atomic
@@ -543,31 +674,36 @@ depot_stack_handle_t stack_depot_save_flags(unsigned long *entries,
                        prealloc = page_address(page);
        }
 
-       write_lock_irqsave(&pool_rwlock, flags);
+       raw_spin_lock_irqsave(&pool_lock, flags);
        printk_deferred_enter();
 
-       found = find_stack(bucket, entries, nr_entries, hash);
+       /* Try to find again, to avoid concurrently inserting duplicates. */
+       found = find_stack(bucket, entries, nr_entries, hash, depot_flags);
        if (!found) {
                struct stack_record *new =
                        depot_alloc_stack(entries, nr_entries, hash, &prealloc);
 
                if (new) {
-                       list_add(&new->list, bucket);
+                       /*
+                        * This releases the stack record into the bucket and
+                        * makes it visible to readers in find_stack().
+                        */
+                       list_add_rcu(&new->hash_list, bucket);
                        found = new;
                }
-       } else {
-               if (depot_flags & STACK_DEPOT_FLAG_GET)
-                       refcount_inc(&found->count);
+       }
+
+       if (prealloc) {
                /*
-                * Stack depot already contains this stack trace, but let's
-                * keep the preallocated memory for future.
+                * Either stack depot already contains this stack trace, or
+                * depot_alloc_stack() did not consume the preallocated memory.
+                * Try to keep the preallocated memory for future.
                 */
-               if (prealloc)
-                       depot_keep_new_pool(&prealloc);
+               depot_keep_new_pool(&prealloc);
        }
 
        printk_deferred_exit();
-       write_unlock_irqrestore(&pool_rwlock, flags);
+       raw_spin_unlock_irqrestore(&pool_lock, flags);
 exit:
        if (prealloc) {
                /* Stack depot didn't use this memory, free it. */
@@ -592,7 +728,6 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
                               unsigned long **entries)
 {
        struct stack_record *stack;
-       unsigned long flags;
 
        *entries = NULL;
        /*
@@ -604,13 +739,13 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
        if (!handle || stack_depot_disabled)
                return 0;
 
-       read_lock_irqsave(&pool_rwlock, flags);
-       printk_deferred_enter();
-
        stack = depot_fetch_stack(handle);
-
-       printk_deferred_exit();
-       read_unlock_irqrestore(&pool_rwlock, flags);
+       /*
+        * Should never be NULL, otherwise this is a use-after-put (or just a
+        * corrupt handle).
+        */
+       if (WARN(!stack, "corrupt handle or use after stack_depot_put()"))
+               return 0;
 
        *entries = stack->entries;
        return stack->size;
@@ -620,29 +755,20 @@ EXPORT_SYMBOL_GPL(stack_depot_fetch);
 void stack_depot_put(depot_stack_handle_t handle)
 {
        struct stack_record *stack;
-       unsigned long flags;
 
        if (!handle || stack_depot_disabled)
                return;
 
-       write_lock_irqsave(&pool_rwlock, flags);
-       printk_deferred_enter();
-
        stack = depot_fetch_stack(handle);
-       if (WARN_ON(!stack))
-               goto out;
-
-       if (refcount_dec_and_test(&stack->count)) {
-               /* Unlink stack from the hash table. */
-               list_del(&stack->list);
+       /*
+        * Should always be able to find the stack record, otherwise this is an
+        * unbalanced put attempt (or corrupt handle).
+        */
+       if (WARN(!stack, "corrupt handle or unbalanced stack_depot_put()"))
+               return;
 
-               /* Free stack. */
+       if (refcount_dec_and_test(&stack->count))
                depot_free_stack(stack);
-       }
-
-out:
-       printk_deferred_exit();
-       write_unlock_irqrestore(&pool_rwlock, flags);
 }
 EXPORT_SYMBOL_GPL(stack_depot_put);
 
@@ -690,3 +816,30 @@ unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
        return parts.extra;
 }
 EXPORT_SYMBOL(stack_depot_get_extra_bits);
+
+static int stats_show(struct seq_file *seq, void *v)
+{
+       /*
+        * data race ok: These are just statistics counters, and approximate
+        * statistics are ok for debugging.
+        */
+       seq_printf(seq, "pools: %d\n", data_race(pools_num));
+       for (int i = 0; i < DEPOT_COUNTER_COUNT; i++)
+               seq_printf(seq, "%s: %ld\n", counter_names[i], data_race(counters[i]));
+
+       return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(stats);
+
+static int depot_debugfs_init(void)
+{
+       struct dentry *dir;
+
+       if (stack_depot_disabled)
+               return 0;
+
+       dir = debugfs_create_dir("stackdepot", NULL);
+       debugfs_create_file("stats", 0444, dir, NULL, &stats_fops);
+       return 0;
+}
+late_initcall(depot_debugfs_init);
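Note: the lockless lookup above hinges on refcount_inc_not_zero(): a record whose count has reached zero may already be on the freelist, with its entries[] unioned over freelist state, so a reader may only take a reference by atomically moving the count from non-zero to non-zero-plus-one. A userspace analogue of that primitive (a sketch in C11 atomics; the kernel version lives in <linux/refcount.h>):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Succeeds only if the count was non-zero, mirroring
     * refcount_inc_not_zero(): zero means "being freed, hands off". */
    static bool ref_get_unless_zero(atomic_int *count)
    {
            int old = atomic_load_explicit(count, memory_order_relaxed);

            while (old != 0) {
                    if (atomic_compare_exchange_weak_explicit(
                                count, &old, old + 1,
                                memory_order_acquire, memory_order_relaxed))
                            return true;    /* reference taken */
            }
            return false;                   /* record is being freed */
    }
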
index 94ef5c02b459642f2625775bc66ca147cb2ac992..94c958f7ebb50dd925070157c0d0b2432dfc0483 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/page_owner.h>
 #include <linux/sched/sysctl.h>
 #include <linux/memory-tiers.h>
+#include <linux/compat.h>
 
 #include <asm/tlb.h>
 #include <asm/pgalloc.h>
@@ -809,7 +810,10 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
 {
        loff_t off_end = off + len;
        loff_t off_align = round_up(off, size);
-       unsigned long len_pad, ret;
+       unsigned long len_pad, ret, off_sub;
+
+       if (IS_ENABLED(CONFIG_32BIT) || in_compat_syscall())
+               return 0;
 
        if (off_end <= off_align || (off_end - off_align) < size)
                return 0;
@@ -835,7 +839,13 @@ static unsigned long __thp_get_unmapped_area(struct file *filp,
        if (ret == addr)
                return addr;
 
-       ret += (off - ret) & (size - 1);
+       off_sub = (off - ret) & (size - 1);
+
+       if (current->mm->get_unmapped_area == arch_get_unmapped_area_topdown &&
+           !off_sub)
+               return ret + size;
+
+       ret += off_sub;
        return ret;
 }
 
@@ -2437,7 +2447,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
                        page = pmd_page(old_pmd);
                        folio = page_folio(page);
                        if (!folio_test_dirty(folio) && pmd_dirty(old_pmd))
-                               folio_set_dirty(folio);
+                               folio_mark_dirty(folio);
                        if (!folio_test_referenced(folio) && pmd_young(old_pmd))
                                folio_set_referenced(folio);
                        folio_remove_rmap_pmd(folio, page, vma);
@@ -3563,7 +3573,7 @@ int set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
        }
 
        if (pmd_dirty(pmdval))
-               folio_set_dirty(folio);
+               folio_mark_dirty(folio);
        if (pmd_write(pmdval))
                entry = make_writable_migration_entry(page_to_pfn(page));
        else if (anon_exclusive)
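Note: the padding trick in __thp_get_unmapped_area() requests len plus one extra THP unit, then slides the returned address so that address and file offset are congruent modulo the THP size, letting a single PMD map both. Worked example with a 2 MiB THP:

    /* size = 0x200000 (2 MiB), off % size = 0x1ff000,
     * ret = 0x7f0000000000 (2 MiB-aligned):
     *   off_sub        = (off - ret) & (size - 1) = 0x1ff000
     *   ret + off_sub  = 0x7f00001ff000
     * so (ret + off_sub) % size == off % size and the PMDs line up. */
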
index abd92869874d75f62cd303475ba2b44c67db38ac..4dcb2ee35eca856a43694f4402dea0c1c9bf6d8a 100644 (file)
@@ -2176,6 +2176,9 @@ static void __init memmap_init_reserved_pages(void)
                        start = region->base;
                        end = start + region->size;
 
+                       if (nid == NUMA_NO_NODE || nid >= MAX_NUMNODES)
+                               nid = early_pfn_to_nid(PFN_DOWN(start));
+
                        reserve_bootmem_region(start, end, nid);
                }
        }
index e4c8735e7c85cf061a2ab31c9be250934c680879..46d8d02114cfeeda78049fa986d29ac1b324d7f3 100644 (file)
@@ -2623,8 +2623,9 @@ static unsigned long calculate_high_delay(struct mem_cgroup *memcg,
 }
 
 /*
- * Scheduled by try_charge() to be executed from the userland return path
- * and reclaims memory over the high limit.
+ * Reclaims memory over the high limit. Called directly from
+ * try_charge() (context permitting), as well as from the userland
+ * return path where reclaim is always able to block.
  */
 void mem_cgroup_handle_over_high(gfp_t gfp_mask)
 {
@@ -2643,6 +2644,17 @@ void mem_cgroup_handle_over_high(gfp_t gfp_mask)
        current->memcg_nr_pages_over_high = 0;
 
 retry_reclaim:
+       /*
+        * Bail if the task is already exiting. Unlike memory.max,
+        * memory.high enforcement isn't as strict, and there is no
+        * OOM killer involved, which means the excess could already
+        * be much bigger (and still growing) than it could for
+        * memory.max; the dying task could get stuck in fruitless
+        * reclaim for a long time, which isn't desirable.
+        */
+       if (task_is_dying())
+               goto out;
+
        /*
         * The allocating task should reclaim at least the batch size, but for
         * subsequent retries we only want to do what's necessary to prevent oom
@@ -2693,6 +2705,9 @@ retry_reclaim:
        }
 
        /*
+        * Reclaim didn't manage to push usage below the limit, slow
+        * this allocating task down.
+        *
         * If we exit early, we're guaranteed to die (since
         * schedule_timeout_killable sets TASK_KILLABLE). This means we don't
         * need to account for any ill-begotten jiffies to pay them off later.
@@ -2887,11 +2902,17 @@ done_restock:
                }
        } while ((memcg = parent_mem_cgroup(memcg)));
 
+       /*
+        * Reclaim is set up above to be called from the userland
+        * return path. But also attempt synchronous reclaim to avoid
+        * excessive overrun while the task is still inside the
+        * kernel. If this is successful, the return path will see it
+        * when it rechecks the overage and simply bail out.
+        */
        if (current->memcg_nr_pages_over_high > MEMCG_CHARGE_BATCH &&
            !(current->flags & PF_MEMALLOC) &&
-           gfpflags_allow_blocking(gfp_mask)) {
+           gfpflags_allow_blocking(gfp_mask))
                mem_cgroup_handle_over_high(gfp_mask);
-       }
        return 0;
 }
 
index 4f9b61f4a6682a530a202d02a6998c0b687906dd..636280d04008d8550dda8f9aa12acf6da5d92848 100644 (file)
@@ -982,7 +982,7 @@ static bool has_extra_refcount(struct page_state *ps, struct page *p,
        int count = page_count(p) - 1;
 
        if (extra_pins)
-               count -= 1;
+               count -= folio_nr_pages(page_folio(p));
 
        if (count > 0) {
                pr_err("%#lx: %s still referenced by %d users\n",
index 7e1f4849463aa3645a0eead97f40a90caf5e6d5f..89bcae0b224d6d43b4720c5c71a6c528e683bafb 100644 (file)
@@ -1464,7 +1464,7 @@ static unsigned long zap_pte_range(struct mmu_gather *tlb,
                        delay_rmap = 0;
                        if (!folio_test_anon(folio)) {
                                if (pte_dirty(ptent)) {
-                                       folio_set_dirty(folio);
+                                       folio_mark_dirty(folio);
                                        if (tlb_delay_rmap(tlb)) {
                                                delay_rmap = 1;
                                                force_flush = 1;
index b78e83d351d2864a6a339059ac734b6602eb5824..d89770eaab6b6111117783ca7ff532871c1d71a5 100644 (file)
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1825,15 +1825,17 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
                /*
                 * mmap_region() will call shmem_zero_setup() to create a file,
                 * so use shmem's get_unmapped_area in case it can be huge.
-                * do_mmap() will clear pgoff, so match alignment.
                 */
-               pgoff = 0;
                get_area = shmem_get_unmapped_area;
        } else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
                /* Ensures that larger anonymous mappings are THP aligned. */
                get_area = thp_get_unmapped_area;
        }
 
+       /* Always treat pgoff as zero for anonymous memory. */
+       if (!file)
+               pgoff = 0;
+
        addr = get_area(file, addr, len, pgoff, flags);
        if (IS_ERR_VALUE(addr))
                return addr;
index cd4e4ae77c40ae0497efeaa8fb391f6550e51a4b..02147b61712bc9e3536061bb33d3f54f2e5db463 100644 (file)
@@ -1638,7 +1638,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
         */
        dtc->wb_thresh = __wb_calc_thresh(dtc);
        dtc->wb_bg_thresh = dtc->thresh ?
-               div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
+               div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
 
        /*
         * In order to avoid the stacked BDI deadlock we need
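Note: the distinction between the two helpers is the divisor width: div_u64() takes a u32 divisor, div64_u64() a full u64. Passing a 64-bit thresh to div_u64() silently truncates it:

    /* u64 div_u64(u64 dividend, u32 divisor);
     * u64 div64_u64(u64 dividend, u64 divisor);
     * A thresh of 0x100000000 truncates to 0 (divide-by-zero) and
     * 0x100000001 truncates to 1; div64_u64() keeps all 64 bits. */
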
index 23620c57c1225bef9e3e1193a7163c36a916951f..2648ec4f04947b2e837377da68d7b8ae1fd48f7a 100644 (file)
@@ -469,7 +469,7 @@ static inline int ra_alloc_folio(struct readahead_control *ractl, pgoff_t index,
 
        if (!folio)
                return -ENOMEM;
-       mark = round_up(mark, 1UL << order);
+       mark = round_down(mark, 1UL << order);
        if (index == mark)
                folio_set_readahead(folio);
        err = filemap_add_folio(ractl->mapping, folio, index, gfp);
@@ -575,7 +575,7 @@ static void ondemand_readahead(struct readahead_control *ractl,
         * It's the expected callback index, assume sequential access.
         * Ramp up sizes, and push forward the readahead window.
         */
-       expected = round_up(ra->start + ra->size - ra->async_size,
+       expected = round_down(ra->start + ra->size - ra->async_size,
                        1UL << order);
        if (index == expected || index == (ra->start + ra->size)) {
                ra->start += ra->size;
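Note: with large folios, the readahead marker must land on the first index of a folio; otherwise the index == mark comparison can never be true and async readahead stalls. Rounding down selects the folio's starting index. Worked example with order-2 (4-page) folios:

    /* Folio covers indices [8..11], mark = 10:
     *   round_up(10, 4)   = 12  -> outside the folio, never matches
     *   round_down(10, 4) = 8   -> the folio's first index, matches */
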
index 20e3b0d9cf7ed0d59d86a11b2472f0e138160692..75fcf1f783bc567d1a916c85fb7abddc169f7fb5 100644 (file)
@@ -357,6 +357,7 @@ static __always_inline ssize_t mfill_atomic_hugetlb(
                                              unsigned long dst_start,
                                              unsigned long src_start,
                                              unsigned long len,
+                                             atomic_t *mmap_changing,
                                              uffd_flags_t flags)
 {
        struct mm_struct *dst_mm = dst_vma->vm_mm;
@@ -472,6 +473,15 @@ retry:
                                goto out;
                        }
                        mmap_read_lock(dst_mm);
+                       /*
+                        * If memory mappings are changing because of non-cooperative
+                        * operation (e.g. mremap) running in parallel, bail out and
+                        * request the user to retry later
+                        */
+                       if (mmap_changing && atomic_read(mmap_changing)) {
+                               err = -EAGAIN;
+                               break;
+                       }
 
                        dst_vma = NULL;
                        goto retry;
@@ -506,6 +516,7 @@ extern ssize_t mfill_atomic_hugetlb(struct vm_area_struct *dst_vma,
                                    unsigned long dst_start,
                                    unsigned long src_start,
                                    unsigned long len,
+                                   atomic_t *mmap_changing,
                                    uffd_flags_t flags);
 #endif /* CONFIG_HUGETLB_PAGE */
 
@@ -622,8 +633,8 @@ retry:
         * If this is a HUGETLB vma, pass off to appropriate routine
         */
        if (is_vm_hugetlb_page(dst_vma))
-               return  mfill_atomic_hugetlb(dst_vma, dst_start,
-                                            src_start, len, flags);
+               return  mfill_atomic_hugetlb(dst_vma, dst_start, src_start,
+                                            len, mmap_changing, flags);
 
        if (!vma_is_anonymous(dst_vma) && !vma_is_shmem(dst_vma))
                goto out_unlock;
index 214532173536b790cf032615f73fb3d868d2aae1..a3b68243fd4b18492220339f8a2151598cf6e98a 100644 (file)
@@ -118,12 +118,16 @@ static int vlan_changelink(struct net_device *dev, struct nlattr *tb[],
        }
        if (data[IFLA_VLAN_INGRESS_QOS]) {
                nla_for_each_nested(attr, data[IFLA_VLAN_INGRESS_QOS], rem) {
+                       if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
+                               continue;
                        m = nla_data(attr);
                        vlan_dev_set_ingress_priority(dev, m->to, m->from);
                }
        }
        if (data[IFLA_VLAN_EGRESS_QOS]) {
                nla_for_each_nested(attr, data[IFLA_VLAN_EGRESS_QOS], rem) {
+                       if (nla_type(attr) != IFLA_VLAN_QOS_MAPPING)
+                               continue;
                        m = nla_data(attr);
                        err = vlan_dev_set_egress_priority(dev, m->from, m->to);
                        if (err)
index f01a9b858347b41e88c25632d5d9524cbabba9a1..cb2dab0feee0abe758479a7a001342bf6613df08 100644 (file)
@@ -11551,6 +11551,7 @@ static struct pernet_operations __net_initdata netdev_net_ops = {
 
 static void __net_exit default_device_exit_net(struct net *net)
 {
+       struct netdev_name_node *name_node, *tmp;
        struct net_device *dev, *aux;
        /*
         * Push all migratable network devices back to the
@@ -11573,6 +11574,14 @@ static void __net_exit default_device_exit_net(struct net *net)
                snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
                if (netdev_name_in_use(&init_net, fb_name))
                        snprintf(fb_name, IFNAMSIZ, "dev%%d");
+
+               netdev_for_each_altname_safe(dev, name_node, tmp)
+                       if (netdev_name_in_use(&init_net, name_node->name)) {
+                               netdev_name_node_del(name_node);
+                               synchronize_rcu();
+                               __netdev_name_node_alt_destroy(name_node);
+                       }
+
                err = dev_change_net_namespace(dev, &init_net, fb_name);
                if (err) {
                        pr_emerg("%s: failed to move %s to init_net: %d\n",
index cf93e188785ba7f0fd6e9428762bf02105eb3154..7480b4c8429808378f7c5ec499c4f479d5a4b285 100644 (file)
@@ -63,6 +63,9 @@ int dev_change_name(struct net_device *dev, const char *newname);
 
 #define netdev_for_each_altname(dev, namenode)                         \
        list_for_each_entry((namenode), &(dev)->name_node->list, list)
+#define netdev_for_each_altname_safe(dev, namenode, next)              \
+       list_for_each_entry_safe((namenode), (next), &(dev)->name_node->list, \
+                                list)
 
 int netdev_name_node_alt_create(struct net_device *dev, const char *name);
 int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
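Note: the _safe variant caches the successor before the loop body runs, which is what lets default_device_exit_net() above unlink and destroy the current altname mid-walk (with a synchronize_rcu() between unlink and free so lockless name lookups cannot touch freed memory). The general shape of the iterator (names illustrative):

    /* @next is fetched before the body executes, so the body may
     * unlink and free @pos without derailing the traversal. */
    list_for_each_entry_safe(pos, next, head, list) {
            list_del(&pos->list);
            kfree(pos);
    }
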
index 24061f29c9dd25bcf2e852471d0e8e394ff4121a..ef3e78b6a39c45b9487931e0b7fa438e722aac2e 100644 (file)
@@ -83,6 +83,7 @@
 #include <net/netfilter/nf_conntrack_bpf.h>
 #include <net/netkit.h>
 #include <linux/un.h>
+#include <net/xdp_sock_drv.h>
 
 #include "dev.h"
 
@@ -4092,10 +4093,46 @@ static int bpf_xdp_frags_increase_tail(struct xdp_buff *xdp, int offset)
        memset(skb_frag_address(frag) + skb_frag_size(frag), 0, offset);
        skb_frag_size_add(frag, offset);
        sinfo->xdp_frags_size += offset;
+       if (rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL)
+               xsk_buff_get_tail(xdp)->data_end += offset;
 
        return 0;
 }
 
+static void bpf_xdp_shrink_data_zc(struct xdp_buff *xdp, int shrink,
+                                  struct xdp_mem_info *mem_info, bool release)
+{
+       struct xdp_buff *zc_frag = xsk_buff_get_tail(xdp);
+
+       if (release) {
+               xsk_buff_del_tail(zc_frag);
+               __xdp_return(NULL, mem_info, false, zc_frag);
+       } else {
+               zc_frag->data_end -= shrink;
+       }
+}
+
+static bool bpf_xdp_shrink_data(struct xdp_buff *xdp, skb_frag_t *frag,
+                               int shrink)
+{
+       struct xdp_mem_info *mem_info = &xdp->rxq->mem;
+       bool release = skb_frag_size(frag) == shrink;
+
+       if (mem_info->type == MEM_TYPE_XSK_BUFF_POOL) {
+               bpf_xdp_shrink_data_zc(xdp, shrink, mem_info, release);
+               goto out;
+       }
+
+       if (release) {
+               struct page *page = skb_frag_page(frag);
+
+               __xdp_return(page_address(page), mem_info, false, NULL);
+       }
+
+out:
+       return release;
+}
+
 static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
 {
        struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp);
@@ -4110,12 +4147,7 @@ static int bpf_xdp_frags_shrink_tail(struct xdp_buff *xdp, int offset)
 
                len_free += shrink;
                offset -= shrink;
-
-               if (skb_frag_size(frag) == shrink) {
-                       struct page *page = skb_frag_page(frag);
-
-                       __xdp_return(page_address(page), &xdp->rxq->mem,
-                                    false, NULL);
+               if (bpf_xdp_shrink_data(xdp, frag, shrink)) {
                        n_frags_free++;
                } else {
                        skb_frag_size_sub(frag, shrink);
index f35c2e9984062ba4bed637eaeace4eb9e71dadc0..63de5c635842b6f9e6d92f2a28a69009e54ec68c 100644 (file)
@@ -33,9 +33,6 @@
 
 void reqsk_queue_alloc(struct request_sock_queue *queue)
 {
-       spin_lock_init(&queue->rskq_lock);
-
-       spin_lock_init(&queue->fastopenq.lock);
        queue->fastopenq.rskq_rst_head = NULL;
        queue->fastopenq.rskq_rst_tail = NULL;
        queue->fastopenq.qlen = 0;
index 158dbdebce6a3693deb63e557e856d9cdd7500ae..0a7f46c37f0cfc169e11377107c8342c229da0de 100644 (file)
 #include <linux/interrupt.h>
 #include <linux/poll.h>
 #include <linux/tcp.h>
+#include <linux/udp.h>
 #include <linux/init.h>
 #include <linux/highmem.h>
 #include <linux/user_namespace.h>
@@ -4144,8 +4145,14 @@ bool sk_busy_loop_end(void *p, unsigned long start_time)
 {
        struct sock *sk = p;
 
-       return !skb_queue_empty_lockless(&sk->sk_receive_queue) ||
-              sk_busy_loop_timeout(sk, start_time);
+       if (!skb_queue_empty_lockless(&sk->sk_receive_queue))
+               return true;
+
+       if (sk_is_udp(sk) &&
+           !skb_queue_empty_lockless(&udp_sk(sk)->reader_queue))
+               return true;
+
+       return sk_busy_loop_timeout(sk, start_time);
 }
 EXPORT_SYMBOL(sk_busy_loop_end);
 #endif /* CONFIG_NET_RX_BUSY_POLL */
index 835f4f9d98d25559fb8965a7531c6863448a55c2..4e635dd3d3c8cca0aee00fa508368dc3d8965b93 100644 (file)
@@ -330,6 +330,9 @@ lookup_protocol:
        if (INET_PROTOSW_REUSE & answer_flags)
                sk->sk_reuse = SK_CAN_REUSE;
 
+       if (INET_PROTOSW_ICSK & answer_flags)
+               inet_init_csk_locks(sk);
+
        inet = inet_sk(sk);
        inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
 
index 8e2eb1793685ecd72da75bc841af12b90e85fcc7..459af1f8973958611c43936b0894f6154d23b99a 100644 (file)
@@ -727,6 +727,10 @@ out:
        }
        if (req)
                reqsk_put(req);
+
+       if (newsk)
+               inet_init_csk_locks(newsk);
+
        return newsk;
 out_err:
        newsk = NULL;
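Note: together with the matching hunks in af_inet.c and af_inet6.c, this moves the accept-queue lock setup from reqsk_queue_alloc() (see the removal in net/core/request_sock.c above) to the points where a connection-oriented socket is created or accepted. Judging from the removed spin_lock_init() calls, inet_init_csk_locks() presumably amounts to (a reconstruction, not quoted from the tree):

    static inline void inet_init_csk_locks(struct sock *sk)
    {
            struct inet_connection_sock *icsk = inet_csk(sk);

            spin_lock_init(&icsk->icsk_accept_queue.rskq_lock);
            spin_lock_init(&icsk->icsk_accept_queue.fastopenq.lock);
    }
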
index 1baa484d21902d2492fc2830d960100dc09683bf..a1c6de385ccef91fe3c3e072ac5d2a20f0394a2b 100644 (file)
@@ -722,6 +722,7 @@ void tcp_push(struct sock *sk, int flags, int mss_now,
                if (!test_bit(TSQ_THROTTLED, &sk->sk_tsq_flags)) {
                        NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
                        set_bit(TSQ_THROTTLED, &sk->sk_tsq_flags);
+                       smp_mb__after_atomic();
                }
                /* It is possible TX completion already happened
                 * before we set TSQ_THROTTLED.
index 13a1833a4df52956431c5c2fefcb6af80e1a828f..959bfd9f6344f11241dd20246f92bd1d47ff565e 100644 (file)
@@ -199,6 +199,9 @@ lookup_protocol:
        if (INET_PROTOSW_REUSE & answer_flags)
                sk->sk_reuse = SK_CAN_REUSE;
 
+       if (INET_PROTOSW_ICSK & answer_flags)
+               inet_init_csk_locks(sk);
+
        inet = inet_sk(sk);
        inet_assign_bit(IS_ICSK, sk, INET_PROTOSW_ICSK & answer_flags);
 
index 9b06c380866b53bcb395bf255587279db025d11d..20551cfb7da6d8dd098c906477895e26c080fe32 100644 (file)
@@ -928,14 +928,15 @@ copy_uaddr:
  */
 static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
 {
+       DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
        struct sock *sk = sock->sk;
        struct llc_sock *llc = llc_sk(sk);
-       DECLARE_SOCKADDR(struct sockaddr_llc *, addr, msg->msg_name);
        int flags = msg->msg_flags;
        int noblock = flags & MSG_DONTWAIT;
+       int rc = -EINVAL, copied = 0, hdrlen, hh_len;
        struct sk_buff *skb = NULL;
+       struct net_device *dev;
        size_t size = 0;
-       int rc = -EINVAL, copied = 0, hdrlen;
 
        dprintk("%s: sending from %02X to %02X\n", __func__,
                llc->laddr.lsap, llc->daddr.lsap);
@@ -955,22 +956,29 @@ static int llc_ui_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
                if (rc)
                        goto out;
        }
-       hdrlen = llc->dev->hard_header_len + llc_ui_header_len(sk, addr);
+       dev = llc->dev;
+       hh_len = LL_RESERVED_SPACE(dev);
+       hdrlen = llc_ui_header_len(sk, addr);
        size = hdrlen + len;
-       if (size > llc->dev->mtu)
-               size = llc->dev->mtu;
+       size = min_t(size_t, size, READ_ONCE(dev->mtu));
        copied = size - hdrlen;
        rc = -EINVAL;
        if (copied < 0)
                goto out;
        release_sock(sk);
-       skb = sock_alloc_send_skb(sk, size, noblock, &rc);
+       skb = sock_alloc_send_skb(sk, hh_len + size, noblock, &rc);
        lock_sock(sk);
        if (!skb)
                goto out;
-       skb->dev      = llc->dev;
+       if (sock_flag(sk, SOCK_ZAPPED) ||
+           llc->dev != dev ||
+           hdrlen != llc_ui_header_len(sk, addr) ||
+           hh_len != LL_RESERVED_SPACE(dev) ||
+           size > READ_ONCE(dev->mtu))
+               goto out;
+       skb->dev      = dev;
        skb->protocol = llc_proto_type(addr->sllc_arphrd);
-       skb_reserve(skb, hdrlen);
+       skb_reserve(skb, hh_len + hdrlen);
        rc = memcpy_from_msg(skb_put(skb, copied), msg, copied);
        if (rc)
                goto out;
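
The fix snapshots the device, its headroom and MTU, drops the socket lock across the blocking allocation, and re-validates the snapshot after relocking before touching the skb. A generic userspace sketch of that revalidate-after-relock pattern (all names assumed):

    #include <pthread.h>
    #include <stdlib.h>

    struct dev { int mtu; int headroom; };
    struct conn { pthread_mutex_t lock; struct dev *dev; };

    static void *alloc_buf_checked(struct conn *c, size_t len)
    {
            pthread_mutex_lock(&c->lock);
            struct dev *dev = c->dev;
            int mtu = dev->mtu, headroom = dev->headroom;
            pthread_mutex_unlock(&c->lock);

            void *buf = malloc(headroom + mtu);     /* may block, lock dropped */

            pthread_mutex_lock(&c->lock);
            if (!buf || c->dev != dev || dev->mtu != mtu ||
                dev->headroom != headroom || len > (size_t)mtu) {
                    free(buf);                      /* state changed underneath us */
                    buf = NULL;
            }
            pthread_mutex_unlock(&c->lock);
            return buf;
    }

    int main(void)
    {
            struct dev d = { 1500, 16 };
            struct conn c = { PTHREAD_MUTEX_INITIALIZER, &d };

            free(alloc_buf_checked(&c, 1000));
            return 0;
    }
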
index 6e387aadffcecbec01d63aef4d6289bccc17f59e..4f16d9c88350b4481805c145887df23c681a159d 100644 (file)
@@ -135,22 +135,15 @@ static struct packet_type llc_packet_type __read_mostly = {
        .func = llc_rcv,
 };
 
-static struct packet_type llc_tr_packet_type __read_mostly = {
-       .type = cpu_to_be16(ETH_P_TR_802_2),
-       .func = llc_rcv,
-};
-
 static int __init llc_init(void)
 {
        dev_add_pack(&llc_packet_type);
-       dev_add_pack(&llc_tr_packet_type);
        return 0;
 }
 
 static void __exit llc_exit(void)
 {
        dev_remove_pack(&llc_packet_type);
-       dev_remove_pack(&llc_tr_packet_type);
 }
 
 module_init(llc_init);
index cb0291decf2e56c7d4111e649f41d28577af987e..13438cc0a6b139b6cb10c15ce894153706514811 100644 (file)
@@ -62,7 +62,6 @@ config MAC80211_KUNIT_TEST
        depends on KUNIT
        depends on MAC80211
        default KUNIT_ALL_TESTS
-       depends on !KERNEL_6_2
        help
          Enable this option to test mac80211 internals with kunit.
 
index bf1adcd96b411327ba79b3bdc6734df1afd605ca..4391d8dd634bb557771dcc07c11bab296c5a18f3 100644 (file)
@@ -404,7 +404,10 @@ void sta_info_free(struct ieee80211_local *local, struct sta_info *sta)
        int i;
 
        for (i = 0; i < ARRAY_SIZE(sta->link); i++) {
-               if (!(sta->sta.valid_links & BIT(i)))
+               struct link_sta_info *link_sta;
+
+               link_sta = rcu_access_pointer(sta->link[i]);
+               if (!link_sta)
                        continue;
 
                sta_remove_link(sta, i, false);
@@ -910,6 +913,8 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
        if (ieee80211_vif_is_mesh(&sdata->vif))
                mesh_accept_plinks_update(sdata);
 
+       ieee80211_check_fast_xmit(sta);
+
        return 0;
  out_remove:
        if (sta->sta.valid_links)
index 314998fdb1a5a4853f84a90edf2ba2312933719a..68a48abc72876c4abaa8cf4c95d7c2793dc07813 100644 (file)
@@ -3048,7 +3048,7 @@ void ieee80211_check_fast_xmit(struct sta_info *sta)
            sdata->vif.type == NL80211_IFTYPE_STATION)
                goto out;
 
-       if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED))
+       if (!test_sta_flag(sta, WLAN_STA_AUTHORIZED) || !sta->uploaded)
                goto out;
 
        if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
index 4b55533ce5ca2c29b1648b4f36de3e835c8953a6..c537104411e7d1b1c1b449b55c7a3c43fb7e2ac3 100644 (file)
@@ -24,6 +24,7 @@
 #include <net/sock.h>
 
 #define NFT_MODULE_AUTOLOAD_LIMIT (MODULE_NAME_LEN - sizeof("nft-expr-255-"))
+#define NFT_SET_MAX_ANONLEN 16
 
 unsigned int nf_tables_net_id __read_mostly;
 
@@ -4413,6 +4414,9 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
                if (p[1] != 'd' || strchr(p + 2, '%'))
                        return -EINVAL;
 
+               if (strnlen(name, NFT_SET_MAX_ANONLEN) >= NFT_SET_MAX_ANONLEN)
+                       return -EINVAL;
+
                inuse = (unsigned long *)get_zeroed_page(GFP_KERNEL);
                if (inuse == NULL)
                        return -ENOMEM;
@@ -10988,16 +10992,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
        data->verdict.code = ntohl(nla_get_be32(tb[NFTA_VERDICT_CODE]));
 
        switch (data->verdict.code) {
-       default:
-               switch (data->verdict.code & NF_VERDICT_MASK) {
-               case NF_ACCEPT:
-               case NF_DROP:
-               case NF_QUEUE:
-                       break;
-               default:
-                       return -EINVAL;
-               }
-               fallthrough;
+       case NF_ACCEPT:
+       case NF_DROP:
+       case NF_QUEUE:
+               break;
        case NFT_CONTINUE:
        case NFT_BREAK:
        case NFT_RETURN:
@@ -11032,6 +11030,8 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
 
                data->verdict.chain = chain;
                break;
+       default:
+               return -EINVAL;
        }
 
        desc->len = sizeof(data->verdict);
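
The rewritten switch lists the accepted verdict codes explicitly and rejects everything else in default, instead of masking and falling through. A compact sketch of the same validation shape (enum values here are placeholders, not the uapi constants):

    #include <errno.h>

    enum verdict {
            V_ACCEPT, V_DROP, V_QUEUE,      /* netfilter verdicts */
            V_CONTINUE, V_BREAK, V_RETURN,  /* nftables control verdicts */
            V_JUMP, V_GOTO,                 /* need a target chain */
    };

    static int validate_verdict(enum verdict code)
    {
            switch (code) {
            case V_ACCEPT:
            case V_DROP:
            case V_QUEUE:
            case V_CONTINUE:
            case V_BREAK:
            case V_RETURN:
                    return 0;
            case V_JUMP:
            case V_GOTO:
                    return 0;               /* would also resolve the chain here */
            default:
                    return -EINVAL;         /* unknown codes rejected up front */
            }
    }

    int main(void)
    {
            return validate_verdict(V_DROP);
    }
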
index 680fe557686e42d3421a445b6c5472bd4056a65a..274b6f7e6bb57e4f270262ef923ebf8d7f1cf02c 100644 (file)
@@ -357,9 +357,10 @@ static int nf_tables_netdev_event(struct notifier_block *this,
                                  unsigned long event, void *ptr)
 {
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+       struct nft_base_chain *basechain;
        struct nftables_pernet *nft_net;
-       struct nft_table *table;
        struct nft_chain *chain, *nr;
+       struct nft_table *table;
        struct nft_ctx ctx = {
                .net    = dev_net(dev),
        };
@@ -371,7 +372,8 @@ static int nf_tables_netdev_event(struct notifier_block *this,
        nft_net = nft_pernet(ctx.net);
        mutex_lock(&nft_net->commit_mutex);
        list_for_each_entry(table, &nft_net->tables, list) {
-               if (table->family != NFPROTO_NETDEV)
+               if (table->family != NFPROTO_NETDEV &&
+                   table->family != NFPROTO_INET)
                        continue;
 
                ctx.family = table->family;
@@ -380,6 +382,11 @@ static int nf_tables_netdev_event(struct notifier_block *this,
                        if (!nft_is_base_chain(chain))
                                continue;
 
+                       basechain = nft_base_chain(chain);
+                       if (table->family == NFPROTO_INET &&
+                           basechain->ops.hooknum != NF_INET_INGRESS)
+                               continue;
+
                        ctx.chain = chain;
                        nft_netdev_event(event, dev, &ctx);
                }
index 5284cd2ad532713368db0cd56bdf17baf1e0ed4d..f0eeda97bfcd9da2ea98d4ae69c5a1d4a6c30956 100644 (file)
@@ -350,6 +350,12 @@ static int nft_target_validate(const struct nft_ctx *ctx,
        unsigned int hook_mask = 0;
        int ret;
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_BRIDGE &&
+           ctx->family != NFPROTO_ARP)
+               return -EOPNOTSUPP;
+
        if (nft_is_base_chain(ctx->chain)) {
                const struct nft_base_chain *basechain =
                                                nft_base_chain(ctx->chain);
@@ -595,6 +601,12 @@ static int nft_match_validate(const struct nft_ctx *ctx,
        unsigned int hook_mask = 0;
        int ret;
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_BRIDGE &&
+           ctx->family != NFPROTO_ARP)
+               return -EOPNOTSUPP;
+
        if (nft_is_base_chain(ctx->chain)) {
                const struct nft_base_chain *basechain =
                                                nft_base_chain(ctx->chain);
index ab3362c483b4a78c1e138815764e9e80bfd5d43d..397351fa4d5f82d8bcec25e1d69f327dc60e0199 100644 (file)
@@ -384,6 +384,11 @@ static int nft_flow_offload_validate(const struct nft_ctx *ctx,
 {
        unsigned int hook_mask = (1 << NF_INET_FORWARD);
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        return nft_chain_validate_hooks(ctx->chain, hook_mask);
 }
 
index 79039afde34ecb1ca9fe1494855676ab26e7c53b..cefa25e0dbb0a2c87af43e8230cf7934ce8fa3d1 100644 (file)
@@ -58,17 +58,19 @@ static inline bool nft_limit_eval(struct nft_limit_priv *priv, u64 cost)
 static int nft_limit_init(struct nft_limit_priv *priv,
                          const struct nlattr * const tb[], bool pkts)
 {
+       u64 unit, tokens, rate_with_burst;
        bool invert = false;
-       u64 unit, tokens;
 
        if (tb[NFTA_LIMIT_RATE] == NULL ||
            tb[NFTA_LIMIT_UNIT] == NULL)
                return -EINVAL;
 
        priv->rate = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_RATE]));
+       if (priv->rate == 0)
+               return -EINVAL;
+
        unit = be64_to_cpu(nla_get_be64(tb[NFTA_LIMIT_UNIT]));
-       priv->nsecs = unit * NSEC_PER_SEC;
-       if (priv->rate == 0 || priv->nsecs < unit)
+       if (check_mul_overflow(unit, NSEC_PER_SEC, &priv->nsecs))
                return -EOVERFLOW;
 
        if (tb[NFTA_LIMIT_BURST])
@@ -77,18 +79,25 @@ static int nft_limit_init(struct nft_limit_priv *priv,
        if (pkts && priv->burst == 0)
                priv->burst = NFT_LIMIT_PKT_BURST_DEFAULT;
 
-       if (priv->rate + priv->burst < priv->rate)
+       if (check_add_overflow(priv->rate, priv->burst, &rate_with_burst))
                return -EOVERFLOW;
 
        if (pkts) {
-               tokens = div64_u64(priv->nsecs, priv->rate) * priv->burst;
+               u64 tmp = div64_u64(priv->nsecs, priv->rate);
+
+               if (check_mul_overflow(tmp, priv->burst, &tokens))
+                       return -EOVERFLOW;
        } else {
+               u64 tmp;
+
                /* The token bucket size limits the number of tokens that can be
                 * accumulated. tokens_max specifies the bucket size.
                 * tokens_max = unit * (rate + burst) / rate.
                 */
-               tokens = div64_u64(priv->nsecs * (priv->rate + priv->burst),
-                                priv->rate);
+               if (check_mul_overflow(priv->nsecs, rate_with_burst, &tmp))
+                       return -EOVERFLOW;
+
+               tokens = div64_u64(tmp, priv->rate);
        }
 
        if (tb[NFTA_LIMIT_FLAGS]) {
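
The arithmetic this hunk hardens is tokens_max = unit * NSEC_PER_SEC * (rate + burst) / rate, where every intermediate product can overflow u64. A standalone sketch using the compiler builtins that back the kernel's check_mul_overflow()/check_add_overflow() helpers (function and parameter names are illustrative):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Fail with -EOVERFLOW instead of silently wrapping. */
    static int limit_tokens(uint64_t rate, uint64_t burst, uint64_t unit,
                            uint64_t *tokens)
    {
            uint64_t nsecs, rate_with_burst, tmp;

            if (rate == 0)
                    return -EINVAL;
            if (__builtin_mul_overflow(unit, NSEC_PER_SEC, &nsecs))
                    return -EOVERFLOW;
            if (__builtin_add_overflow(rate, burst, &rate_with_burst))
                    return -EOVERFLOW;
            if (__builtin_mul_overflow(nsecs, rate_with_burst, &tmp))
                    return -EOVERFLOW;
            *tokens = tmp / rate;
            return 0;
    }

    int main(void)
    {
            uint64_t tokens;

            if (!limit_tokens(100, 5, 1, &tokens))
                    printf("tokens_max = %llu\n", (unsigned long long)tokens);
            return 0;
    }
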
index 583885ce72328fab424da04f888398eb687c896a..808f5802c2704a583c747e71d227965fa5c1a8bf 100644 (file)
@@ -143,6 +143,11 @@ static int nft_nat_validate(const struct nft_ctx *ctx,
        struct nft_nat *priv = nft_expr_priv(expr);
        int err;
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
        if (err < 0)
                return err;
index 35a2c28caa60bb6d50da5febbf5a6d2be7c9bdd9..24d977138572988e87b8c726daf67441f0b41de2 100644 (file)
@@ -166,6 +166,11 @@ static int nft_rt_validate(const struct nft_ctx *ctx, const struct nft_expr *exp
        const struct nft_rt *priv = nft_expr_priv(expr);
        unsigned int hooks;
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        switch (priv->key) {
        case NFT_RT_NEXTHOP4:
        case NFT_RT_NEXTHOP6:
index 9ed85be79452d990ad79ad9a0b31a26bb3f4c6a4..f30163e2ca620783cceda339c702c9f81b29cfa2 100644 (file)
@@ -242,6 +242,11 @@ static int nft_socket_validate(const struct nft_ctx *ctx,
                               const struct nft_expr *expr,
                               const struct nft_data **data)
 {
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        return nft_chain_validate_hooks(ctx->chain,
                                        (1 << NF_INET_PRE_ROUTING) |
                                        (1 << NF_INET_LOCAL_IN) |
index 13da882669a4ee026d286a7903e0c974e60541ac..1d737f89dfc18ccdf816e00407bc9be70c13e8f2 100644 (file)
@@ -186,7 +186,6 @@ static int nft_synproxy_do_init(const struct nft_ctx *ctx,
                break;
 #endif
        case NFPROTO_INET:
-       case NFPROTO_BRIDGE:
                err = nf_synproxy_ipv4_init(snet, ctx->net);
                if (err)
                        goto nf_ct_failure;
@@ -219,7 +218,6 @@ static void nft_synproxy_do_destroy(const struct nft_ctx *ctx)
                break;
 #endif
        case NFPROTO_INET:
-       case NFPROTO_BRIDGE:
                nf_synproxy_ipv4_fini(snet, ctx->net);
                nf_synproxy_ipv6_fini(snet, ctx->net);
                break;
@@ -253,6 +251,11 @@ static int nft_synproxy_validate(const struct nft_ctx *ctx,
                                 const struct nft_expr *expr,
                                 const struct nft_data **data)
 {
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        return nft_chain_validate_hooks(ctx->chain, (1 << NF_INET_LOCAL_IN) |
                                                    (1 << NF_INET_FORWARD));
 }
index ae15cd693f0ec2857215c1daa7e633af222de423..71412adb73d414c43d2082362e854c3ad561d815 100644 (file)
@@ -316,6 +316,11 @@ static int nft_tproxy_validate(const struct nft_ctx *ctx,
                               const struct nft_expr *expr,
                               const struct nft_data **data)
 {
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        return nft_chain_validate_hooks(ctx->chain, 1 << NF_INET_PRE_ROUTING);
 }
 
index 452f8587addadce5a2e1f480d5685eb70c5760b0..1c866757db55247b8e267fb038dd4e1fbd9681ea 100644 (file)
@@ -235,6 +235,11 @@ static int nft_xfrm_validate(const struct nft_ctx *ctx, const struct nft_expr *e
        const struct nft_xfrm *priv = nft_expr_priv(expr);
        unsigned int hooks;
 
+       if (ctx->family != NFPROTO_IPV4 &&
+           ctx->family != NFPROTO_IPV6 &&
+           ctx->family != NFPROTO_INET)
+               return -EOPNOTSUPP;
+
        switch (priv->dir) {
        case XFRM_POLICY_IN:
                hooks = (1 << NF_INET_FORWARD) |
index 4ed8ffd58ff375f3fa9f262e6f3b4d1a1aaf2731..9c962347cf859f16fc76e4d8a2fd22cdb3d142d6 100644 (file)
@@ -374,7 +374,7 @@ static void netlink_skb_destructor(struct sk_buff *skb)
        if (is_vmalloc_addr(skb->head)) {
                if (!skb->cloned ||
                    !atomic_dec_return(&(skb_shinfo(skb)->dataref)))
-                       vfree(skb->head);
+                       vfree_atomic(skb->head);
 
                skb->head = NULL;
        }
index 01c4cdfef45df32ad0b0b942e416d6bc267687e1..8435a20968ef5112d44164ecbf89071f7ee4b855 100644 (file)
@@ -419,7 +419,7 @@ static int rds_recv_track_latency(struct rds_sock *rs, sockptr_t optval,
 
        rs->rs_rx_traces = trace.rx_traces;
        for (i = 0; i < rs->rs_rx_traces; i++) {
-               if (trace.rx_trace_pos[i] > RDS_MSG_RX_DGRAM_TRACE_MAX) {
+               if (trace.rx_trace_pos[i] >= RDS_MSG_RX_DGRAM_TRACE_MAX) {
                        rs->rs_rx_traces = 0;
                        return -EFAULT;
                }
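
The one-character fix: rx_trace_pos[i] is later used as an array index, so the bound check must reject pos == RDS_MSG_RX_DGRAM_TRACE_MAX as well. Reduced to its essence (names assumed):

    #include <stdio.h>

    #define TRACE_MAX 8

    /* arr[pos] is only valid for pos < TRACE_MAX, so the reject test is
     * pos >= TRACE_MAX; allowing pos == TRACE_MAX reads one past the end. */
    static int trace_pos_valid(unsigned int pos)
    {
            return pos < TRACE_MAX;
    }

    int main(void)
    {
            printf("%d %d\n", trace_pos_valid(7), trace_pos_valid(8));
            return 0;
    }
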
index 92a12e3d0fe63646b1d82751c9986e08de6ab673..ff3d396a65aac0dec81fc79a6bea44c24cfd7a68 100644 (file)
@@ -1560,6 +1560,9 @@ tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
             chain_prev = chain,
                     chain = __tcf_get_next_chain(block, chain),
                     tcf_chain_put(chain_prev)) {
+               if (chain->tmplt_ops && add)
+                       chain->tmplt_ops->tmplt_reoffload(chain, true, cb,
+                                                         cb_priv);
                for (tp = __tcf_get_next_proto(chain, NULL); tp;
                     tp_prev = tp,
                             tp = __tcf_get_next_proto(chain, tp),
@@ -1575,6 +1578,9 @@ tcf_block_playback_offloads(struct tcf_block *block, flow_setup_cb_t *cb,
                                goto err_playback_remove;
                        }
                }
+               if (chain->tmplt_ops && !add)
+                       chain->tmplt_ops->tmplt_reoffload(chain, false, cb,
+                                                         cb_priv);
        }
 
        return 0;
@@ -3000,7 +3006,8 @@ static int tc_chain_tmplt_add(struct tcf_chain *chain, struct net *net,
        ops = tcf_proto_lookup_ops(name, true, extack);
        if (IS_ERR(ops))
                return PTR_ERR(ops);
-       if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump) {
+       if (!ops->tmplt_create || !ops->tmplt_destroy || !ops->tmplt_dump ||
+           !ops->tmplt_reoffload) {
                NL_SET_ERR_MSG(extack, "Chain templates are not supported with specified classifier");
                module_put(ops->owner);
                return -EOPNOTSUPP;
index e5314a31f75ae3a6db31cb81a3ebf5316a3005ff..efb9d2811b73d18862f824b0b7a8b4e6b905271d 100644 (file)
@@ -2721,6 +2721,28 @@ static void fl_tmplt_destroy(void *tmplt_priv)
        kfree(tmplt);
 }
 
+static void fl_tmplt_reoffload(struct tcf_chain *chain, bool add,
+                              flow_setup_cb_t *cb, void *cb_priv)
+{
+       struct fl_flow_tmplt *tmplt = chain->tmplt_priv;
+       struct flow_cls_offload cls_flower = {};
+
+       cls_flower.rule = flow_rule_alloc(0);
+       if (!cls_flower.rule)
+               return;
+
+       cls_flower.common.chain_index = chain->index;
+       cls_flower.command = add ? FLOW_CLS_TMPLT_CREATE :
+                                  FLOW_CLS_TMPLT_DESTROY;
+       cls_flower.cookie = (unsigned long) tmplt;
+       cls_flower.rule->match.dissector = &tmplt->dissector;
+       cls_flower.rule->match.mask = &tmplt->mask;
+       cls_flower.rule->match.key = &tmplt->dummy_key;
+
+       cb(TC_SETUP_CLSFLOWER, &cls_flower, cb_priv);
+       kfree(cls_flower.rule);
+}
+
 static int fl_dump_key_val(struct sk_buff *skb,
                           void *val, int val_type,
                           void *mask, int mask_type, int len)
@@ -3628,6 +3650,7 @@ static struct tcf_proto_ops cls_fl_ops __read_mostly = {
        .bind_class     = fl_bind_class,
        .tmplt_create   = fl_tmplt_create,
        .tmplt_destroy  = fl_tmplt_destroy,
+       .tmplt_reoffload = fl_tmplt_reoffload,
        .tmplt_dump     = fl_tmplt_dump,
        .get_exts       = fl_get_exts,
        .owner          = THIS_MODULE,
index 52f7c4f1e7670d723a6858614f071f73dbd88dc5..5a33908015f3e3197ad11869c6f5134799307c56 100644 (file)
@@ -164,7 +164,7 @@ static int __smc_diag_dump(struct sock *sk, struct sk_buff *skb,
        }
        if (smc_conn_lgr_valid(&smc->conn) && smc->conn.lgr->is_smcd &&
            (req->diag_ext & (1 << (SMC_DIAG_DMBINFO - 1))) &&
-           !list_empty(&smc->conn.lgr->list)) {
+           !list_empty(&smc->conn.lgr->list) && smc->conn.rmb_desc) {
                struct smc_connection *conn = &smc->conn;
                struct smcd_diag_dmbinfo dinfo;
                struct smcd_dev *smcd = conn->lgr->smcd;
index bfb2f78523a8289f0a6ea758ca61c53d06832273..545017a3daa4d6b20255c51c6c0dea73ec32ecfc 100644 (file)
@@ -717,12 +717,12 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
                                ARRAY_SIZE(rqstp->rq_bvec), xdr);
 
        iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
-                     count, 0);
+                     count, rqstp->rq_res.len);
        err = sock_sendmsg(svsk->sk_sock, &msg);
        if (err == -ECONNREFUSED) {
                /* ICMP error on earlier request. */
                iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, rqstp->rq_bvec,
-                             count, 0);
+                             count, rqstp->rq_res.len);
                err = sock_sendmsg(svsk->sk_sock, &msg);
        }
 
index a9ac85e09af37ca8f7d1599e7057f98e7d8200be..10345388ad139f5f9b35025b2336c548bd3344be 100644 (file)
@@ -206,7 +206,6 @@ config CFG80211_KUNIT_TEST
        depends on KUNIT
        depends on CFG80211
        default KUNIT_ALL_TESTS
-       depends on !KERNEL_6_2
        help
          Enable this option to test cfg80211 functions with kunit.
 
index 60877b532993219c6607c28d6b4e0fb6ae2506ad..b09700400d09744ee1b0c990e46806264df25e3b 100644 (file)
@@ -4020,6 +4020,7 @@ static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *
                }
                wiphy_unlock(&rdev->wiphy);
 
+               if_start = 0;
                wp_idx++;
        }
  out:
index 9f13aa3353e31f9692ce41db10a977ac2614d7d8..1eadfac03cc41d35709c001a77759a23f7dbdc39 100644 (file)
@@ -167,8 +167,10 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
                contd = XDP_PKT_CONTD;
 
        err = __xsk_rcv_zc(xs, xskb, len, contd);
-       if (err || likely(!frags))
-               goto out;
+       if (err)
+               goto err;
+       if (likely(!frags))
+               return 0;
 
        xskb_list = &xskb->pool->xskb_list;
        list_for_each_entry_safe(pos, tmp, xskb_list, xskb_list_node) {
@@ -177,11 +179,13 @@ static int xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
                len = pos->xdp.data_end - pos->xdp.data;
                err = __xsk_rcv_zc(xs, pos, len, contd);
                if (err)
-                       return err;
+                       goto err;
                list_del(&pos->xskb_list_node);
        }
 
-out:
+       return 0;
+err:
+       xsk_buff_free(xdp);
        return err;
 }
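
With the fix, any per-fragment receive failure releases the whole multi-buffer instead of leaking the fragments already queued. A loose userspace analogue of that free-everything-on-error shape (not the XSK API):

    #include <stdlib.h>

    struct frag { struct frag *next; void *data; };

    static void free_chain(struct frag *f)
    {
            while (f) {
                    struct frag *next = f->next;

                    free(f->data);
                    free(f);
                    f = next;
            }
    }

    static int rcv_frags(struct frag *head, int (*rcv_one)(struct frag *))
    {
            struct frag *f;

            for (f = head; f; f = f->next) {
                    if (rcv_one(f)) {
                            free_chain(head);  /* like xsk_buff_free(xdp) on err */
                            return -1;
                    }
            }
            return 0;
    }

    static int fail_second(struct frag *f)
    {
            static int n;

            (void)f;
            return ++n == 2;        /* simulate a failure on the second fragment */
    }

    int main(void)
    {
            struct frag *b = calloc(1, sizeof(*b));
            struct frag *a = calloc(1, sizeof(*a));

            a->next = b;
            return rcv_frags(a, fail_second) ? 0 : 1;
    }
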
 
index 28711cc44ced216573938f392de3b452f2176410..ce60ecd48a4dc88eed7582bc0701f7c72acc84f5 100644 (file)
@@ -555,6 +555,7 @@ struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
 
        xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
        xskb->xdp.data_meta = xskb->xdp.data;
+       xskb->xdp.flags = 0;
 
        if (pool->dma_need_sync) {
                dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
diff --git a/samples/cgroup/.gitignore b/samples/cgroup/.gitignore
new file mode 100644 (file)
index 0000000..3a01611
--- /dev/null
@@ -0,0 +1,3 @@
+/cgroup_event_listener
+/memcg_event_listener
+
index 9b7a37ae28a8818a41dada5d1dc12c5c65c791f9..a9e552a1e9105b5efb559a23e4a2943c102b12a2 100644 (file)
@@ -97,7 +97,6 @@ KBUILD_CFLAGS += $(call cc-option, -Wunused-const-variable)
 KBUILD_CFLAGS += $(call cc-option, -Wpacked-not-aligned)
 KBUILD_CFLAGS += $(call cc-option, -Wformat-overflow)
 KBUILD_CFLAGS += $(call cc-option, -Wformat-truncation)
-KBUILD_CFLAGS += $(call cc-option, -Wstringop-overflow)
 KBUILD_CFLAGS += $(call cc-option, -Wstringop-truncation)
 
 KBUILD_CPPFLAGS += -Wundef
@@ -113,7 +112,6 @@ KBUILD_CFLAGS += $(call cc-disable-warning, restrict)
 KBUILD_CFLAGS += $(call cc-disable-warning, packed-not-aligned)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-overflow)
 KBUILD_CFLAGS += $(call cc-disable-warning, format-truncation)
-KBUILD_CFLAGS += $(call cc-disable-warning, stringop-overflow)
 KBUILD_CFLAGS += $(call cc-disable-warning, stringop-truncation)
 
 ifdef CONFIG_CC_IS_CLANG
index 7717354ce0950af9627939b787efa98b4e50621c..98e1150bee9d0cbecb79c7e81cb05159f6160b04 100644 (file)
@@ -469,8 +469,10 @@ static int apparmor_file_open(struct file *file)
         * Cache permissions granted by the previous exec check, with
         * implicit read and executable mmap which are required to
         * actually execute the image.
+        *
+        * Illogically, FMODE_EXEC is in f_flags, not f_mode.
         */
-       if (current->in_execve) {
+       if (file->f_flags & __FMODE_EXEC) {
                fctx->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP;
                return 0;
        }
index 76f55dd13cb801078ba71079bf7e1c58eb2ada3b..8af2136069d239129c2994e5ee0f3e9b696ed7ea 100644 (file)
@@ -237,10 +237,6 @@ static int datablob_parse(char *datablob, const char **format,
                        break;
                }
                *decrypted_data = strsep(&datablob, " \t");
-               if (!*decrypted_data) {
-                       pr_info("encrypted_key: decrypted_data is missing\n");
-                       break;
-               }
                ret = 0;
                break;
        case Opt_load:
index 3c3af149bf1c12a94c318d188984ab4bda4a2edc..04a92c3d65d44de5502dd5955146e58cba4f4978 100644 (file)
@@ -328,7 +328,8 @@ static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
 static int tomoyo_file_open(struct file *f)
 {
        /* Don't check read permission here if called from execve(). */
-       if (current->in_execve)
+       /* Illogically, FMODE_EXEC is in f_flags, not f_mode. */
+       if (f->f_flags & __FMODE_EXEC)
                return 0;
        return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path,
                                            f->f_flags);
index d9d9923af85c2e60ca9b2161fd93867df3346d1b..a4b902f9e1c486801a7c14072e796a1b1f8e92ad 100644 (file)
@@ -15,7 +15,7 @@ LIBS = -L../ -L$(OUTPUT) -lm -lcpupower
 OBJS = $(OUTPUT)main.o $(OUTPUT)parse.o $(OUTPUT)system.o $(OUTPUT)benchmark.o
 endif
 
-CFLAGS += -D_GNU_SOURCE -I../lib -DDEFAULT_CONFIG_FILE=\"$(confdir)/cpufreq-bench.conf\"
+override CFLAGS += -D_GNU_SOURCE -I../lib -DDEFAULT_CONFIG_FILE=\"$(confdir)/cpufreq-bench.conf\"
 
 $(OUTPUT)%.o : %.c
        $(ECHO) "  CC      " $@
index 0b12c36902d82ddf23d8d71ac5067e282f1b1564..caff3834671f9dfb7d261d5b6633532f71ecd9f5 100644 (file)
@@ -65,4 +65,6 @@ cxl_core-y += config_check.o
 cxl_core-y += cxl_core_test.o
 cxl_core-y += cxl_core_exports.o
 
+KBUILD_CFLAGS := $(filter-out -Wmissing-prototypes -Wmissing-declarations, $(KBUILD_CFLAGS))
+
 obj-m += test/
index 61d5f7bcddf9a6ef9d5df5d0c4346bd93f7181f9..6b192789785612d810c6ff577b1ac47aadd9e9b3 100644 (file)
@@ -8,3 +8,5 @@ obj-m += cxl_mock_mem.o
 cxl_test-y := cxl.o
 cxl_mock-y := mock.o
 cxl_mock_mem-y := mem.o
+
+KBUILD_CFLAGS := $(filter-out -Wmissing-prototypes -Wmissing-declarations, $(KBUILD_CFLAGS))
index 8153251ea389a7dcff59d13f258cee0b066c7dbf..91a3627f301a79b90036c2cfd82217819b9bb757 100644 (file)
@@ -82,4 +82,6 @@ libnvdimm-$(CONFIG_NVDIMM_KEYS) += $(NVDIMM_SRC)/security.o
 libnvdimm-y += libnvdimm_test.o
 libnvdimm-y += config_check.o
 
+KBUILD_CFLAGS := $(filter-out -Wmissing-prototypes -Wmissing-declarations, $(KBUILD_CFLAGS))
+
 obj-m += test/
index c54d1697f439a47908f360e75b2760861a9d7939..d508486cc0bdc2c917f9386aa2aea796f12d2c1d 100755 (executable)
@@ -162,7 +162,7 @@ prio_arp()
        local mode=$1
 
        for primary_reselect in 0 1 2; do
-               prio_test "mode active-backup arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect"
+               prio_test "mode $mode arp_interval 100 arp_ip_target ${g_ip4} primary eth1 primary_reselect $primary_reselect"
                log_test "prio" "$mode arp_ip_target primary_reselect $primary_reselect"
        done
 }
@@ -178,7 +178,7 @@ prio_ns()
        fi
 
        for primary_reselect in 0 1 2; do
-               prio_test "mode active-backup arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect"
+               prio_test "mode $mode arp_interval 100 ns_ip6_target ${g_ip6} primary eth1 primary_reselect $primary_reselect"
                log_test "prio" "$mode ns_ip6_target primary_reselect $primary_reselect"
        done
 }
@@ -194,9 +194,9 @@ prio()
 
        for mode in $modes; do
                prio_miimon $mode
-               prio_arp $mode
-               prio_ns $mode
        done
+       prio_arp "active-backup"
+       prio_ns "active-backup"
 }
 
 arp_validate_test()
index 6091b45d226baf192c2d380ba893be15592f323d..79b65bdf05db6586726cc76d3313f12368d21dc5 100644 (file)
@@ -1 +1 @@
-timeout=120
+timeout=1200
index 4855ef597a152135979694fb3e9145f1db4e8bcf..f98435c502f61aa665cefc39bea873e66a273ade 100755 (executable)
@@ -270,6 +270,7 @@ for port in 0 1; do
        echo 1 > $NSIM_DEV_SYS/new_port
     fi
     NSIM_NETDEV=`get_netdev_name old_netdevs`
+    ifconfig $NSIM_NETDEV up
 
     msg="new NIC device created"
     exp0=( 0 0 0 0 )
@@ -431,6 +432,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     overflow_table0 "overflow NIC table"
@@ -488,6 +490,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     overflow_table0 "overflow NIC table"
@@ -544,6 +547,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     overflow_table0 "destroy NIC"
@@ -573,6 +577,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     msg="create VxLANs v6"
@@ -633,6 +638,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     echo 110 > $NSIM_DEV_DFS/ports/$port/udp_ports_inject_error
@@ -688,6 +694,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     msg="create VxLANs v6"
@@ -747,6 +754,7 @@ for port in 0 1; do
     fi
 
     echo $port > $NSIM_DEV_SYS/new_port
+    NSIM_NETDEV=`get_netdev_name old_netdevs`
     ifconfig $NSIM_NETDEV up
 
     msg="create VxLANs v6"
@@ -877,6 +885,7 @@ msg="re-add a port"
 
 echo 2 > $NSIM_DEV_SYS/del_port
 echo 2 > $NSIM_DEV_SYS/new_port
+NSIM_NETDEV=`get_netdev_name old_netdevs`
 check_tables
 
 msg="replace VxLAN in overflow table"
index c8416c54b4637b1380810f0c9f71bc99e1710b8e..b1fd7362c2feec339228036dace5d28fe1b1719b 100644 (file)
@@ -42,17 +42,6 @@ function die() {
        exit 1
 }
 
-# save existing dmesg so we can detect new content
-function save_dmesg() {
-       SAVED_DMESG=$(mktemp --tmpdir -t klp-dmesg-XXXXXX)
-       dmesg > "$SAVED_DMESG"
-}
-
-# cleanup temporary dmesg file from save_dmesg()
-function cleanup_dmesg_file() {
-       rm -f "$SAVED_DMESG"
-}
-
 function push_config() {
        DYNAMIC_DEBUG=$(grep '^kernel/livepatch' /sys/kernel/debug/dynamic_debug/control | \
                        awk -F'[: ]' '{print "file " $1 " line " $2 " " $4}')
@@ -99,7 +88,6 @@ function set_ftrace_enabled() {
 
 function cleanup() {
        pop_config
-       cleanup_dmesg_file
 }
 
 # setup_config - save the current config and set a script exit trap that
@@ -280,7 +268,15 @@ function set_pre_patch_ret {
 function start_test {
        local test="$1"
 
-       save_dmesg
+       # Dump something unique into the dmesg log, then stash the entry
+       # in LAST_DMESG.  The check_result() function will use it to
+       # find new kernel messages since the test started.
+       local last_dmesg_msg="livepatch kselftest timestamp: $(date --rfc-3339=ns)"
+       log "$last_dmesg_msg"
+       loop_until 'dmesg | grep -q "$last_dmesg_msg"' ||
+               die "buffer busy? can't find canary dmesg message: $last_dmesg_msg"
+       LAST_DMESG=$(dmesg | grep "$last_dmesg_msg")
+
        echo -n "TEST: $test ... "
        log "===== TEST: $test ====="
 }
@@ -291,23 +287,24 @@ function check_result {
        local expect="$*"
        local result
 
-       # Note: when comparing dmesg output, the kernel log timestamps
-       # help differentiate repeated testing runs.  Remove them with a
-       # post-comparison sed filter.
-
-       result=$(dmesg | comm --nocheck-order -13 "$SAVED_DMESG" - | \
+       # Test results include any new dmesg entry since LAST_DMESG, then:
+       # - include lines matching keywords
+       # - exclude lines matching keywords
+       # - filter out dmesg timestamp prefixes
+       result=$(dmesg | awk -v last_dmesg="$LAST_DMESG" 'p; $0 == last_dmesg { p=1 }' | \
                 grep -e 'livepatch:' -e 'test_klp' | \
                 grep -v '\(tainting\|taints\) kernel' | \
                 sed 's/^\[[ 0-9.]*\] //')
 
        if [[ "$expect" == "$result" ]] ; then
                echo "ok"
+       elif [[ "$result" == "" ]] ; then
+               echo -e "not ok\n\nbuffer overrun? can't find canary dmesg entry: $LAST_DMESG\n"
+               die "livepatch kselftest(s) failed"
        else
                echo -e "not ok\n\n$(diff -upr --label expected --label result <(echo "$expect") <(echo "$result"))\n"
                die "livepatch kselftest(s) failed"
        fi
-
-       cleanup_dmesg_file
 }
 
 # check_sysfs_rights(modname, rel_path, expected_rights) - check sysfs
index 0899019a7fcb4b04bcedca44227f2c2dd5a83597..e14bdd4455f2d2798077b8a701790bcee0732e90 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
 # Kselftest framework requirement - SKIP code is 4.
index 380b691d3eb9fbe9c1070937d9561b732343aec0..b748c48908d9d4af9ba31fe7d2443329c13c3dc2 100644 (file)
@@ -566,7 +566,7 @@ static int ksm_merge_hugepages_time(int merge_type, int mapping, int prot,
        if (map_ptr_orig == MAP_FAILED)
                err(2, "initial mmap");
 
-       if (madvise(map_ptr, len + HPAGE_SIZE, MADV_HUGEPAGE))
+       if (madvise(map_ptr, len, MADV_HUGEPAGE))
                err(2, "MADV_HUGEPAGE");
 
        pagemap_fd = open("/proc/self/pagemap", O_RDONLY);
index 193281560b61be23d3b55857030ea07b4ad3f95d..86e8f2048a409028b28ece3f755d06f535726c47 100644 (file)
@@ -15,6 +15,7 @@
 #include <unistd.h>
 #include <sys/mman.h>
 #include <fcntl.h>
+#include "vm_util.h"
 
 #define LENGTH (256UL*1024*1024)
 #define PROTECTION (PROT_READ | PROT_WRITE)
@@ -58,10 +59,16 @@ int main(int argc, char **argv)
 {
        void *addr;
        int ret;
+       size_t hugepage_size;
        size_t length = LENGTH;
        int flags = FLAGS;
        int shift = 0;
 
+       hugepage_size = default_huge_page_size();
+       /* munmap will fail if the length is not page aligned */
+       if (hugepage_size > length)
+               length = hugepage_size;
+
        if (argc > 1)
                length = atol(argv[1]) << 20;
        if (argc > 2) {
index 1d4c1589c3055d3bb22eebe2c02fa7b015e4a665..2f8b991f78cb4cade90dc05f502a647a955fb582 100644 (file)
@@ -360,7 +360,8 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
                              char pattern_seed)
 {
        void *addr, *src_addr, *dest_addr, *dest_preamble_addr;
-       unsigned long long i;
+       int d;
+       unsigned long long t;
        struct timespec t_start = {0, 0}, t_end = {0, 0};
        long long  start_ns, end_ns, align_mask, ret, offset;
        unsigned long long threshold;
@@ -378,8 +379,8 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
 
        /* Set byte pattern for source block. */
        srand(pattern_seed);
-       for (i = 0; i < threshold; i++)
-               memset((char *) src_addr + i, (char) rand(), 1);
+       for (t = 0; t < threshold; t++)
+               memset((char *) src_addr + t, (char) rand(), 1);
 
        /* Mask to zero out lower bits of address for alignment */
        align_mask = ~(c.dest_alignment - 1);
@@ -420,8 +421,8 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
 
                /* Set byte pattern for the dest preamble block. */
                srand(pattern_seed);
-               for (i = 0; i < c.dest_preamble_size; i++)
-                       memset((char *) dest_preamble_addr + i, (char) rand(), 1);
+               for (d = 0; d < c.dest_preamble_size; d++)
+                       memset((char *) dest_preamble_addr + d, (char) rand(), 1);
        }
 
        clock_gettime(CLOCK_MONOTONIC, &t_start);
@@ -437,14 +438,14 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
 
        /* Verify byte pattern after remapping */
        srand(pattern_seed);
-       for (i = 0; i < threshold; i++) {
+       for (t = 0; t < threshold; t++) {
                char c = (char) rand();
 
-               if (((char *) dest_addr)[i] != c) {
+               if (((char *) dest_addr)[t] != c) {
                        ksft_print_msg("Data after remap doesn't match at offset %llu\n",
-                                      i);
+                                      t);
                        ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
-                                       ((char *) dest_addr)[i] & 0xff);
+                                       ((char *) dest_addr)[t] & 0xff);
                        ret = -1;
                        goto clean_up_dest;
                }
@@ -453,14 +454,14 @@ static long long remap_region(struct config c, unsigned int threshold_mb,
        /* Verify the dest preamble byte pattern after remapping */
        if (c.dest_preamble_size) {
                srand(pattern_seed);
-               for (i = 0; i < c.dest_preamble_size; i++) {
+               for (d = 0; d < c.dest_preamble_size; d++) {
                        char c = (char) rand();
 
-                       if (((char *) dest_preamble_addr)[i] != c) {
+                       if (((char *) dest_preamble_addr)[d] != c) {
                                ksft_print_msg("Preamble data after remap doesn't match at offset %d\n",
-                                              i);
+                                              d);
                                ksft_print_msg("Expected: %#x\t Got: %#x\n", c & 0xff,
-                                              ((char *) dest_preamble_addr)[i] & 0xff);
+                                              ((char *) dest_preamble_addr)[d] & 0xff);
                                ret = -1;
                                goto clean_up_dest;
                        }
index 45cae7cab27e12705c59cc56f6fdf5e675805f92..a0a75f3029043727b96bdb59728ed80d4d5cd9c0 100755 (executable)
@@ -29,9 +29,15 @@ check_supported_x86_64()
        # See man 1 gzip under '-f'.
        local pg_table_levels=$(gzip -dcfq "${config}" | grep PGTABLE_LEVELS | cut -d'=' -f 2)
 
+       local cpu_supports_pl5=$(awk '/^flags/ {if (/la57/) {print 0;}
+               else {print 1}; exit}' /proc/cpuinfo 2>/dev/null)
+
        if [[ "${pg_table_levels}" -lt 5 ]]; then
                echo "$0: PGTABLE_LEVELS=${pg_table_levels}, must be >= 5 to run this test"
                exit $ksft_skip
+       elif [[ "${cpu_supports_pl5}" -ne 0 ]]; then
+               echo "$0: CPU does not have the necessary la57 flag to support page table level 5"
+               exit $ksft_skip
        fi
 }
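
The new probe greps the CPU flags line for la57 before attempting a 5-level page table test. A standalone C version of the same check (note a real probe should match whole tokens, since strstr() also matches substrings of other flag names):

    #include <stdio.h>
    #include <string.h>

    static int cpu_has_flag(const char *flag)
    {
            char line[8192];
            FILE *f = fopen("/proc/cpuinfo", "r");
            int found = 0;

            if (!f)
                    return 0;
            while (fgets(line, sizeof(line), f)) {
                    if (strncmp(line, "flags", 5) == 0) {
                            found = strstr(line, flag) != NULL;
                            break;          /* first CPU's flags are enough */
                    }
            }
            fclose(f);
            return found;
    }

    int main(void)
    {
            return cpu_has_flag("la57") ? 0 : 4;    /* 4 = kselftest SKIP */
    }
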
 
index 70a02301f4c276ba6313c3baa1ab3b5058a68b0c..3d2d2eb9d6fff077cca24fd82a2a4990c34706d1 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
 set -e
index 8da562a9ae87e445a7e3003c4e07f589e3f85f0d..19ff7505166096483d79709676c03eb8a9135fc5 100644 (file)
@@ -1,5 +1,6 @@
 CONFIG_USER_NS=y
 CONFIG_NET_NS=y
+CONFIG_BONDING=m
 CONFIG_BPF_SYSCALL=y
 CONFIG_TEST_BPF=m
 CONFIG_NUMA=y
@@ -14,9 +15,13 @@ CONFIG_VETH=y
 CONFIG_NET_IPVTI=y
 CONFIG_IPV6_VTI=y
 CONFIG_DUMMY=y
+CONFIG_BRIDGE_VLAN_FILTERING=y
 CONFIG_BRIDGE=y
+CONFIG_CRYPTO_CHACHA20POLY1305=m
 CONFIG_VLAN_8021Q=y
 CONFIG_IFB=y
+CONFIG_INET_DIAG=y
+CONFIG_IP_GRE=m
 CONFIG_NETFILTER=y
 CONFIG_NETFILTER_ADVANCED=y
 CONFIG_NF_CONNTRACK=m
@@ -25,15 +30,36 @@ CONFIG_IP6_NF_IPTABLES=m
 CONFIG_IP_NF_IPTABLES=m
 CONFIG_IP6_NF_NAT=m
 CONFIG_IP_NF_NAT=m
+CONFIG_IPV6_GRE=m
+CONFIG_IPV6_SEG6_LWTUNNEL=y
+CONFIG_L2TP_ETH=m
+CONFIG_L2TP_IP=m
+CONFIG_L2TP=m
+CONFIG_L2TP_V3=y
+CONFIG_MACSEC=m
+CONFIG_MACVLAN=y
+CONFIG_MACVTAP=y
+CONFIG_MPLS=y
+CONFIG_MPTCP=y
 CONFIG_NF_TABLES=m
 CONFIG_NF_TABLES_IPV6=y
 CONFIG_NF_TABLES_IPV4=y
 CONFIG_NFT_NAT=m
+CONFIG_NET_ACT_GACT=m
+CONFIG_NET_CLS_BASIC=m
+CONFIG_NET_CLS_U32=m
+CONFIG_NET_IPGRE_DEMUX=m
+CONFIG_NET_IPGRE=m
+CONFIG_NET_SCH_FQ_CODEL=m
+CONFIG_NET_SCH_HTB=m
 CONFIG_NET_SCH_FQ=m
 CONFIG_NET_SCH_ETF=m
 CONFIG_NET_SCH_NETEM=y
+CONFIG_PSAMPLE=m
+CONFIG_TCP_MD5SIG=y
 CONFIG_TEST_BLACKHOLE_DEV=m
 CONFIG_KALLSYMS=y
+CONFIG_TLS=m
 CONFIG_TRACEPOINTS=y
 CONFIG_NET_DROP_MONITOR=m
 CONFIG_NETDEVSIM=m
@@ -48,7 +74,9 @@ CONFIG_BAREUDP=m
 CONFIG_IPV6_IOAM6_LWTUNNEL=y
 CONFIG_CRYPTO_SM4_GENERIC=y
 CONFIG_AMT=m
+CONFIG_TUN=y
 CONFIG_VXLAN=m
 CONFIG_IP_SCTP=m
 CONFIG_NETFILTER_XT_MATCH_POLICY=m
 CONFIG_CRYPTO_ARIA=y
+CONFIG_XFRM_INTERFACE=m
index a26c5624429fb1a029d1d472921154e73a7ea86b..4287a85298907969dbd7df7da0e1969494f7857e 100755 (executable)
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
 # SPDX-License-Identifier: GPL-2.0
 
 readonly ksft_skip=4
@@ -33,6 +33,10 @@ chk_rps() {
 
        rps_mask=$($cmd /sys/class/net/$dev_name/queues/rx-0/rps_cpus)
        printf "%-60s" "$msg"
+
+       # In case there is more than 32 CPUs we need to remove commas from masks
+       rps_mask=${rps_mask//,}
+       expected_rps_mask=${expected_rps_mask//,}
        if [ $rps_mask -eq $expected_rps_mask ]; then
                echo "[ ok ]"
        else
index a148181641026e18e7d9c138ab7b6dde89cc90fd..e9fa14e1073226829e883d7c6621ae9e9a2ce173 100644 (file)
@@ -3,19 +3,16 @@
 #define _GNU_SOURCE
 #include <sched.h>
 
+#include <fcntl.h>
+
 #include <netinet/in.h>
 #include <sys/socket.h>
 #include <sys/sysinfo.h>
 
 #include "../kselftest_harness.h"
 
-#define CLIENT_PER_SERVER      32 /* More sockets, more reliable */
-#define NR_SERVER              self->nproc
-#define NR_CLIENT              (CLIENT_PER_SERVER * NR_SERVER)
-
 FIXTURE(so_incoming_cpu)
 {
-       int nproc;
        int *servers;
        union {
                struct sockaddr addr;
@@ -56,12 +53,47 @@ FIXTURE_VARIANT_ADD(so_incoming_cpu, after_all_listen)
        .when_to_set = AFTER_ALL_LISTEN,
 };
 
+static void write_sysctl(struct __test_metadata *_metadata,
+                        char *filename, char *string)
+{
+       int fd, len, ret;
+
+       fd = open(filename, O_WRONLY);
+       ASSERT_NE(fd, -1);
+
+       len = strlen(string);
+       ret = write(fd, string, len);
+       ASSERT_EQ(ret, len);
+}
+
+static void setup_netns(struct __test_metadata *_metadata)
+{
+       ASSERT_EQ(unshare(CLONE_NEWNET), 0);
+       ASSERT_EQ(system("ip link set lo up"), 0);
+
+       write_sysctl(_metadata, "/proc/sys/net/ipv4/ip_local_port_range", "10000 60001");
+       write_sysctl(_metadata, "/proc/sys/net/ipv4/tcp_tw_reuse", "0");
+}
+
+#define NR_PORT                                (60001 - 10000 - 1)
+#define NR_CLIENT_PER_SERVER_DEFAULT   32
+static int nr_client_per_server, nr_server, nr_client;
+
 FIXTURE_SETUP(so_incoming_cpu)
 {
-       self->nproc = get_nprocs();
-       ASSERT_LE(2, self->nproc);
+       setup_netns(_metadata);
+
+       nr_server = get_nprocs();
+       ASSERT_LE(2, nr_server);
+
+       if (NR_CLIENT_PER_SERVER_DEFAULT * nr_server < NR_PORT)
+               nr_client_per_server = NR_CLIENT_PER_SERVER_DEFAULT;
+       else
+               nr_client_per_server = NR_PORT / nr_server;
+
+       nr_client = nr_client_per_server * nr_server;
 
-       self->servers = malloc(sizeof(int) * NR_SERVER);
+       self->servers = malloc(sizeof(int) * nr_server);
        ASSERT_NE(self->servers, NULL);
 
        self->in_addr.sin_family = AF_INET;
@@ -74,7 +106,7 @@ FIXTURE_TEARDOWN(so_incoming_cpu)
 {
        int i;
 
-       for (i = 0; i < NR_SERVER; i++)
+       for (i = 0; i < nr_server; i++)
                close(self->servers[i]);
 
        free(self->servers);
@@ -110,10 +142,10 @@ int create_server(struct __test_metadata *_metadata,
        if (variant->when_to_set == BEFORE_LISTEN)
                set_so_incoming_cpu(_metadata, fd, cpu);
 
-       /* We don't use CLIENT_PER_SERVER here not to block
+       /* We don't use nr_client_per_server here, so as not to block
         * this test at connect() if SO_INCOMING_CPU is broken.
         */
-       ret = listen(fd, NR_CLIENT);
+       ret = listen(fd, nr_client);
        ASSERT_EQ(ret, 0);
 
        if (variant->when_to_set == AFTER_LISTEN)
@@ -128,7 +160,7 @@ void create_servers(struct __test_metadata *_metadata,
 {
        int i, ret;
 
-       for (i = 0; i < NR_SERVER; i++) {
+       for (i = 0; i < nr_server; i++) {
                self->servers[i] = create_server(_metadata, self, variant, i);
 
                if (i == 0) {
@@ -138,7 +170,7 @@ void create_servers(struct __test_metadata *_metadata,
        }
 
        if (variant->when_to_set == AFTER_ALL_LISTEN) {
-               for (i = 0; i < NR_SERVER; i++)
+               for (i = 0; i < nr_server; i++)
                        set_so_incoming_cpu(_metadata, self->servers[i], i);
        }
 }
@@ -149,7 +181,7 @@ void create_clients(struct __test_metadata *_metadata,
        cpu_set_t cpu_set;
        int i, j, fd, ret;
 
-       for (i = 0; i < NR_SERVER; i++) {
+       for (i = 0; i < nr_server; i++) {
                CPU_ZERO(&cpu_set);
 
                CPU_SET(i, &cpu_set);
@@ -162,7 +194,7 @@ void create_clients(struct __test_metadata *_metadata,
                ret = sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
                ASSERT_EQ(ret, 0);
 
-               for (j = 0; j < CLIENT_PER_SERVER; j++) {
+               for (j = 0; j < nr_client_per_server; j++) {
                        fd  = socket(AF_INET, SOCK_STREAM, 0);
                        ASSERT_NE(fd, -1);
 
@@ -180,8 +212,8 @@ void verify_incoming_cpu(struct __test_metadata *_metadata,
        int i, j, fd, cpu, ret, total = 0;
        socklen_t len = sizeof(int);
 
-       for (i = 0; i < NR_SERVER; i++) {
-               for (j = 0; j < CLIENT_PER_SERVER; j++) {
+       for (i = 0; i < nr_server; i++) {
+               for (j = 0; j < nr_client_per_server; j++) {
                        /* If we see -EAGAIN here, SO_INCOMING_CPU is broken */
                        fd = accept(self->servers[i], &self->addr, &self->addrlen);
                        ASSERT_NE(fd, -1);
@@ -195,7 +227,7 @@ void verify_incoming_cpu(struct __test_metadata *_metadata,
                }
        }
 
-       ASSERT_EQ(total, NR_CLIENT);
+       ASSERT_EQ(total, nr_client);
        TH_LOG("SO_INCOMING_CPU is very likely to be "
               "working correctly with %d sockets.", total);
 }
index 88754296196870a5d0ef3afb52373c8d40cbc598..2348d2c20d0a1aaf3a05a1c7005983f442708b3c 100644 (file)
@@ -24,6 +24,11 @@ bool rseq_validate_cpu_id(void)
 {
        return rseq_mm_cid_available();
 }
+static
+bool rseq_use_cpu_index(void)
+{
+       return false;   /* Use mm_cid */
+}
 #else
 # define RSEQ_PERCPU   RSEQ_PERCPU_CPU_ID
 static
@@ -36,6 +41,11 @@ bool rseq_validate_cpu_id(void)
 {
        return rseq_current_cpu_raw() >= 0;
 }
+static
+bool rseq_use_cpu_index(void)
+{
+       return true;    /* Use cpu_id as index. */
+}
 #endif
 
 struct percpu_lock_entry {
@@ -274,7 +284,7 @@ void test_percpu_list(void)
        /* Generate list entries for every usable cpu. */
        sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
        for (i = 0; i < CPU_SETSIZE; i++) {
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
                for (j = 1; j <= 100; j++) {
                        struct percpu_list_node *node;
@@ -299,7 +309,7 @@ void test_percpu_list(void)
        for (i = 0; i < CPU_SETSIZE; i++) {
                struct percpu_list_node *node;
 
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
 
                while ((node = __percpu_list_pop(&list, i))) {
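
rseq_use_cpu_index() exists because mm_cid values are compacted per process: when indexing by mm_cid, any slot may be populated regardless of CPU affinity, so the allowed_cpus filter only applies in cpu_id mode (the identical change recurs in param_test.c below). Boiled down (illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* Skip a slot only when data is indexed by cpu_id and that CPU is
     * outside the affinity mask; mm_cid-indexed slots are always visited. */
    static bool slot_may_be_used(bool index_by_cpu, bool cpu_allowed)
    {
            return !index_by_cpu || cpu_allowed;
    }

    int main(void)
    {
            printf("%d\n", slot_may_be_used(false, false));  /* mm_cid: always 1 */
            return 0;
    }
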
index 20403d58345cd523186b9423750ea7ad669cdd96..2f37961240caa7cc43f142fac32fd7f9c9c211d4 100644 (file)
@@ -288,6 +288,11 @@ bool rseq_validate_cpu_id(void)
 {
        return rseq_mm_cid_available();
 }
+static
+bool rseq_use_cpu_index(void)
+{
+       return false;   /* Use mm_cid */
+}
 # ifdef TEST_MEMBARRIER
 /*
  * Membarrier does not currently support targeting a mm_cid, so
@@ -312,6 +317,11 @@ bool rseq_validate_cpu_id(void)
 {
        return rseq_current_cpu_raw() >= 0;
 }
+static
+bool rseq_use_cpu_index(void)
+{
+       return true;    /* Use cpu_id as index. */
+}
 # ifdef TEST_MEMBARRIER
 static
 int rseq_membarrier_expedited(int cpu)
@@ -715,7 +725,7 @@ void test_percpu_list(void)
        /* Generate list entries for every usable cpu. */
        sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
        for (i = 0; i < CPU_SETSIZE; i++) {
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
                for (j = 1; j <= 100; j++) {
                        struct percpu_list_node *node;
@@ -752,7 +762,7 @@ void test_percpu_list(void)
        for (i = 0; i < CPU_SETSIZE; i++) {
                struct percpu_list_node *node;
 
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
 
                while ((node = __percpu_list_pop(&list, i))) {
@@ -902,7 +912,7 @@ void test_percpu_buffer(void)
        /* Generate list entries for every usable cpu. */
        sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
        for (i = 0; i < CPU_SETSIZE; i++) {
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
                /* Worse-case is every item in same CPU. */
                buffer.c[i].array =
@@ -952,7 +962,7 @@ void test_percpu_buffer(void)
        for (i = 0; i < CPU_SETSIZE; i++) {
                struct percpu_buffer_node *node;
 
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
 
                while ((node = __percpu_buffer_pop(&buffer, i))) {
@@ -1113,7 +1123,7 @@ void test_percpu_memcpy_buffer(void)
        /* Generate list entries for every usable cpu. */
        sched_getaffinity(0, sizeof(allowed_cpus), &allowed_cpus);
        for (i = 0; i < CPU_SETSIZE; i++) {
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
                /* Worse-case is every item in same CPU. */
                buffer.c[i].array =
@@ -1160,7 +1170,7 @@ void test_percpu_memcpy_buffer(void)
        for (i = 0; i < CPU_SETSIZE; i++) {
                struct percpu_memcpy_buffer_node item;
 
-               if (!CPU_ISSET(i, &allowed_cpus))
+               if (rseq_use_cpu_index() && !CPU_ISSET(i, &allowed_cpus))
                        continue;
 
                while (__percpu_memcpy_buffer_pop(&buffer, &item, i)) {
index 5b5c9d558dee07bc1f7afd7df280e1189858451e..97b86980b768f4fa09da58f16d71ba42f42d2c8d 100644 (file)
@@ -38,10 +38,10 @@ unsigned long long timing(clockid_t clk_id, unsigned long long samples)
        i *= 1000000000ULL;
        i += finish.tv_nsec - start.tv_nsec;
 
-       printf("%lu.%09lu - %lu.%09lu = %llu (%.1fs)\n",
-               finish.tv_sec, finish.tv_nsec,
-               start.tv_sec, start.tv_nsec,
-               i, (double)i / 1000000000.0);
+       ksft_print_msg("%lu.%09lu - %lu.%09lu = %llu (%.1fs)\n",
+                      finish.tv_sec, finish.tv_nsec,
+                      start.tv_sec, start.tv_nsec,
+                      i, (double)i / 1000000000.0);
 
        return i;
 }
@@ -53,7 +53,7 @@ unsigned long long calibrate(void)
        pid_t pid, ret;
        int seconds = 15;
 
-       printf("Calibrating sample size for %d seconds worth of syscalls ...\n", seconds);
+       ksft_print_msg("Calibrating sample size for %d seconds worth of syscalls ...\n", seconds);
 
        samples = 0;
        pid = getpid();
@@ -98,24 +98,36 @@ bool le(int i_one, int i_two)
 }
 
 long compare(const char *name_one, const char *name_eval, const char *name_two,
-            unsigned long long one, bool (*eval)(int, int), unsigned long long two)
+            unsigned long long one, bool (*eval)(int, int), unsigned long long two,
+            bool skip)
 {
        bool good;
 
-       printf("\t%s %s %s (%lld %s %lld): ", name_one, name_eval, name_two,
-              (long long)one, name_eval, (long long)two);
+       if (skip) {
+               ksft_test_result_skip("%s %s %s\n", name_one, name_eval,
+                                     name_two);
+               return 0;
+       }
+
+       ksft_print_msg("\t%s %s %s (%lld %s %lld): ", name_one, name_eval, name_two,
+                      (long long)one, name_eval, (long long)two);
        if (one > INT_MAX) {
-               printf("Miscalculation! Measurement went negative: %lld\n", (long long)one);
-               return 1;
+               ksft_print_msg("Miscalculation! Measurement went negative: %lld\n", (long long)one);
+               good = false;
+               goto out;
        }
        if (two > INT_MAX) {
-               printf("Miscalculation! Measurement went negative: %lld\n", (long long)two);
-               return 1;
+               ksft_print_msg("Miscalculation! Measurement went negative: %lld\n", (long long)two);
+               good = false;
+               goto out;
        }
 
        good = eval(one, two);
        printf("%s\n", good ? "✔️" : "❌");
 
+out:
+       ksft_test_result(good, "%s %s %s\n", name_one, name_eval, name_two);
+
        return good ? 0 : 1;
 }
 
@@ -142,15 +154,22 @@ int main(int argc, char *argv[])
        unsigned long long samples, calc;
        unsigned long long native, filter1, filter2, bitmap1, bitmap2;
        unsigned long long entry, per_filter1, per_filter2;
+       bool skip = false;
 
        setbuf(stdout, NULL);
 
-       printf("Running on:\n");
+       ksft_print_header();
+       ksft_set_plan(7);
+
+       ksft_print_msg("Running on:\n");
+       ksft_print_msg("");
        system("uname -a");
 
-       printf("Current BPF sysctl settings:\n");
+       ksft_print_msg("Current BPF sysctl settings:\n");
        /* Avoid using "sysctl" which may not be installed. */
+       ksft_print_msg("");
        system("grep -H . /proc/sys/net/core/bpf_jit_enable");
+       ksft_print_msg("");
        system("grep -H . /proc/sys/net/core/bpf_jit_harden");
 
        if (argc > 1)
@@ -158,11 +177,11 @@ int main(int argc, char *argv[])
        else
                samples = calibrate();
 
-       printf("Benchmarking %llu syscalls...\n", samples);
+       ksft_print_msg("Benchmarking %llu syscalls...\n", samples);
 
        /* Native call */
        native = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
-       printf("getpid native: %llu ns\n", native);
+       ksft_print_msg("getpid native: %llu ns\n", native);
 
        ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0);
        assert(ret == 0);
@@ -172,35 +191,37 @@ int main(int argc, char *argv[])
        assert(ret == 0);
 
        bitmap1 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
-       printf("getpid RET_ALLOW 1 filter (bitmap): %llu ns\n", bitmap1);
+       ksft_print_msg("getpid RET_ALLOW 1 filter (bitmap): %llu ns\n", bitmap1);
 
        /* Second filter resulting in a bitmap */
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &bitmap_prog);
        assert(ret == 0);
 
        bitmap2 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
-       printf("getpid RET_ALLOW 2 filters (bitmap): %llu ns\n", bitmap2);
+       ksft_print_msg("getpid RET_ALLOW 2 filters (bitmap): %llu ns\n", bitmap2);
 
        /* Third filter, can no longer be converted to bitmap */
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog);
        assert(ret == 0);
 
        filter1 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
-       printf("getpid RET_ALLOW 3 filters (full): %llu ns\n", filter1);
+       ksft_print_msg("getpid RET_ALLOW 3 filters (full): %llu ns\n", filter1);
 
        /* Fourth filter, can not be converted to bitmap because of filter 3 */
        ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &bitmap_prog);
        assert(ret == 0);
 
        filter2 = timing(CLOCK_PROCESS_CPUTIME_ID, samples) / samples;
-       printf("getpid RET_ALLOW 4 filters (full): %llu ns\n", filter2);
+       ksft_print_msg("getpid RET_ALLOW 4 filters (full): %llu ns\n", filter2);
 
        /* Estimations */
 #define ESTIMATE(fmt, var, what)       do {                    \
                var = (what);                                   \
-               printf("Estimated " fmt ": %llu ns\n", var);    \
-               if (var > INT_MAX)                              \
-                       goto more_samples;                      \
+               ksft_print_msg("Estimated " fmt ": %llu ns\n", var);    \
+               if (var > INT_MAX) {                            \
+                       skip = true;                            \
+                       ret |= 1;                               \
+               }                                               \
        } while (0)
 
        ESTIMATE("total seccomp overhead for 1 bitmapped filter", calc,
@@ -218,31 +239,34 @@ int main(int argc, char *argv[])
        ESTIMATE("seccomp per-filter overhead (filters / 4)", per_filter2,
                 (filter2 - native - entry) / 4);
 
-       printf("Expectations:\n");
-       ret |= compare("native", "≤", "1 bitmap", native, le, bitmap1);
-       bits = compare("native", "≤", "1 filter", native, le, filter1);
+       ksft_print_msg("Expectations:\n");
+       ret |= compare("native", "≤", "1 bitmap", native, le, bitmap1,
+                      skip);
+       bits = compare("native", "≤", "1 filter", native, le, filter1,
+                      skip);
        if (bits)
-               goto more_samples;
+               skip = true;
 
        ret |= compare("per-filter (last 2 diff)", "≈", "per-filter (filters / 4)",
-                       per_filter1, approx, per_filter2);
+                      per_filter1, approx, per_filter2, skip);
 
        bits = compare("1 bitmapped", "≈", "2 bitmapped",
-                       bitmap1 - native, approx, bitmap2 - native);
+                      bitmap1 - native, approx, bitmap2 - native, skip);
        if (bits) {
-               printf("Skipping constant action bitmap expectations: they appear unsupported.\n");
-               goto out;
+               ksft_print_msg("Skipping constant action bitmap expectations: they appear unsupported.\n");
+               skip = true;
        }
 
-       ret |= compare("entry", "≈", "1 bitmapped", entry, approx, bitmap1 - native);
-       ret |= compare("entry", "≈", "2 bitmapped", entry, approx, bitmap2 - native);
+       ret |= compare("entry", "≈", "1 bitmapped", entry, approx,
+                      bitmap1 - native, skip);
+       ret |= compare("entry", "≈", "2 bitmapped", entry, approx,
+                      bitmap2 - native, skip);
        ret |= compare("native + entry + (per filter * 4)", "≈", "4 filters total",
-                       entry + (per_filter1 * 4) + native, approx, filter2);
-       if (ret == 0)
-               goto out;
+                      entry + (per_filter1 * 4) + native, approx, filter2,
+                      skip);
 
-more_samples:
-       printf("Saw unexpected benchmark result. Try running again with more samples?\n");
-out:
-       return 0;
+       if (ret)
+               ksft_print_msg("Saw unexpected benchmark result. Try running again with more samples?\n");
+
+       ksft_finished();
 }
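
Taken together, the conversion moves the benchmark onto the standard
kselftest TAP lifecycle: print the header, declare a plan, emit one result
per expectation (pass, fail, or skip), and let the summary decide the exit
code. Unlike the old unconditional "return 0", ksft_finished() makes the
exit status reflect the recorded results, so a harness can detect failures.
In miniature, with illustrative test content:

	#include "../kselftest.h"

	int main(void)
	{
		ksft_print_header();	/* "TAP version 13" */
		ksft_set_plan(2);	/* "1..2" */

		ksft_print_msg("diagnostics are prefixed with '# '\n");

		ksft_test_result(1 + 1 == 2, "first expectation\n");
		ksft_test_result_skip("second expectation\n");

		ksft_finished();	/* exit pass/fail from the counts */
	}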