linux-next
author    Andrew Morton <akpm@linux-foundation.org>
          Thu, 31 Dec 2020 22:04:58 +0000 (22:04 +0000)
committer Johannes Weiner <hannes@cmpxchg.org>
          Thu, 31 Dec 2020 22:04:58 +0000 (22:04 +0000)
commit    e775f22d9be6cc53a0b81a0b15bde19d463bbccc

Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
428 files changed:
Documentation/dev-tools/kasan.rst
Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
Documentation/devicetree/bindings/reset/brcm,bcm4908-misc-pcie-reset.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt [deleted file]
Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.yaml [new file with mode: 0644]
Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml
Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml
Documentation/security/keys/core.rst
Documentation/watch_queue.rst
MAINTAINERS
arch/alpha/kernel/syscalls/syscall.tbl
arch/arc/Makefile
arch/arc/boot/Makefile
arch/arm/boot/dts/am335x-evm.dts
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/am335x-icev2.dts
arch/arm/boot/dts/am33xx-l4.dtsi
arch/arm/boot/dts/am574x-idk.dts
arch/arm/boot/dts/bcm2711.dtsi
arch/arm/boot/dts/dra71-evm.dts
arch/arm/boot/dts/dra76x.dtsi
arch/arm/boot/dts/omap3-gta04.dtsi
arch/arm/boot/dts/omap3-n950-n9.dtsi
arch/arm/boot/dts/ste-ux500-samsung-golden.dts
arch/arm/configs/aspeed_g5_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/mach-omap2/omap_device.c
arch/arm/mach-omap2/pmic-cpcap.c
arch/arm/tools/syscall.tbl
arch/arm64/boot/dts/bitmain/bm1880.dtsi
arch/arm64/include/asm/kvm_host.h
arch/arm64/include/asm/unistd.h
arch/arm64/include/asm/unistd32.h
arch/arm64/kernel/smp.c
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
arch/arm64/kvm/hyp/nvhe/hyp-main.c
arch/arm64/kvm/hyp/nvhe/hyp-smp.c
arch/arm64/kvm/hyp/nvhe/psci-relay.c
arch/arm64/kvm/sys_regs.c
arch/arm64/kvm/va_layout.c
arch/csky/Kconfig
arch/csky/abiv1/inc/abi/ckmmu.h
arch/csky/abiv2/inc/abi/ckmmu.h
arch/csky/abiv2/inc/abi/entry.h
arch/csky/include/asm/atomic.h [deleted file]
arch/csky/include/asm/barrier.h
arch/csky/include/asm/cmpxchg.h
arch/csky/include/asm/futex.h [new file with mode: 0644]
arch/csky/include/asm/memory.h
arch/csky/include/asm/mmu_context.h
arch/csky/include/asm/page.h
arch/csky/include/asm/pgalloc.h
arch/csky/include/asm/pgtable.h
arch/csky/include/asm/processor.h
arch/csky/include/asm/segment.h
arch/csky/include/asm/spinlock.h
arch/csky/include/asm/spinlock_types.h
arch/csky/kernel/atomic.S
arch/csky/kernel/entry.S
arch/csky/kernel/head.S
arch/csky/kernel/perf_event.c
arch/csky/kernel/ptrace.c
arch/csky/kernel/setup.c
arch/csky/kernel/smp.c
arch/csky/kernel/vmlinux.lds.S
arch/csky/mm/fault.c
arch/csky/mm/init.c
arch/h8300/Kconfig.cpu
arch/h8300/boot/dts/edosk2674.dts
arch/h8300/boot/dts/h8300h_sim.dts
arch/h8300/boot/dts/h8s_sim.dts
arch/h8300/configs/edosk2674_defconfig
arch/h8300/configs/h8300h-sim_defconfig
arch/h8300/configs/h8s-sim_defconfig
arch/h8300/kernel/setup.c
arch/h8300/lib/memset.S
arch/ia64/kernel/syscalls/syscall.tbl
arch/m68k/kernel/syscalls/syscall.tbl
arch/microblaze/kernel/syscalls/syscall.tbl
arch/mips/kernel/syscalls/syscall_n32.tbl
arch/mips/kernel/syscalls/syscall_n64.tbl
arch/mips/kernel/syscalls/syscall_o32.tbl
arch/nds32/configs/defconfig
arch/nds32/kernel/setup.c
arch/nds32/kernel/time.c
arch/nds32/kernel/traps.c
arch/parisc/kernel/syscalls/syscall.tbl
arch/powerpc/kernel/head_book3s_32.S
arch/powerpc/kernel/syscalls/syscall.tbl
arch/s390/kernel/syscalls/syscall.tbl
arch/sh/kernel/syscalls/syscall.tbl
arch/sparc/Kconfig
arch/sparc/boot/piggyback.c
arch/sparc/include/asm/backoff.h
arch/sparc/include/asm/elf_64.h
arch/sparc/include/asm/extable.h [moved from arch/sparc/include/asm/extable_64.h with 92% similarity]
arch/sparc/include/asm/pgtsrmmu.h
arch/sparc/include/asm/processor_32.h
arch/sparc/include/asm/thread_info_64.h
arch/sparc/include/asm/uaccess.h
arch/sparc/include/asm/uaccess_32.h
arch/sparc/include/asm/uaccess_64.h
arch/sparc/kernel/head_32.S
arch/sparc/kernel/head_64.S
arch/sparc/kernel/pci.c
arch/sparc/kernel/process_32.c
arch/sparc/kernel/setup_32.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/syscalls/syscall.tbl
arch/sparc/kernel/unaligned_32.c
arch/sparc/kernel/viohs.c
arch/sparc/lib/checksum_32.S
arch/sparc/lib/copy_user.S
arch/sparc/lib/memset.S
arch/sparc/mm/Makefile
arch/sparc/mm/extable.c [deleted file]
arch/sparc/mm/fault_32.c
arch/sparc/mm/mm_32.h
arch/sparc/mm/srmmu.c
arch/x86/entry/syscalls/syscall_32.tbl
arch/x86/entry/syscalls/syscall_64.tbl
arch/xtensa/Kconfig
arch/xtensa/kernel/syscalls/syscall.tbl
block/blk-core.c
block/blk-mq-debugfs.c
block/blk-mq.c
block/blk-pm.c
block/blk-pm.h
certs/blacklist.c
certs/system_keyring.c
crypto/asymmetric_keys/asymmetric_type.c
crypto/asymmetric_keys/pkcs7_parser.h
crypto/asymmetric_keys/pkcs7_trust.c
crypto/asymmetric_keys/pkcs7_verify.c
drivers/atm/idt77252.c
drivers/base/platform.c
drivers/bluetooth/btintel.c
drivers/bluetooth/btmtksdio.c
drivers/bluetooth/btqca.c
drivers/bluetooth/btqca.h
drivers/bluetooth/btqcomsmd.c
drivers/bluetooth/btrtl.c
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_bcm.c
drivers/bluetooth/hci_ldisc.c
drivers/bluetooth/hci_serdev.c
drivers/clocksource/h8300_timer8.c
drivers/dma/dw-edma/dw-edma-core.c
drivers/dma/idxd/sysfs.c
drivers/dma/mediatek/mtk-hsdma.c
drivers/dma/milbeaut-xdmac.c
drivers/dma/qcom/bam_dma.c
drivers/dma/qcom/gpi.c
drivers/dma/ti/k3-udma.c
drivers/firewire/net.c
drivers/firmware/dmi-id.c
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
drivers/gpu/drm/amd/amdgpu/nv.c
drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
drivers/gpu/drm/imx/Kconfig
drivers/gpu/drm/imx/parallel-display.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/ipu-v3/ipu-di.c
drivers/hwtracing/coresight/coresight-cti-platform.c
drivers/ide/Kconfig
drivers/ide/aec62xx.c
drivers/ide/alim15x3.c
drivers/ide/amd74xx.c
drivers/ide/atiixp.c
drivers/ide/cmd64x.c
drivers/ide/cs5520.c
drivers/ide/cs5530.c
drivers/ide/cs5535.c
drivers/ide/cs5536.c
drivers/ide/cy82c693.c
drivers/ide/delkin_cb.c
drivers/ide/hpt366.c
drivers/ide/ide-acpi.c
drivers/ide/ide-atapi.c
drivers/ide/ide-io.c
drivers/ide/ide-pci-generic.c
drivers/ide/ide-pm.c
drivers/ide/it8172.c
drivers/ide/it8213.c
drivers/ide/it821x.c
drivers/ide/jmicron.c
drivers/ide/ns87415.c
drivers/ide/opti621.c
drivers/ide/pdc202xx_new.c
drivers/ide/pdc202xx_old.c
drivers/ide/piix.c
drivers/ide/sc1200.c
drivers/ide/serverworks.c
drivers/ide/setup-pci.c
drivers/ide/siimage.c
drivers/ide/sis5513.c
drivers/ide/sl82c105.c
drivers/ide/slc90e66.c
drivers/ide/triflex.c
drivers/ide/via82cxxx.c
drivers/input/keyboard/Kconfig
drivers/input/misc/da7280.c
drivers/input/touchscreen/raydium_i2c_ts.c
drivers/irqchip/irq-bcm2836.c
drivers/irqchip/irq-renesas-h8300h.c
drivers/irqchip/irq-renesas-h8s.c
drivers/irqchip/irq-sl28cpld.c
drivers/leds/leds-ariel.c
drivers/leds/leds-blinkm.c
drivers/leds/leds-lm3530.c
drivers/leds/leds-lm3533.c
drivers/leds/leds-lm355x.c
drivers/leds/leds-lm3642.c
drivers/leds/leds-max8997.c
drivers/leds/leds-netxbig.c
drivers/leds/leds-ss4200.c
drivers/leds/leds-wm831x-status.c
drivers/md/dm-crypt.c
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/freescale/ucc_geth.h
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/iavf/iavf_main.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.c
drivers/net/ethernet/marvell/mvpp2/mvpp2_prs.h
drivers/net/ethernet/pensando/ionic/ionic_txrx.c
drivers/net/ethernet/smsc/Kconfig
drivers/net/ethernet/smsc/smc91x.c
drivers/net/wireless/ath/ath11k/core.c
drivers/net/wireless/ath/ath11k/dp_rx.c
drivers/net/wireless/ath/ath11k/mac.c
drivers/net/wireless/ath/ath11k/pci.c
drivers/net/wireless/ath/ath11k/pci.h
drivers/net/wireless/ath/ath11k/peer.c
drivers/net/wireless/ath/ath11k/peer.h
drivers/net/wireless/ath/ath11k/qmi.c
drivers/net/wireless/ath/ath11k/qmi.h
drivers/net/wireless/ath/ath11k/wmi.c
drivers/net/wireless/mediatek/mt76/mt7915/init.c
drivers/net/wireless/mediatek/mt76/sdio.c
drivers/net/wireless/mediatek/mt76/usb.c
drivers/net/wireless/realtek/rtlwifi/core.c
drivers/regulator/bd718x7-regulator.c
drivers/regulator/pf8x00-regulator.c
drivers/reset/Kconfig
drivers/reset/hisilicon/reset-hi3660.c
drivers/reset/reset-simple.c
drivers/scsi/cxgbi/cxgb4i/Kconfig
drivers/scsi/mpt3sas/mpt3sas_base.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_transport_spi.c
drivers/scsi/ses.c
drivers/scsi/ufs/ufs-mediatek-trace.h
drivers/scsi/ufs/ufs-mediatek.c
drivers/scsi/ufs/ufs-mediatek.h
drivers/scsi/ufs/ufs.h
drivers/scsi/ufs/ufshcd-pci.c
drivers/scsi/ufs/ufshcd.c
drivers/scsi/ufs/ufshcd.h
drivers/spi/spi-geni-qcom.c
drivers/spi/spi-stm32.c
drivers/tty/serial/sh-sci.c
drivers/usb/Makefile
drivers/usb/cdns3/Kconfig
drivers/usb/cdns3/Makefile
drivers/usb/cdns3/cdns3-debug.h [moved from drivers/usb/cdns3/debug.h with 100% similarity]
drivers/usb/cdns3/cdns3-ep0.c [moved from drivers/usb/cdns3/ep0.c with 99% similarity]
drivers/usb/cdns3/cdns3-gadget.c [moved from drivers/usb/cdns3/gadget.c with 99% similarity]
drivers/usb/cdns3/cdns3-gadget.h [moved from drivers/usb/cdns3/gadget.h with 100% similarity]
drivers/usb/cdns3/cdns3-imx.c
drivers/usb/cdns3/cdns3-plat.c [new file with mode: 0644]
drivers/usb/cdns3/cdns3-trace.c [moved from drivers/usb/cdns3/trace.c with 89% similarity]
drivers/usb/cdns3/cdns3-trace.h [moved from drivers/usb/cdns3/trace.h with 99% similarity]
drivers/usb/cdns3/cdnsp-debug.h [new file with mode: 0644]
drivers/usb/cdns3/cdnsp-ep0.c [new file with mode: 0644]
drivers/usb/cdns3/cdnsp-gadget.c [new file with mode: 0644]
drivers/usb/cdns3/cdnsp-gadget.h [new file with mode: 0644]
drivers/usb/cdns3/cdnsp-mem.c [new file with mode: 0644]
drivers/usb/cdns3/cdnsp-pci.c [new file with mode: 0644]
drivers/usb/cdns3/cdnsp-ring.c [new file with mode: 0644]
drivers/usb/cdns3/cdnsp-trace.c [new file with mode: 0644]
drivers/usb/cdns3/cdnsp-trace.h [new file with mode: 0644]
drivers/usb/cdns3/core.c
drivers/usb/cdns3/core.h
drivers/usb/cdns3/drd.c
drivers/usb/cdns3/drd.h
drivers/usb/cdns3/gadget-export.h
drivers/usb/cdns3/host-export.h
drivers/usb/cdns3/host.c
drivers/usb/typec/ucsi/ucsi.c
fs/Kconfig
fs/Makefile
fs/btrfs/block-group.c
fs/btrfs/btrfs_inode.h
fs/btrfs/ctree.c
fs/btrfs/ctree.h
fs/btrfs/delayed-ref.h
fs/btrfs/dev-replace.c
fs/btrfs/discard.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/file-item.c
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/ordered-data.c
fs/btrfs/qgroup.c
fs/btrfs/reflink.c
fs/btrfs/send.c
fs/btrfs/space-info.c
fs/btrfs/space-info.h
fs/btrfs/super.c
fs/btrfs/tests/btrfs-tests.c
fs/btrfs/tests/inode-tests.c
fs/btrfs/transaction.c
fs/btrfs/volumes.c
fs/ext4/ext4_jbd2.c
fs/ext4/ext4_jbd2.h
fs/ext4/file.c
fs/ext4/inode.c
fs/ext4/ioctl.c
fs/ext4/namei.c
fs/ext4/resize.c
fs/ext4/super.c
fs/ext4/xattr.c
fs/jfs/jfs_filsys.h
fs/jfs/jfs_mount.c
fs/mount.h
fs/mount_notify.c [new file with mode: 0644]
fs/namespace.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4xdr.c
fs/nfsd/nfssvc.c
fs/nfsd/xdr4.h
include/crypto/public_key.h
include/keys/encrypted-type.h
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/compiler_attributes.h
include/linux/compiler_types.h
include/linux/cpuhotplug.h
include/linux/dcache.h
include/linux/ide.h
include/linux/key.h
include/linux/lsm_hook_defs.h
include/linux/lsm_hooks.h
include/linux/rcupdate.h
include/linux/security.h
include/linux/syscalls.h
include/linux/verification.h
include/linux/watch_queue.h
include/net/xdp_sock.h
include/net/xsk_buff_pool.h
include/trace/events/btrfs.h
include/uapi/asm-generic/unistd.h
include/uapi/linux/ppp-ioctl.h
include/uapi/linux/watch_queue.h
init/main.c
kernel/bpf/hashtab.c
kernel/bpf/syscall.c
kernel/dma/map_benchmark.c
kernel/irq/msi.c
kernel/rcu/tasks.h
kernel/sys_ni.c
lib/Kconfig.debug
lib/extable.c
lib/test_xarray.c
lib/xarray.c
mm/percpu.c
net/bluetooth/hci_conn.c
net/bluetooth/hci_request.c
net/bluetooth/mgmt.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv6/netfilter/ip6_tables.c
net/netfilter/ipset/ip_set_hash_gen.h
net/netfilter/nf_tables_api.c
net/packet/af_packet.c
net/sched/sch_taprio.c
net/sunrpc/svcsock.c
net/xdp/xsk.c
net/xdp/xsk_buff_pool.c
net/xdp/xsk_queue.h
net/xfrm/xfrm_input.c
samples/Kconfig
samples/watch_queue/watch_test.c
security/integrity/ima/ima_mok.c
security/keys/Kconfig
security/keys/big_key.c
security/keys/key.c
security/keys/keyctl.c
security/keys/keyctl_pkey.c
security/keys/keyring.c
security/keys/process_keys.c
security/security.c
sound/pci/hda/hda_intel.c
sound/soc/atmel/Kconfig
sound/soc/codecs/Kconfig
sound/soc/codecs/max98373-i2c.c
sound/soc/codecs/max98373-sdw.c
sound/soc/codecs/max98373.c
sound/soc/codecs/max98373.h
sound/soc/codecs/rt711.c
sound/soc/fsl/imx-hdmi.c
sound/soc/intel/boards/haswell.c
sound/soc/intel/skylake/cnl-sst.c
sound/soc/meson/axg-tdm-interface.c
sound/soc/meson/axg-tdmin.c
sound/soc/qcom/lpass-cpu.c
sound/soc/qcom/lpass-platform.c
sound/soc/sh/rcar/adg.c
sound/soc/soc-dapm.c
sound/soc/sof/Kconfig
tools/testing/selftests/Makefile
tools/testing/selftests/bpf/Makefile
tools/testing/selftests/bpf/xdpxceiver.c
tools/testing/selftests/vDSO/.gitignore
tools/testing/selftests/vDSO/vdso_test_correctness.c
tools/testing/selftests/wireguard/qemu/debug.config

diff --git a/Documentation/dev-tools/kasan.rst b/Documentation/dev-tools/kasan.rst
index 0fc3fb1860c4a082f896dc343f8e244e02b12755..ec6fef1f252cab26f749174c6a8390fb45b0a7e2 100644 (file)
@@ -22,7 +22,7 @@ out-of-bounds accesses for global variables is only supported since Clang 11.
 Tag-based KASAN is only supported in Clang.
 
 Currently generic KASAN is supported for the x86_64, arm, arm64, xtensa, s390
-and riscv architectures, and tag-based KASAN modes are supported only for arm64.
+and riscv architectures, and tag-based KASAN is supported only for arm64.
 
 Usage
 -----
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-bcdma.yaml
index b15f68c499cb2396a1a8626049ff82d5b6744a45..df29d59d13a8dc4671dcbe4144ea6180cc1991d3 100644 (file)
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/dma/ti/k3-bcdma.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments K3 DMSS BCDMA Device Tree Bindings
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The Block Copy DMA (BCDMA) is intended to perform similar functions as the TR
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-pktdma.yaml
index b13ab60cd740f52ffaddc37cbfc192fa10911822..ea19d12a9337e8e385050858bab4e1387eeaf613 100644 (file)
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/dma/ti/k3-pktdma.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments K3 DMSS PKTDMA Device Tree Bindings
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The Packet DMA (PKTDMA) is intended to perform similar functions as the packet
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
index 9a87fd9041eba1efb7333d86c8ebf4a1fa92cf40..6a09bbf83d4629215b62fba92c4eeaa2520b1e9f 100644 (file)
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2019 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/dma/ti/k3-udma.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments K3 NAVSS Unified DMA Device Tree Bindings
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The UDMA-P is intended to perform similar (but significantly upgraded)
diff --git a/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml b/Documentation/devicetree/bindings/regulator/nxp,pf8x00-regulator.yaml
index a6c259ce97850c6dc196b40d38f6a81dfa63bf9a..956156fe52a3eb6040a44d8a4d393cf25b857484 100644 (file)
@@ -19,7 +19,9 @@ description: |
 properties:
   compatible:
     enum:
-      - nxp,pf8x00
+      - nxp,pf8100
+      - nxp,pf8121a
+      - nxp,pf8200
 
   reg:
     maxItems: 1
@@ -118,7 +120,7 @@ examples:
         #size-cells = <0>;
 
         pmic@8 {
-            compatible = "nxp,pf8x00";
+            compatible = "nxp,pf8100";
             reg = <0x08>;
 
             regulators {
diff --git a/Documentation/devicetree/bindings/reset/brcm,bcm4908-misc-pcie-reset.yaml b/Documentation/devicetree/bindings/reset/brcm,bcm4908-misc-pcie-reset.yaml
new file mode 100644 (file)
index 0000000..88aebb3
--- /dev/null
@@ -0,0 +1,39 @@
+# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/brcm,bcm4908-misc-pcie-reset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Broadcom MISC block PCIe reset controller
+
+description: This document describes reset controller handling PCIe PERST#
+  signals. On BCM4908 it's a part of the MISC block.
+
+maintainers:
+  - Rafał Miłecki <rafal@milecki.pl>
+
+properties:
+  compatible:
+    const: brcm,bcm4908-misc-pcie-reset
+
+  reg:
+    maxItems: 1
+
+  "#reset-cells":
+    description: PCIe core id
+    const: 1
+
+required:
+  - compatible
+  - reg
+  - "#reset-cells"
+
+additionalProperties: false
+
+examples:
+  - |
+    reset-controller@ff802644 {
+        compatible = "brcm,bcm4908-misc-pcie-reset";
+        reg = <0xff802644 0x04>;
+        #reset-cells = <1>;
+    };
diff --git a/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt b/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.txt
deleted file mode 100644 (file)
index 2df4bdd..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-Hisilicon System Reset Controller
-======================================
-
-Please also refer to reset.txt in this directory for common reset
-controller binding usage.
-
-The reset controller registers are part of the system-ctl block on
-hi3660 and hi3670 SoCs.
-
-Required properties:
-- compatible: should be one of the following:
-                "hisilicon,hi3660-reset" for HI3660
-                "hisilicon,hi3670-reset", "hisilicon,hi3660-reset" for HI3670
-- hisi,rst-syscon: phandle of the reset's syscon.
-- #reset-cells : Specifies the number of cells needed to encode a
-  reset source.  The type shall be a <u32> and the value shall be 2.
-
-        Cell #1 : offset of the reset assert control
-                  register from the syscon register base
-                  offset + 4: deassert control register
-                  offset + 8: status control register
-        Cell #2 : bit position of the reset in the reset control register
-
-Example:
-       iomcu: iomcu@ffd7e000 {
-               compatible = "hisilicon,hi3660-iomcu", "syscon";
-               reg = <0x0 0xffd7e000 0x0 0x1000>;
-       };
-
-       iomcu_rst: iomcu_rst_controller {
-               compatible = "hisilicon,hi3660-reset";
-               hisi,rst-syscon = <&iomcu>;
-               #reset-cells = <2>;
-       };
-
-Specifying reset lines connected to IP modules
-==============================================
-example:
-
-        i2c0: i2c@..... {
-                ...
-               resets = <&iomcu_rst 0x20 3>; /* offset: 0x20; bit: 3 */
-                ...
-        };
diff --git a/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.yaml b/Documentation/devicetree/bindings/reset/hisilicon,hi3660-reset.yaml
new file mode 100644 (file)
index 0000000..9bf4095
--- /dev/null
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/reset/hisilicon,hi3660-reset.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Hisilicon System Reset Controller
+
+maintainers:
+  - Wei Xu <xuwei5@hisilicon.com>
+
+description: |
+  Please also refer to reset.txt in this directory for common reset
+  controller binding usage.
+  The reset controller registers are part of the system-ctl block on
+  hi3660 and hi3670 SoCs.
+
+properties:
+  compatible:
+    oneOf:
+      - items:
+          - const: hisilicon,hi3660-reset
+      - items:
+          - const: hisilicon,hi3670-reset
+          - const: hisilicon,hi3660-reset
+
+  hisilicon,rst-syscon:
+    description: phandle of the reset's syscon.
+    $ref: /schemas/types.yaml#/definitions/phandle
+
+  '#reset-cells':
+    description: |
+      Specifies the number of cells needed to encode a reset source.
+      Cell #1 : offset of the reset assert control register from the syscon
+                register base
+                offset + 4: deassert control register
+                offset + 8: status control register
+      Cell #2 : bit position of the reset in the reset control register
+    const: 2
+
+required:
+  - compatible
+
+additionalProperties: false
+
+examples:
+  - |
+    #include <dt-bindings/interrupt-controller/irq.h>
+    #include <dt-bindings/interrupt-controller/arm-gic.h>
+    #include <dt-bindings/clock/hi3660-clock.h>
+
+    iomcu: iomcu@ffd7e000 {
+        compatible = "hisilicon,hi3660-iomcu", "syscon";
+        reg = <0xffd7e000 0x1000>;
+    };
+
+    iomcu_rst: iomcu_rst_controller {
+        compatible = "hisilicon,hi3660-reset";
+        hisilicon,rst-syscon = <&iomcu>;
+        #reset-cells = <2>;
+    };
+
+    /* Specifying reset lines connected to IP modules */
+    i2c@ffd71000 {
+        compatible = "snps,designware-i2c";
+        reg = <0xffd71000 0x1000>;
+        interrupts = <GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>;
+        #address-cells = <1>;
+        #size-cells = <0>;
+        clock-frequency = <400000>;
+        clocks = <&crg_ctrl HI3660_CLK_GATE_I2C0>;
+        resets = <&iomcu_rst 0x20 3>;
+        pinctrl-names = "default";
+        pinctrl-0 = <&i2c0_pmx_func &i2c0_cfg_func>;
+        status = "disabled";
+    };
+...
diff --git a/Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml b/Documentation/devicetree/bindings/sound/ti,j721e-cpb-audio.yaml
index 805da4d6a88ed53c2592e9c4cf0adf42f8320a15..ec06789b21dfc4ccd1b90cf96ba56374cd8848c0 100644 (file)
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/sound/ti,j721e-cpb-audio.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments J721e Common Processor Board Audio Support
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The audio support on the board is using pcm3168a codec connected to McASP10
diff --git a/Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml b/Documentation/devicetree/bindings/sound/ti,j721e-cpb-ivi-audio.yaml
index bb780f621628788c8983d706283ddf1da8a7217a..ee9f960de36b7c9eb083680d589b518272a0522d 100644 (file)
@@ -1,4 +1,6 @@
 # SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+# Copyright (C) 2020 Texas Instruments Incorporated
+# Author: Peter Ujfalusi <peter.ujfalusi@ti.com>
 %YAML 1.2
 ---
 $id: http://devicetree.org/schemas/sound/ti,j721e-cpb-ivi-audio.yaml#
@@ -7,7 +9,7 @@ $schema: http://devicetree.org/meta-schemas/core.yaml#
 title: Texas Instruments J721e Common Processor Board Audio Support
 
 maintainers:
-  - Peter Ujfalusi <peter.ujfalusi@ti.com>
+  - Peter Ujfalusi <peter.ujfalusi@gmail.com>
 
 description: |
   The Infotainment board plugs into the Common Processor Board, the support of the
diff --git a/Documentation/security/keys/core.rst b/Documentation/security/keys/core.rst
index aa0081685ee11fd7c5072e6fbd1a15d34a4e42a3..b3ed5c581034c74104ab82a32bb8a70c5c275658 100644 (file)
@@ -1040,8 +1040,8 @@ The keyctl syscall functions are:
 
      "key" is the ID of the key to be watched.
 
-     "queue_fd" is a file descriptor referring to an open "/dev/watch_queue"
-     which manages the buffer into which notifications will be delivered.
+     "queue_fd" is a file descriptor referring to an open pipe which
+     manages the buffer into which notifications will be delivered.
 
      "filter" is either NULL to remove a watch or a filter specification to
      indicate what events are required from the key.
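
For reference, a minimal userspace sketch of the watch sequence the patched
text describes — a notification pipe whose write end is passed to keyctl() as
"queue_fd". It assumes the mainline <linux/watch_queue.h> and <linux/keyctl.h>
constants (there the special pipe2() flag is spelled O_NOTIFICATION_PIPE) and
elides error handling::

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/keyctl.h>
    #include <linux/watch_queue.h>

    int main(void)
    {
            int fds[2];

            pipe2(fds, O_NOTIFICATION_PIPE);              /* fds[1] is queue_fd */
            ioctl(fds[1], IOC_WATCH_QUEUE_SET_SIZE, 256); /* ring of 256 slots */
            /* Watch the session keyring; 0x01 tags events from this watch. */
            syscall(SYS_keyctl, KEYCTL_WATCH_KEY,
                    KEY_SPEC_SESSION_KEYRING, fds[1], 0x01);
            /* read(fds[0], ...) then yields struct watch_notification events */
            return 0;
    }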
diff --git a/Documentation/watch_queue.rst b/Documentation/watch_queue.rst
index 54f13ad5fc1766248a74c6c685ad95be8f2ad230..85565e8a15dade916635095a63670d8ba00f6696 100644 (file)
@@ -8,6 +8,7 @@ opened by userspace.  This can be used in conjunction with::
 
   * Key/keyring notifications
 
+  * Mount notifications.
 
 The notifications buffers can be enabled by:
 
@@ -237,6 +238,11 @@ Any particular buffer can be fed from multiple sources.  Sources include:
 
     See Documentation/security/keys/core.rst for more information.
 
+  * WATCH_TYPE_MOUNT_NOTIFY
+
+    Notifications of this type indicate changes to mount attributes and the
+    mount topology within the subtree at the indicated point.
+
 
 Event Filtering
 ===============
@@ -296,9 +302,10 @@ A buffer is created with something like the following::
        pipe2(fds, O_TMPFILE);
        ioctl(fds[1], IOC_WATCH_QUEUE_SET_SIZE, 256);
 
-It can then be set to receive keyring change notifications::
+It can then be set to receive notifications::
 
        keyctl(KEYCTL_WATCH_KEY, KEY_SPEC_SESSION_KEYRING, fds[1], 0x01);
+       watch_mount(AT_FDCWD, "/", 0, fds[1], 0x02);
 
 The notifications can then be consumed by something like the following::
 
@@ -335,6 +342,9 @@ The notifications can then be consumed by something like the following::
                                case WATCH_TYPE_KEY_NOTIFY:
                                        saw_key_change(&n.n);
                                        break;
+                               case WATCH_TYPE_MOUNT_NOTIFY:
+                                       saw_mount_change(&n.n);
+                                       break;
                                }
 
                                p += len;
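
The watch_mount() call shown above is new with this series and has no libc
wrapper; a hedged sketch of invoking it raw follows. The number 442 matches
the syscall-table additions later in this commit, and the argument order is
inferred from the documentation example above — treat both as assumptions,
not a stable ABI::

    #define _GNU_SOURCE
    #include <fcntl.h>          /* AT_FDCWD */
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_watch_mount
    #define __NR_watch_mount 442    /* asm-generic number added by this commit */
    #endif

    /* Argument order inferred from the example above. */
    static long watch_mount(int dfd, const char *path, unsigned int flags,
                            int watch_fd, int watch_id)
    {
            return syscall(__NR_watch_mount, dfd, path, flags,
                           watch_fd, watch_id);
    }

    int main(void)
    {
            /* Watch the subtree at "/", tagging its events with ID 0x02;
             * the pipe setup (providing the watch fd) is elided here. */
            int queue_fd = 3;   /* placeholder for fds[1] from pipe2() */
            return (int)watch_mount(AT_FDCWD, "/", 0, queue_fd, 0x02);
    }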
diff --git a/MAINTAINERS b/MAINTAINERS
index f37dff07d8b63eaad6e66515c91bcf3f9ebf7de8..3aa423fb8315dc51f4fc6a91f19e72fa5d0fd3d8 100644 (file)
@@ -3556,7 +3556,7 @@ S:        Supported
 F:     drivers/net/ethernet/broadcom/bnxt/
 
 BROADCOM BRCM80211 IEEE802.11n WIRELESS DRIVER
-M:     Arend van Spriel <arend.vanspriel@broadcom.com>
+M:     Arend van Spriel <aspriel@gmail.com>
 M:     Franky Lin <franky.lin@broadcom.com>
 M:     Hante Meuleman <hante.meuleman@broadcom.com>
 M:     Chi-hsien Lin <chi-hsien.lin@infineon.com>
@@ -3890,6 +3890,15 @@ S:       Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
 F:     Documentation/devicetree/bindings/usb/cdns,usb3.yaml
 F:     drivers/usb/cdns3/
+X:     drivers/usb/cdns3/cdnsp*
+
+CADENCE USBSSP DRD IP DRIVER
+M:     Pawel Laszczak <pawell@cadence.com>
+L:     linux-usb@vger.kernel.org
+S:     Maintained
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git
+F:     drivers/usb/cdns3/
+X:     drivers/usb/cdns3/cdns3*
 
 CADET FM/AM RADIO RECEIVER DRIVER
 M:     Hans Verkuil <hverkuil@xs4all.nl>
@@ -5339,7 +5348,7 @@ F:        drivers/hwmon/dme1737.c
 DMI/SMBIOS SUPPORT
 M:     Jean Delvare <jdelvare@suse.com>
 S:     Maintained
-T:     quilt http://jdelvare.nerim.net/devel/linux/jdelvare-dmi/
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jdelvare/staging.git dmi-for-next
 F:     Documentation/ABI/testing/sysfs-firmware-dmi-tables
 F:     drivers/firmware/dmi-id.c
 F:     drivers/firmware/dmi_scan.c
@@ -12862,7 +12871,7 @@ F:      include/misc/ocxl*
 F:     include/uapi/misc/ocxl.h
 
 OMAP AUDIO SUPPORT
-M:     Peter Ujfalusi <peter.ujfalusi@ti.com>
+M:     Peter Ujfalusi <peter.ujfalusi@gmail.com>
 M:     Jarkko Nikula <jarkko.nikula@bitmer.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 L:     linux-omap@vger.kernel.org
@@ -17553,7 +17562,7 @@ F:      arch/xtensa/
 F:     drivers/irqchip/irq-xtensa-*
 
 TEXAS INSTRUMENTS ASoC DRIVERS
-M:     Peter Ujfalusi <peter.ujfalusi@ti.com>
+M:     Peter Ujfalusi <peter.ujfalusi@gmail.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
 F:     sound/soc/ti/
@@ -17565,6 +17574,19 @@ S:     Supported
 F:     Documentation/devicetree/bindings/iio/dac/ti,dac7612.txt
 F:     drivers/iio/dac/ti-dac7612.c
 
+TEXAS INSTRUMENTS DMA DRIVERS
+M:     Peter Ujfalusi <peter.ujfalusi@gmail.com>
+L:     dmaengine@vger.kernel.org
+S:     Maintained
+F:     Documentation/devicetree/bindings/dma/ti-dma-crossbar.txt
+F:     Documentation/devicetree/bindings/dma/ti-edma.txt
+F:     Documentation/devicetree/bindings/dma/ti/
+F:     drivers/dma/ti/
+X:     drivers/dma/ti/cppi41.c
+F:     include/linux/dma/k3-udma-glue.h
+F:     include/linux/dma/ti-cppi5.h
+F:     include/linux/dma/k3-psil.h
+
 TEXAS INSTRUMENTS' SYSTEM CONTROL INTERFACE (TISCI) PROTOCOL DRIVER
 M:     Nishanth Menon <nm@ti.com>
 M:     Tero Kristo <t-kristo@ti.com>
@@ -17850,7 +17872,7 @@ F:      Documentation/devicetree/bindings/net/nfc/trf7970a.txt
 F:     drivers/nfc/trf7970a.c
 
 TI TWL4030 SERIES SOC CODEC DRIVER
-M:     Peter Ujfalusi <peter.ujfalusi@ti.com>
+M:     Peter Ujfalusi <peter.ujfalusi@gmail.com>
 L:     alsa-devel@alsa-project.org (moderated for non-subscribers)
 S:     Maintained
 F:     sound/soc/codecs/twl4030*
diff --git a/arch/alpha/kernel/syscalls/syscall.tbl b/arch/alpha/kernel/syscalls/syscall.tbl
index a6617067dbe6457a9d8cae8f60823e10813344ce..d2e2ca96990e5de980b40e934603d6ba0b96cddd 100644 (file)
 549    common  faccessat2                      sys_faccessat2
 550    common  process_madvise                 sys_process_madvise
 551    common  epoll_pwait2                    sys_epoll_pwait2
+552    common  watch_mount                     sys_watch_mount
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 0c6bf0d1df7ad1ea1028f649559829cf6cca0793..578bdbbb0fa7fc40d2a14e895ac28ecf0d6b541d 100644 (file)
@@ -102,16 +102,22 @@ libs-y            += arch/arc/lib/ $(LIBGCC)
 
 boot           := arch/arc/boot
 
-#default target for make without any arguments.
-KBUILD_IMAGE   := $(boot)/bootpImage
-
-all:   bootpImage
-bootpImage: vmlinux
-
-boot_targets += uImage uImage.bin uImage.gz
+boot_targets := uImage.bin uImage.gz uImage.lzma
 
+PHONY += $(boot_targets)
 $(boot_targets): vmlinux
        $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
 
+uimage-default-y                       := uImage.bin
+uimage-default-$(CONFIG_KERNEL_GZIP)   := uImage.gz
+uimage-default-$(CONFIG_KERNEL_LZMA)   := uImage.lzma
+
+PHONY += uImage
+uImage: $(uimage-default-y)
+       @ln -sf $< $(boot)/uImage
+       @$(kecho) '  Image $(boot)/uImage is ready'
+
+CLEAN_FILES += $(boot)/uImage
+
 archclean:
        $(Q)$(MAKE) $(clean)=$(boot)
diff --git a/arch/arc/boot/Makefile b/arch/arc/boot/Makefile
index 538b92f4dd2530459c190c80517412200a46bd6e..5648748c285f52c46a5fbe8df1d5c8e683cafc88 100644 (file)
@@ -1,5 +1,4 @@
 # SPDX-License-Identifier: GPL-2.0
-targets := vmlinux.bin vmlinux.bin.gz uImage
 
 # uImage build relies on mkimage being availble on your host for ARC target
 # You will need to build u-boot for ARC, rename mkimage to arc-elf32-mkimage
@@ -7,23 +6,18 @@ targets := vmlinux.bin vmlinux.bin.gz uImage
 
 OBJCOPYFLAGS= -O binary -R .note -R .note.gnu.build-id -R .comment -S
 
-LINUX_START_TEXT = $$(readelf -h vmlinux | \
+LINUX_START_TEXT = $$($(READELF) -h vmlinux | \
                        grep "Entry point address" | grep -o 0x.*)
 
 UIMAGE_LOADADDR    = $(CONFIG_LINUX_LINK_BASE)
 UIMAGE_ENTRYADDR   = $(LINUX_START_TEXT)
 
-suffix-y := bin
-suffix-$(CONFIG_KERNEL_GZIP)   := gz
-suffix-$(CONFIG_KERNEL_LZMA)   := lzma
-
-targets += uImage
+targets += vmlinux.bin
+targets += vmlinux.bin.gz
+targets += vmlinux.bin.lzma
 targets += uImage.bin
 targets += uImage.gz
 targets += uImage.lzma
-extra-y += vmlinux.bin
-extra-y += vmlinux.bin.gz
-extra-y += vmlinux.bin.lzma
 
 $(obj)/vmlinux.bin: vmlinux FORCE
        $(call if_changed,objcopy)
@@ -42,7 +36,3 @@ $(obj)/uImage.gz: $(obj)/vmlinux.bin.gz FORCE
 
 $(obj)/uImage.lzma: $(obj)/vmlinux.bin.lzma FORCE
        $(call if_changed,uimage,lzma)
-
-$(obj)/uImage: $(obj)/uImage.$(suffix-y)
-       @ln -sf $(notdir $<) $@
-       @echo '  Image $@ is ready'
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index 7c6f2c11f0e105bc6fbe31805baf5c41d34de01f..902e295b309e9b7c60bb88bc8f19e0e2d898a016 100644 (file)
        };
 };
 
-&mac {
+&mac_sw {
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&cpsw_default>;
        pinctrl-1 = <&cpsw_sleep>;
        status = "okay";
-       slaves = <1>;
 };
 
-&davinci_mdio {
+&davinci_mdio_sw {
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&davinci_mdio_default>;
        pinctrl-1 = <&davinci_mdio_sleep>;
-       status = "okay";
 
        ethphy0: ethernet-phy@0 {
                reg = <0>;
        };
 };
 
-&cpsw_emac0 {
+&cpsw_port1 {
        phy-handle = <&ethphy0>;
        phy-mode = "rgmii-id";
+       ti,dual-emac-pvid = <1>;
+};
+
+&cpsw_port2 {
+        status = "disabled";
 };
 
 &tscadc {
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index b43b94122d3c5483df80b40e57ca5140214f519a..d5f8d5e2eb5d28c456f75c80194420d2db541c2e 100644 (file)
        };
 };
 
-&mac {
+&mac_sw {
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&cpsw_default>;
        pinctrl-1 = <&cpsw_sleep>;
-       dual_emac = <1>;
        status = "okay";
 };
 
-&davinci_mdio {
+&davinci_mdio_sw {
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&davinci_mdio_default>;
        pinctrl-1 = <&davinci_mdio_sleep>;
-       status = "okay";
 
        ethphy0: ethernet-phy@0 {
                reg = <0>;
        };
 };
 
-&cpsw_emac0 {
+&cpsw_port1 {
        phy-handle = <&ethphy0>;
        phy-mode = "rgmii-id";
-       dual_emac_res_vlan = <1>;
+       ti,dual-emac-pvid = <1>;
 };
 
-&cpsw_emac1 {
+&cpsw_port2 {
        phy-handle = <&ethphy1>;
        phy-mode = "rgmii-id";
-       dual_emac_res_vlan = <2>;
+       ti,dual-emac-pvid = <2>;
 };
 
 &mmc1 {
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index b958ab56a41237abc077c88cde863747223818c0..e923d065304d9f0064cfc58cea79dd5ad2183f55 100644 (file)
        };
 };
 
-&cpsw_emac0 {
+&cpsw_port1 {
        phy-handle = <&ethphy0>;
        phy-mode = "rmii";
-       dual_emac_res_vlan = <1>;
+       ti,dual-emac-pvid = <1>;
 };
 
-&cpsw_emac1 {
+&cpsw_port2 {
        phy-handle = <&ethphy1>;
        phy-mode = "rmii";
-       dual_emac_res_vlan = <2>;
+       ti,dual-emac-pvid = <2>;
 };
 
-&mac {
+&mac_sw {
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&cpsw_default>;
        pinctrl-1 = <&cpsw_sleep>;
        status = "okay";
-       dual_emac;
 };
 
-&davinci_mdio {
+&davinci_mdio_sw {
        pinctrl-names = "default", "sleep";
        pinctrl-0 = <&davinci_mdio_default>;
        pinctrl-1 = <&davinci_mdio_sleep>;
-       status = "okay";
        reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
        reset-delay-us = <2>;   /* PHY datasheet states 1uS min */
 
diff --git a/arch/arm/boot/dts/am33xx-l4.dtsi b/arch/arm/boot/dts/am33xx-l4.dtsi
index 78088506d25b0e97237cdcdc043e208e152152e4..1fb22088caebbc1eb5925752d726061ce88aa5f1 100644 (file)
                                        phys = <&phy_gmii_sel 2 1>;
                                };
                        };
+
+                       mac_sw: switch@0 {
+                               compatible = "ti,am335x-cpsw-switch", "ti,cpsw-switch";
+                               reg = <0x0 0x4000>;
+                               ranges = <0 0 0x4000>;
+                               clocks = <&cpsw_125mhz_gclk>;
+                               clock-names = "fck";
+                               #address-cells = <1>;
+                               #size-cells = <1>;
+                               syscon = <&scm_conf>;
+                               status = "disabled";
+
+                               interrupts = <40 41 42 43>;
+                               interrupt-names = "rx_thresh", "rx", "tx", "misc";
+
+                               ethernet-ports {
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+
+                                       cpsw_port1: port@1 {
+                                               reg = <1>;
+                                               label = "port1";
+                                               mac-address = [ 00 00 00 00 00 00 ];
+                                               phys = <&phy_gmii_sel 1 1>;
+                                       };
+
+                                       cpsw_port2: port@2 {
+                                               reg = <2>;
+                                               label = "port2";
+                                               mac-address = [ 00 00 00 00 00 00 ];
+                                               phys = <&phy_gmii_sel 2 1>;
+                                       };
+                               };
+
+                               davinci_mdio_sw: mdio@1000 {
+                                       compatible = "ti,cpsw-mdio","ti,davinci_mdio";
+                                       clocks = <&cpsw_125mhz_gclk>;
+                                       clock-names = "fck";
+                                       #address-cells = <1>;
+                                       #size-cells = <0>;
+                                       bus_freq = <1000000>;
+                                       reg = <0x1000 0x100>;
+                               };
+
+                               cpts {
+                                       clocks = <&cpsw_cpts_rft_clk>;
+                                       clock-names = "cpts";
+                               };
+                       };
                };
 
                target-module@180000 {                  /* 0x4a180000, ap 5 10.0 */
diff --git a/arch/arm/boot/dts/am574x-idk.dts b/arch/arm/boot/dts/am574x-idk.dts
index 37758761cd884b43821c431211b85dd24e05fd1d..1b8f3a28af0595bd880af77fca826052a44403df 100644 (file)
@@ -39,3 +39,7 @@
 &m_can0 {
        status = "disabled";
 };
+
+&emif1 {
+       status = "okay";
+};
diff --git a/arch/arm/boot/dts/bcm2711.dtsi b/arch/arm/boot/dts/bcm2711.dtsi
index 4847dd305317a69923553578068c3ecdf399861a..f53a51cc91f061f21432fe21c4d7f2553eb8bd5e 100644 (file)
 
 &dsi1 {
        interrupts = <GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>;
+       compatible = "brcm,bcm2711-dsi1";
 };
 
 &gpio {
diff --git a/arch/arm/boot/dts/dra71-evm.dts b/arch/arm/boot/dts/dra71-evm.dts
index cad58f733bd6f7bd344c6f0340b8ebb6178e6fdb..6d2cca6b44883e59ad312a8ea3d40879cdcdbcc9 100644 (file)
                                regulator-name = "lp8733-ldo0";
                                regulator-min-microvolt = <3300000>;
                                regulator-max-microvolt = <3300000>;
+                               regulator-boot-on;
+                               regulator-always-on;
                        };
 
                        lp8733_ldo1_reg: ldo1 {
diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
index 2f326151116b7a9ca06461536b3881d5b691aa8d..a09e7bd77fc7a60cecfdedf67bdfb37f1f09254f 100644 (file)
@@ -9,6 +9,13 @@
        compatible = "ti,dra762", "ti,dra7";
 
        ocp {
+               emif1: emif@4c000000 {
+                       compatible = "ti,emif-dra7xx";
+                       reg = <0x4c000000 0x200>;
+                       interrupts = <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
+                       status = "disabled";
+               };
+
                target-module@42c01900 {
                        compatible = "ti,sysc-dra7-mcan", "ti,sysc";
                        ranges = <0x0 0x42c00000 0x2000>;
        /* dra76x is not affected by i887 */
        max-frequency = <96000000>;
 };
+
+&cpu0_opp_table {
+       opp_plus@1800000000 {
+               opp-hz = /bits/ 64 <1800000000>;
+               opp-microvolt = <1250000 950000 1250000>,
+                               <1250000 950000 1250000>;
+               opp-supported-hw = <0xFF 0x08>;
+       };
+};
+
+&opp_supply_mpu {
+       ti,efuse-settings = <
+       /* uV   offset */
+       1060000 0x0
+       1160000 0x4
+       1210000 0x8
+       1250000 0xC
+       >;
+};
+
+&abb_mpu {
+       ti,abb_info = <
+       /*uV            ABB     efuse   rbb_m fbb_m     vset_m*/
+       1060000         0       0x0     0 0x02000000 0x01F00000
+       1160000         0       0x4     0 0x02000000 0x01F00000
+       1210000         0       0x8     0 0x02000000 0x01F00000
+       1250000         0       0xC     0 0x02000000 0x01F00000
+       >;
+};
diff --git a/arch/arm/boot/dts/omap3-gta04.dtsi b/arch/arm/boot/dts/omap3-gta04.dtsi
index c8745bc800f717530fccdf2da9b42fd9b8816ff9..cbe9ce73917099b55e5651790d5d5a63c2f97466 100644 (file)
                };
 
                twl_power: power {
-                       compatible = "ti,twl4030-power";
-                       ti,use_poweroff;
+                       compatible = "ti,twl4030-power-idle";
+                       ti,system-power-controller;
                };
        };
 };
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 11d41e86f814d26e42249d9ff96ea4c0745d3955..7dde9fbb06d33c83a0e4e5f36e0968ac94e45bfd 100644 (file)
                clock-names = "sysclk";
        };
 };
+
+&aes1_target {
+       status = "disabled";
+};
+
+&aes2_target {
+       status = "disabled";
+};
diff --git a/arch/arm/boot/dts/ste-ux500-samsung-golden.dts b/arch/arm/boot/dts/ste-ux500-samsung-golden.dts
index 496f9d3ba7b7ea4bff154769bc54ab1eb98b0b70..60fe6189e728c9ca989d34e73ae1fe1084fd8ecd 100644 (file)
                                panel@0 {
                                        compatible = "samsung,s6e63m0";
                                        reg = <0>;
+                                       max-brightness = <15>;
                                        vdd3-supply = <&panel_reg_3v0>;
                                        vci-supply = <&panel_reg_1v8>;
                                        reset-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
diff --git a/arch/arm/configs/aspeed_g5_defconfig b/arch/arm/configs/aspeed_g5_defconfig
index 047975eccefb3c7a8db6d2a2cf39ceb9fd09c81b..c793fc5d2e101b057592e14142279a17677e7c30 100644 (file)
@@ -258,6 +258,10 @@ CONFIG_UBIFS_FS=y
 CONFIG_SQUASHFS=y
 CONFIG_SQUASHFS_XZ=y
 CONFIG_SQUASHFS_ZSTD=y
+CONFIG_PSTORE=y
+CONFIG_PSTORE_CONSOLE=y
+CONFIG_PSTORE_PMSG=y
+CONFIG_PSTORE_RAM=y
 # CONFIG_NETWORK_FILESYSTEMS is not set
 CONFIG_HARDENED_USERCOPY=y
 CONFIG_FORTIFY_SOURCE=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 1c11d1557779aedde6500e11a0ac54c6c6613542..b515c31f0ab75e9ff718da641d8a6b6ee00d72f6 100644 (file)
@@ -279,6 +279,7 @@ CONFIG_SERIAL_OMAP_CONSOLE=y
 CONFIG_SERIAL_DEV_BUS=y
 CONFIG_I2C_CHARDEV=y
 CONFIG_SPI=y
+CONFIG_SPI_GPIO=m
 CONFIG_SPI_OMAP24XX=y
 CONFIG_SPI_TI_QSPI=m
 CONFIG_HSI=m
@@ -296,7 +297,6 @@ CONFIG_GPIO_TWL4030=y
 CONFIG_W1=m
 CONFIG_HDQ_MASTER_OMAP=m
 CONFIG_W1_SLAVE_DS250X=m
-CONFIG_POWER_AVS=y
 CONFIG_POWER_RESET=y
 CONFIG_POWER_RESET_GPIO=y
 CONFIG_BATTERY_BQ27XXX=m
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index f3191704cab9fc2c3600c74f29d761235e8b140d..56d6814bec26a042b89afc3879b10680aa95f4d4 100644 (file)
@@ -230,10 +230,12 @@ static int _omap_device_notifier_call(struct notifier_block *nb,
                break;
        case BUS_NOTIFY_BIND_DRIVER:
                od = to_omap_device(pdev);
-               if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED) &&
-                   pm_runtime_status_suspended(dev)) {
+               if (od) {
                        od->_driver_status = BUS_NOTIFY_BIND_DRIVER;
-                       pm_runtime_set_active(dev);
+                       if (od->_state == OMAP_DEVICE_STATE_ENABLED &&
+                           pm_runtime_status_suspended(dev)) {
+                               pm_runtime_set_active(dev);
+                       }
                }
                break;
        case BUS_NOTIFY_ADD_DEVICE:
diff --git a/arch/arm/mach-omap2/pmic-cpcap.c b/arch/arm/mach-omap2/pmic-cpcap.c
index eab281a5fc9f72432f053eef7a39a0b9cacdc967..09076ad0576d98d31cd4d16d97b3fe904143f95c 100644 (file)
@@ -71,7 +71,7 @@ static struct omap_voltdm_pmic omap_cpcap_iva = {
        .vp_vstepmin = OMAP4_VP_VSTEPMIN_VSTEPMIN,
        .vp_vstepmax = OMAP4_VP_VSTEPMAX_VSTEPMAX,
        .vddmin = 900000,
-       .vddmax = 1350000,
+       .vddmax = 1375000,
        .vp_timeout_us = OMAP4_VP_VLIMITTO_TIMEOUT_US,
        .i2c_slave_addr = 0x44,
        .volt_reg_addr = 0x0,
diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl
index 20e1170e2e0aaeb52ea10f1802c637164ea79b5e..96e59d529f48676676a002ecf7d7c8f65f268501 100644 (file)
 439    common  faccessat2                      sys_faccessat2
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2
+442    common  watch_mount                     sys_watch_mount
diff --git a/arch/arm64/boot/dts/bitmain/bm1880.dtsi b/arch/arm64/boot/dts/bitmain/bm1880.dtsi
index fa6e6905f58880f6f6b9447f1bd53ecaa8eb7236..53a9b76057aa1775b9a7f7e5348d2dea68fa45a8 100644 (file)
                                compatible = "snps,dw-apb-gpio-port";
                                gpio-controller;
                                #gpio-cells = <2>;
-                               snps,nr-gpios = <32>;
+                               ngpios = <32>;
                                reg = <0>;
                                interrupt-controller;
                                #interrupt-cells = <2>;
                                compatible = "snps,dw-apb-gpio-port";
                                gpio-controller;
                                #gpio-cells = <2>;
-                               snps,nr-gpios = <32>;
+                               ngpios = <32>;
                                reg = <0>;
                                interrupt-controller;
                                #interrupt-cells = <2>;
                                compatible = "snps,dw-apb-gpio-port";
                                gpio-controller;
                                #gpio-cells = <2>;
-                               snps,nr-gpios = <8>;
+                               ngpios = <8>;
                                reg = <0>;
                                interrupt-controller;
                                #interrupt-cells = <2>;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 11beda85ee7e536693fd65751aebe880421f0c20..8fcfab0c25672db32d47f7eb78fdda1671595ddc 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/jump_label.h>
 #include <linux/kvm_types.h>
 #include <linux/percpu.h>
+#include <linux/psci.h>
 #include <asm/arch_gicv3.h>
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
@@ -240,6 +241,28 @@ struct kvm_host_data {
        struct kvm_pmu_events pmu_events;
 };
 
+struct kvm_host_psci_config {
+       /* PSCI version used by host. */
+       u32 version;
+
+       /* Function IDs used by host if version is v0.1. */
+       struct psci_0_1_function_ids function_ids_0_1;
+
+       bool psci_0_1_cpu_suspend_implemented;
+       bool psci_0_1_cpu_on_implemented;
+       bool psci_0_1_cpu_off_implemented;
+       bool psci_0_1_migrate_implemented;
+};
+
+extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
+#define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
+
+extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
+#define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)
+
+extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
+#define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
+
 struct vcpu_reset_state {
        unsigned long   pc;
        unsigned long   r0;
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 86a9d7b3eabe9ee254df4abd7f4fd008176d1cf5..949788f5ba4007049d0e92b55895ef738c1949fa 100644 (file)
@@ -38,7 +38,7 @@
 #define __ARM_NR_compat_set_tls                (__ARM_NR_COMPAT_BASE + 5)
 #define __ARM_NR_COMPAT_END            (__ARM_NR_COMPAT_BASE + 0x800)
 
-#define __NR_compat_syscalls           442
+#define __NR_compat_syscalls           443
 #endif
 
 #define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index cccfbbefbf952e2c37d3cb39e046a3877df40638..da564a18586d7ed8958aa044f42b7109fd3b929f 100644 (file)
@@ -891,6 +891,8 @@ __SYSCALL(__NR_faccessat2, sys_faccessat2)
 __SYSCALL(__NR_process_madvise, sys_process_madvise)
 #define __NR_epoll_pwait2 441
 __SYSCALL(__NR_epoll_pwait2, compat_sys_epoll_pwait2)
+#define __NR_watch_mount 442
+__SYSCALL(__NR_watch_mount, sys_watch_mount)
 
 /*
  * Please add new compat syscalls above this comment and update
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 6bc3a3698c3d1ea9178a239c8ad96835b0d9d1c2..d08948c6979b63e98c5c76a0a7a58b8824c9dd76 100644 (file)
@@ -434,7 +434,7 @@ static void __init hyp_mode_check(void)
                           "CPU: CPUs started in inconsistent modes");
        else
                pr_info("CPU: All CPU(s) started at EL1\n");
-       if (IS_ENABLED(CONFIG_KVM))
+       if (IS_ENABLED(CONFIG_KVM) && !is_kernel_in_hyp_mode())
                kvm_compute_layout();
 }
 
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 6e637d2b4cfb7fe0978fd491f4506da0cc7793e7..e207e4541f55200703bf07644c90359db73b054a 100644 (file)
@@ -65,10 +65,6 @@ static bool vgic_present;
 static DEFINE_PER_CPU(unsigned char, kvm_arm_hardware_enabled);
 DEFINE_STATIC_KEY_FALSE(userspace_irqchip_in_use);
 
-extern u64 kvm_nvhe_sym(__cpu_logical_map)[NR_CPUS];
-extern u32 kvm_nvhe_sym(kvm_host_psci_version);
-extern struct psci_0_1_function_ids kvm_nvhe_sym(kvm_host_psci_0_1_function_ids);
-
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
 {
        return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
@@ -1604,9 +1600,12 @@ static void init_cpu_logical_map(void)
         * allow any other CPUs from the `possible` set to boot.
         */
        for_each_online_cpu(cpu)
-               kvm_nvhe_sym(__cpu_logical_map)[cpu] = cpu_logical_map(cpu);
+               hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu);
 }
 
+#define init_psci_0_1_impl_state(config, what) \
+       config.psci_0_1_ ## what ## _implemented = psci_ops.what
+
 static bool init_psci_relay(void)
 {
        /*
@@ -1618,8 +1617,15 @@ static bool init_psci_relay(void)
                return false;
        }
 
-       kvm_nvhe_sym(kvm_host_psci_version) = psci_ops.get_version();
-       kvm_nvhe_sym(kvm_host_psci_0_1_function_ids) = get_psci_0_1_function_ids();
+       kvm_host_psci_config.version = psci_ops.get_version();
+
+       if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) {
+               kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids();
+               init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend);
+               init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on);
+               init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off);
+               init_psci_0_1_impl_state(kvm_host_psci_config, migrate);
+       }
        return true;
 }
 
diff --git a/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h b/arch/arm64/kvm/hyp/include/hyp/adjust_pc.h
index b1f60923a8feb80ff676a0ba84f3b23f3e4f398c..61716359035d6fa2a600a4def8d731681617de09 100644 (file)
@@ -59,4 +59,13 @@ static inline void __adjust_pc(struct kvm_vcpu *vcpu)
        }
 }
 
+/*
+ * Skip an instruction while host sysregs are live.
+ * Assumes host is always 64-bit.
+ */
+static inline void kvm_skip_host_instr(void)
+{
+       write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
+}
+
 #endif
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-main.c b/arch/arm64/kvm/hyp/nvhe/hyp-main.c
index bde658d51404b46a64f6406e250f7713b191e622..a906f9e2ff34fba2d5588263b1f024e23b09963d 100644 (file)
@@ -157,11 +157,6 @@ static void default_host_smc_handler(struct kvm_cpu_context *host_ctxt)
        __kvm_hyp_host_forward_smc(host_ctxt);
 }
 
-static void skip_host_instruction(void)
-{
-       write_sysreg_el2(read_sysreg_el2(SYS_ELR) + 4, SYS_ELR);
-}
-
 static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
 {
        bool handled;
@@ -170,11 +165,8 @@ static void handle_host_smc(struct kvm_cpu_context *host_ctxt)
        if (!handled)
                default_host_smc_handler(host_ctxt);
 
-       /*
-        * Unlike HVC, the return address of an SMC is the instruction's PC.
-        * Move the return address past the instruction.
-        */
-       skip_host_instruction();
+       /* SMC was trapped, move ELR past the current PC. */
+       kvm_skip_host_instr();
 }
 
 void handle_trap(struct kvm_cpu_context *host_ctxt)
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
index cbab0c6246e20fdafe100846d7471ab775adda0b..2997aa156d8e5c2d17ad241e1abcc388c6116276 100644 (file)
  * Other CPUs should not be allowed to boot because their features were
  * not checked against the finalized system capabilities.
  */
-u64 __ro_after_init __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
+u64 __ro_after_init hyp_cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };
 
 u64 cpu_logical_map(unsigned int cpu)
 {
-       if (cpu >= ARRAY_SIZE(__cpu_logical_map))
+       if (cpu >= ARRAY_SIZE(hyp_cpu_logical_map))
                hyp_panic();
 
-       return __cpu_logical_map[cpu];
+       return hyp_cpu_logical_map[cpu];
 }
 
 unsigned long __hyp_per_cpu_offset(unsigned int cpu)
diff --git a/arch/arm64/kvm/hyp/nvhe/psci-relay.c b/arch/arm64/kvm/hyp/nvhe/psci-relay.c
index 08dc9de693147fbb964271110940b99bc2c4727a..e3947846ffcb9acd8d4528190741fdb48325e711 100644 (file)
@@ -7,11 +7,8 @@
 #include <asm/kvm_asm.h>
 #include <asm/kvm_hyp.h>
 #include <asm/kvm_mmu.h>
-#include <kvm/arm_hypercalls.h>
 #include <linux/arm-smccc.h>
 #include <linux/kvm_host.h>
-#include <linux/psci.h>
-#include <kvm/arm_psci.h>
 #include <uapi/linux/psci.h>
 
 #include <nvhe/trap_handler.h>
@@ -22,9 +19,8 @@ void kvm_hyp_cpu_resume(unsigned long r0);
 void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 
 /* Config options set by the host. */
-__ro_after_init u32 kvm_host_psci_version;
-__ro_after_init struct psci_0_1_function_ids kvm_host_psci_0_1_function_ids;
-__ro_after_init s64 hyp_physvirt_offset;
+struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
+s64 __ro_after_init hyp_physvirt_offset;
 
 #define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)
 
@@ -47,19 +43,16 @@ struct psci_boot_args {
 static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
 static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;
 
-static u64 get_psci_func_id(struct kvm_cpu_context *host_ctxt)
-{
-       DECLARE_REG(u64, func_id, host_ctxt, 0);
-
-       return func_id;
-}
+#define        is_psci_0_1(what, func_id)                                      \
+       (kvm_host_psci_config.psci_0_1_ ## what ## _implemented &&      \
+        (func_id) == kvm_host_psci_config.function_ids_0_1.what)
 
 static bool is_psci_0_1_call(u64 func_id)
 {
-       return (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend) ||
-              (func_id == kvm_host_psci_0_1_function_ids.cpu_on) ||
-              (func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
-              (func_id == kvm_host_psci_0_1_function_ids.migrate);
+       return (is_psci_0_1(cpu_suspend, func_id) ||
+               is_psci_0_1(cpu_on, func_id) ||
+               is_psci_0_1(cpu_off, func_id) ||
+               is_psci_0_1(migrate, func_id));
 }
 
 static bool is_psci_0_2_call(u64 func_id)
@@ -69,16 +62,6 @@ static bool is_psci_0_2_call(u64 func_id)
               (PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
 }
 
-static bool is_psci_call(u64 func_id)
-{
-       switch (kvm_host_psci_version) {
-       case PSCI_VERSION(0, 1):
-               return is_psci_0_1_call(func_id);
-       default:
-               return is_psci_0_2_call(func_id);
-       }
-}
-
 static unsigned long psci_call(unsigned long fn, unsigned long arg0,
                               unsigned long arg1, unsigned long arg2)
 {
@@ -248,15 +231,14 @@ asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
 
 static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
 {
-       if ((func_id == kvm_host_psci_0_1_function_ids.cpu_off) ||
-           (func_id == kvm_host_psci_0_1_function_ids.migrate))
+       if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
                return psci_forward(host_ctxt);
-       else if (func_id == kvm_host_psci_0_1_function_ids.cpu_on)
+       if (is_psci_0_1(cpu_on, func_id))
                return psci_cpu_on(func_id, host_ctxt);
-       else if (func_id == kvm_host_psci_0_1_function_ids.cpu_suspend)
+       if (is_psci_0_1(cpu_suspend, func_id))
                return psci_cpu_suspend(func_id, host_ctxt);
-       else
-               return PSCI_RET_NOT_SUPPORTED;
+
+       return PSCI_RET_NOT_SUPPORTED;
 }
 
 static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
@@ -298,20 +280,23 @@ static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_
 
 bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
 {
-       u64 func_id = get_psci_func_id(host_ctxt);
+       DECLARE_REG(u64, func_id, host_ctxt, 0);
        unsigned long ret;
 
-       if (!is_psci_call(func_id))
-               return false;
-
-       switch (kvm_host_psci_version) {
+       switch (kvm_host_psci_config.version) {
        case PSCI_VERSION(0, 1):
+               if (!is_psci_0_1_call(func_id))
+                       return false;
                ret = psci_0_1_handler(func_id, host_ctxt);
                break;
        case PSCI_VERSION(0, 2):
+               if (!is_psci_0_2_call(func_id))
+                       return false;
                ret = psci_0_2_handler(func_id, host_ctxt);
                break;
        default:
+               if (!is_psci_0_2_call(func_id))
+                       return false;
                ret = psci_1_0_handler(func_id, host_ctxt);
                break;
        }
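
The is_psci_0_1() helper above pairs each PSCI 0.1 function-ID field with its
*_implemented flag via token pasting. A standalone sketch of the same pattern,
using hypothetical stand-in structures rather than the kernel's::

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical stand-ins mirroring the kernel structures above. */
    struct function_ids { uint64_t cpu_on, cpu_off; };

    static struct {
            struct function_ids function_ids_0_1;
            bool psci_0_1_cpu_on_implemented;
            bool psci_0_1_cpu_off_implemented;
    } cfg = { { 0x84000003, 0x84000002 }, true, false };

    /* Same trick as the kernel macro: 'what' is pasted into the flag name
     * and substituted as the member name, so one macro checks both. */
    #define is_psci_0_1(what, id)                           \
            (cfg.psci_0_1_ ## what ## _implemented &&       \
             (id) == cfg.function_ids_0_1.what)

    int main(void)
    {
            printf("%d %d\n",
                   is_psci_0_1(cpu_on, 0x84000003),   /* 1: flag set, ID match */
                   is_psci_0_1(cpu_off, 0x84000002)); /* 0: flag clear */
            return 0;
    }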
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3313dedfa5053413bae960bd44eee01439cb63df..d46e7f706cb060e82d344841a890db9abea6f266 100644 (file)
@@ -594,6 +594,10 @@ static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
 {
        u64 pmcr, val;
 
+       /* No PMU available, PMCR_EL0 may UNDEF... */
+       if (!kvm_arm_support_pmu_v3())
+               return;
+
        pmcr = read_sysreg(pmcr_el0);
        /*
         * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index d8cc51bd60bf22a7769de23536fca461413f7bb9..70fcd6a12fe1f1f1f7830e0f4370aff3b9ce98c4 100644 (file)
@@ -34,17 +34,16 @@ static u64 __early_kern_hyp_va(u64 addr)
 }
 
 /*
- * Store a hyp VA <-> PA offset into a hyp-owned variable.
+ * Store a hyp VA <-> PA offset into an EL2-owned variable.
  */
 static void init_hyp_physvirt_offset(void)
 {
-       extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
        u64 kern_va, hyp_va;
 
        /* Compute the offset from the hyp VA and PA of a random symbol. */
-       kern_va = (u64)kvm_ksym_ref(__hyp_text_start);
+       kern_va = (u64)lm_alias(__hyp_text_start);
        hyp_va = __early_kern_hyp_va(kern_va);
-       CHOOSE_NVHE_SYM(hyp_physvirt_offset) = (s64)__pa(kern_va) - (s64)hyp_va;
+       hyp_physvirt_offset = (s64)__pa(kern_va) - (s64)hyp_va;
 }
 
 /*
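
With hyp_physvirt_offset defined as the PA minus the hyp VA of the same symbol, EL2 gains a constant translation in both directions. A one-line sketch of the resulting relation (the helper name is illustrative, not from the patch):

/* pa = va + offset, since offset = __pa(sym) - hyp_va(sym) */
static inline phys_addr_t hyp_va_to_phys(unsigned long hyp_va)
{
	return (phys_addr_t)(hyp_va + hyp_physvirt_offset);
}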
index 89dd2fcf38fa168c16d15f028e0442e97193463c..af640a41b749f9fbdca5a7353e2d88a0bf82980a 100644 (file)
@@ -7,7 +7,7 @@ config CSKY
        select ARCH_HAS_SYNC_DMA_FOR_CPU
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_USE_BUILTIN_BSWAP
-       select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2
+       select ARCH_USE_QUEUED_RWLOCKS
        select ARCH_WANT_FRAME_POINTERS if !CPU_CK610
        select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
        select COMMON_CLK
@@ -48,6 +48,7 @@ config CSKY
        select HAVE_FUNCTION_TRACER
        select HAVE_FUNCTION_GRAPH_TRACER
        select HAVE_FUNCTION_ERROR_INJECTION
+       select HAVE_FUTEX_CMPXCHG if FUTEX && SMP
        select HAVE_FTRACE_MCOUNT_RECORD
        select HAVE_KERNEL_GZIP
        select HAVE_KERNEL_LZO
@@ -192,6 +193,22 @@ config CPU_CK860
 endchoice
 
 choice
+       prompt "PAGE OFFSET"
+       default PAGE_OFFSET_80000000
+
+config PAGE_OFFSET_80000000
+       bool "PAGE OFFSET 2G (user:kernel = 2:2)"
+
+config PAGE_OFFSET_A0000000
+       bool "PAGE OFFSET 2.5G (user:kernel = 2.5:1.5)"
+endchoice
+
+config PAGE_OFFSET
+       hex
+       default 0x80000000 if PAGE_OFFSET_80000000
+       default 0xa0000000 if PAGE_OFFSET_A0000000
+choice
+
        prompt "C-SKY PMU type"
        depends on PERF_EVENTS
        depends on CPU_CK807 || CPU_CK810 || CPU_CK860
index ba8eb58708351a8296d64268408be9a191103e0c..cceb3afb4c91679058aece518d29f8b77a7f75c7 100644 (file)
@@ -89,13 +89,13 @@ static inline void tlb_invalid_indexed(void)
        cpwcr("cpcr8", 0x02000000);
 }
 
-static inline void setup_pgd(unsigned long pgd, bool kernel)
+static inline void setup_pgd(pgd_t *pgd)
 {
-       cpwcr("cpcr29", pgd | BIT(0));
+       cpwcr("cpcr29", __pa(pgd) | BIT(0));
 }
 
-static inline unsigned long get_pgd(void)
+static inline pgd_t *get_pgd(void)
 {
-       return cprcr("cpcr29") & ~BIT(0);
+       return __va(cprcr("cpcr29") & ~BIT(0));
 }
 #endif /* __ASM_CSKY_CKMMUV1_H */
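
After this signature change the __pa()/__va() conversions live inside the MMU helpers, so callers stay entirely in kernel virtual pointers. A usage sketch (the round-trip check is illustrative, not part of the patch):

	setup_pgd(next->pgd);		/* was: setup_pgd(__pa(next->pgd), false) */
	BUG_ON(get_pgd() != next->pgd);	/* get_pgd() now returns a VA as well */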
index 73ded7c72482b8355372201e2a55f35c5cc123b1..c39b13810550f1172e04d5342828e8e6cf5e0e56 100644 (file)
@@ -100,16 +100,16 @@ static inline void tlb_invalid_indexed(void)
        mtcr("cr<8, 15>", 0x02000000);
 }
 
-static inline void setup_pgd(unsigned long pgd, bool kernel)
+static inline void setup_pgd(pgd_t *pgd)
 {
-       if (kernel)
-               mtcr("cr<28, 15>", pgd | BIT(0));
-       else
-               mtcr("cr<29, 15>", pgd | BIT(0));
+#ifdef CONFIG_CPU_HAS_TLBI
+       mtcr("cr<28, 15>", __pa(pgd) | BIT(0));
+#endif
+       mtcr("cr<29, 15>", __pa(pgd) | BIT(0));
 }
 
-static inline unsigned long get_pgd(void)
+static inline pgd_t *get_pgd(void)
 {
-       return mfcr("cr<29, 15>") & ~BIT(0);
+       return __va(mfcr("cr<29, 15>") & ~BIT(0));
 }
 #endif /* __ASM_CSKY_CKMMUV2_H */
index bedcc6f06bba23bbd99992b83bab860b38bdc7af..ab382bca73a2f63ba2ec24d737f2c473e5c44b4b 100644 (file)
@@ -26,6 +26,9 @@
        stw     tls, (sp, 0)
        stw     lr, (sp, 4)
 
+       RD_MEH  lr
+       WR_MEH  lr
+
        mfcr    lr, epc
        movi    tls, \epc_inc
        add     lr, tls
        mtcr    \rx, cr<8, 15>
 .endm
 
+#ifdef CONFIG_PAGE_OFFSET_80000000
+#define MSA_SET cr<30, 15>
+#define MSA_CLR cr<31, 15>
+#endif
+
+#ifdef CONFIG_PAGE_OFFSET_A0000000
+#define MSA_SET cr<31, 15>
+#define MSA_CLR cr<30, 15>
+#endif
+
 .macro SETUP_MMU
        /* Init psr and enable ee */
        lrw     r6, DEFAULT_PSR_VALUE
         * 31 - 29 | 28 - 9 | 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
         *   BA     Reserved  SH  WA  B   SO SEC  C   D   V
         */
-       mfcr    r6, cr<30, 15> /* Get MSA0 */
+       mfcr    r6, MSA_SET /* Get MSA */
 2:
        lsri    r6, 29
        lsli    r6, 29
        addi    r6, 0x1ce
-       mtcr    r6, cr<30, 15> /* Set MSA0 */
+       mtcr    r6, MSA_SET /* Set MSA */
 
        movi    r6, 0
-       mtcr    r6, cr<31, 15> /* Clr MSA1 */
+       mtcr    r6, MSA_CLR /* Clr MSA */
 
        /* enable MMU */
        mfcr    r6, cr18
diff --git a/arch/csky/include/asm/atomic.h b/arch/csky/include/asm/atomic.h
deleted file mode 100644 (file)
index e369d73..0000000
+++ /dev/null
@@ -1,212 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-#ifndef __ASM_CSKY_ATOMIC_H
-#define __ASM_CSKY_ATOMIC_H
-
-#include <linux/version.h>
-#include <asm/cmpxchg.h>
-#include <asm/barrier.h>
-
-#ifdef CONFIG_CPU_HAS_LDSTEX
-
-#define __atomic_add_unless __atomic_add_unless
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-       unsigned long tmp, ret;
-
-       smp_mb();
-
-       asm volatile (
-       "1:     ldex.w          %0, (%3) \n"
-       "       mov             %1, %0   \n"
-       "       cmpne           %0, %4   \n"
-       "       bf              2f       \n"
-       "       add             %0, %2   \n"
-       "       stex.w          %0, (%3) \n"
-       "       bez             %0, 1b   \n"
-       "2:                              \n"
-               : "=&r" (tmp), "=&r" (ret)
-               : "r" (a), "r"(&v->counter), "r"(u)
-               : "memory");
-
-       if (ret != u)
-               smp_mb();
-
-       return ret;
-}
-
-#define ATOMIC_OP(op, c_op)                                            \
-static inline void atomic_##op(int i, atomic_t *v)                     \
-{                                                                      \
-       unsigned long tmp;                                              \
-                                                                       \
-       asm volatile (                                                  \
-       "1:     ldex.w          %0, (%2) \n"                            \
-       "       " #op "         %0, %1   \n"                            \
-       "       stex.w          %0, (%2) \n"                            \
-       "       bez             %0, 1b   \n"                            \
-               : "=&r" (tmp)                                           \
-               : "r" (i), "r"(&v->counter)                             \
-               : "memory");                                            \
-}
-
-#define ATOMIC_OP_RETURN(op, c_op)                                     \
-static inline int atomic_##op##_return(int i, atomic_t *v)             \
-{                                                                      \
-       unsigned long tmp, ret;                                         \
-                                                                       \
-       smp_mb();                                                       \
-       asm volatile (                                                  \
-       "1:     ldex.w          %0, (%3) \n"                            \
-       "       " #op "         %0, %2   \n"                            \
-       "       mov             %1, %0   \n"                            \
-       "       stex.w          %0, (%3) \n"                            \
-       "       bez             %0, 1b   \n"                            \
-               : "=&r" (tmp), "=&r" (ret)                              \
-               : "r" (i), "r"(&v->counter)                             \
-               : "memory");                                            \
-       smp_mb();                                                       \
-                                                                       \
-       return ret;                                                     \
-}
-
-#define ATOMIC_FETCH_OP(op, c_op)                                      \
-static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
-{                                                                      \
-       unsigned long tmp, ret;                                         \
-                                                                       \
-       smp_mb();                                                       \
-       asm volatile (                                                  \
-       "1:     ldex.w          %0, (%3) \n"                            \
-       "       mov             %1, %0   \n"                            \
-       "       " #op "         %0, %2   \n"                            \
-       "       stex.w          %0, (%3) \n"                            \
-       "       bez             %0, 1b   \n"                            \
-               : "=&r" (tmp), "=&r" (ret)                              \
-               : "r" (i), "r"(&v->counter)                             \
-               : "memory");                                            \
-       smp_mb();                                                       \
-                                                                       \
-       return ret;                                                     \
-}
-
-#else /* CONFIG_CPU_HAS_LDSTEX */
-
-#include <linux/irqflags.h>
-
-#define __atomic_add_unless __atomic_add_unless
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
-       unsigned long tmp, ret, flags;
-
-       raw_local_irq_save(flags);
-
-       asm volatile (
-       "       ldw             %0, (%3) \n"
-       "       mov             %1, %0   \n"
-       "       cmpne           %0, %4   \n"
-       "       bf              2f       \n"
-       "       add             %0, %2   \n"
-       "       stw             %0, (%3) \n"
-       "2:                              \n"
-               : "=&r" (tmp), "=&r" (ret)
-               : "r" (a), "r"(&v->counter), "r"(u)
-               : "memory");
-
-       raw_local_irq_restore(flags);
-
-       return ret;
-}
-
-#define ATOMIC_OP(op, c_op)                                            \
-static inline void atomic_##op(int i, atomic_t *v)                     \
-{                                                                      \
-       unsigned long tmp, flags;                                       \
-                                                                       \
-       raw_local_irq_save(flags);                                      \
-                                                                       \
-       asm volatile (                                                  \
-       "       ldw             %0, (%2) \n"                            \
-       "       " #op "         %0, %1   \n"                            \
-       "       stw             %0, (%2) \n"                            \
-               : "=&r" (tmp)                                           \
-               : "r" (i), "r"(&v->counter)                             \
-               : "memory");                                            \
-                                                                       \
-       raw_local_irq_restore(flags);                                   \
-}
-
-#define ATOMIC_OP_RETURN(op, c_op)                                     \
-static inline int atomic_##op##_return(int i, atomic_t *v)             \
-{                                                                      \
-       unsigned long tmp, ret, flags;                                  \
-                                                                       \
-       raw_local_irq_save(flags);                                      \
-                                                                       \
-       asm volatile (                                                  \
-       "       ldw             %0, (%3) \n"                            \
-       "       " #op "         %0, %2   \n"                            \
-       "       stw             %0, (%3) \n"                            \
-       "       mov             %1, %0   \n"                            \
-               : "=&r" (tmp), "=&r" (ret)                              \
-               : "r" (i), "r"(&v->counter)                             \
-               : "memory");                                            \
-                                                                       \
-       raw_local_irq_restore(flags);                                   \
-                                                                       \
-       return ret;                                                     \
-}
-
-#define ATOMIC_FETCH_OP(op, c_op)                                      \
-static inline int atomic_fetch_##op(int i, atomic_t *v)                        \
-{                                                                      \
-       unsigned long tmp, ret, flags;                                  \
-                                                                       \
-       raw_local_irq_save(flags);                                      \
-                                                                       \
-       asm volatile (                                                  \
-       "       ldw             %0, (%3) \n"                            \
-       "       mov             %1, %0   \n"                            \
-       "       " #op "         %0, %2   \n"                            \
-       "       stw             %0, (%3) \n"                            \
-               : "=&r" (tmp), "=&r" (ret)                              \
-               : "r" (i), "r"(&v->counter)                             \
-               : "memory");                                            \
-                                                                       \
-       raw_local_irq_restore(flags);                                   \
-                                                                       \
-       return ret;                                                     \
-}
-
-#endif /* CONFIG_CPU_HAS_LDSTEX */
-
-#define atomic_add_return atomic_add_return
-ATOMIC_OP_RETURN(add, +)
-#define atomic_sub_return atomic_sub_return
-ATOMIC_OP_RETURN(sub, -)
-
-#define atomic_fetch_add atomic_fetch_add
-ATOMIC_FETCH_OP(add, +)
-#define atomic_fetch_sub atomic_fetch_sub
-ATOMIC_FETCH_OP(sub, -)
-#define atomic_fetch_and atomic_fetch_and
-ATOMIC_FETCH_OP(and, &)
-#define atomic_fetch_or atomic_fetch_or
-ATOMIC_FETCH_OP(or, |)
-#define atomic_fetch_xor atomic_fetch_xor
-ATOMIC_FETCH_OP(xor, ^)
-
-#define atomic_and atomic_and
-ATOMIC_OP(and, &)
-#define atomic_or atomic_or
-ATOMIC_OP(or, |)
-#define atomic_xor atomic_xor
-ATOMIC_OP(xor, ^)
-
-#undef ATOMIC_FETCH_OP
-#undef ATOMIC_OP_RETURN
-#undef ATOMIC_OP
-
-#include <asm-generic/atomic.h>
-
-#endif /* __ASM_CSKY_ATOMIC_H */
index a430e7fddf350cff5e5f24b5719b66b54b3da426..117e6224defa3f6c64c47f4f32373dad5a86a9dc 100644 (file)
@@ -8,6 +8,61 @@
 
 #define nop()  asm volatile ("nop\n":::"memory")
 
+#ifdef CONFIG_SMP
+
+/*
+ * bar.brwarws: ordering barrier for all load/store instructions
+ *              before/after
+ *
+ * |31|30 26|25 21|20 16|15  10|9   5|4           0|
+ *  1  10000 00000 00000 100001 00001 0 bw br aw ar
+ *
+ * b: before
+ * a: after
+ * r: read
+ * w: write
+ *
+ * Here are all combinations:
+ *
+ * bar.brw
+ * bar.br
+ * bar.bw
+ * bar.arw
+ * bar.ar
+ * bar.aw
+ * bar.brwarw
+ * bar.brarw
+ * bar.bwarw
+ * bar.brwar
+ * bar.brwaw
+ * bar.brar
+ * bar.bwaw
+ */
+#define __bar_brw()    asm volatile (".long 0x842cc000\n":::"memory")
+#define __bar_br()     asm volatile (".long 0x8424c000\n":::"memory")
+#define __bar_bw()     asm volatile (".long 0x8428c000\n":::"memory")
+#define __bar_arw()    asm volatile (".long 0x8423c000\n":::"memory")
+#define __bar_ar()     asm volatile (".long 0x8421c000\n":::"memory")
+#define __bar_aw()     asm volatile (".long 0x8422c000\n":::"memory")
+#define __bar_brwarw() asm volatile (".long 0x842fc000\n":::"memory")
+#define __bar_brarw()  asm volatile (".long 0x8427c000\n":::"memory")
+#define __bar_bwarw()  asm volatile (".long 0x842bc000\n":::"memory")
+#define __bar_brwar()  asm volatile (".long 0x842dc000\n":::"memory")
+#define __bar_brwaw()  asm volatile (".long 0x842ec000\n":::"memory")
+#define __bar_brar()   asm volatile (".long 0x8425c000\n":::"memory")
+#define __bar_bwaw()   asm volatile (".long 0x842ac000\n":::"memory")
+
+#define __smp_mb()     __bar_brwarw()
+#define __smp_rmb()    __bar_brar()
+#define __smp_wmb()    __bar_bwaw()
+
+#define ACQUIRE_FENCE          ".long 0x8427c000\n"
+#define __smp_acquire_fence()  __bar_brarw()
+#define __smp_release_fence()  __bar_brwaw()
+
+#endif /* CONFIG_SMP */
+
 /*
  * sync:        completion barrier, all sync.xx instructions
  *              guarantee the last response received by bus transaction
  * sync.s:      inherit from sync, but also shareable to other cores
  * sync.i:      inherit from sync, but also flush cpu pipeline
  * sync.is:     the same with sync.i + sync.s
- *
- * bar.brwarw:  ordering barrier for all load/store instructions before it
- * bar.brwarws: ordering barrier for all load/store instructions before it
- *                                             and shareable to other cores
- * bar.brar:    ordering barrier for all load       instructions before it
- * bar.brars:   ordering barrier for all load       instructions before it
- *                                             and shareable to other cores
- * bar.bwaw:    ordering barrier for all store      instructions before it
- * bar.bwaws:   ordering barrier for all store      instructions before it
- *                                             and shareable to other cores
  */
+#define mb()           asm volatile ("sync\n":::"memory")
 
 #ifdef CONFIG_CPU_HAS_CACHEV2
-#define mb()           asm volatile ("sync.s\n":::"memory")
-
-#ifdef CONFIG_SMP
-#define __smp_mb()     asm volatile ("bar.brwarws\n":::"memory")
-#define __smp_rmb()    asm volatile ("bar.brars\n":::"memory")
-#define __smp_wmb()    asm volatile ("bar.bwaws\n":::"memory")
-#endif /* CONFIG_SMP */
-
-#define sync_is()      asm volatile ("sync.is\n":::"memory")
-
-#else /* !CONFIG_CPU_HAS_CACHEV2 */
-#define mb()           asm volatile ("sync\n":::"memory")
+/*
+ * Using three sync.is to prevent speculative PTW
+ */
+#define sync_is()      asm volatile ("sync.is\nsync.is\nsync.is\n":::"memory")
 #endif
 
 #include <asm-generic/barrier.h>
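
The hand-assembled opcodes above follow one base pattern: bits 16-19 of 0x8420c000 select ar/aw/br/bw. A standalone check, derived purely from the constants in this hunk:

#include <assert.h>

#define BAR_BASE 0x8420c000u
#define BAR_AR   (1u << 16)	/* reads  after  the barrier */
#define BAR_AW   (1u << 17)	/* writes after  the barrier */
#define BAR_BR   (1u << 18)	/* reads  before the barrier */
#define BAR_BW   (1u << 19)	/* writes before the barrier */

int main(void)
{
	assert((BAR_BASE | BAR_BR | BAR_AR) == 0x8425c000u);		/* bar.brar   */
	assert((BAR_BASE | BAR_BW | BAR_AW) == 0x842ac000u);		/* bar.bwaw   */
	assert((BAR_BASE | BAR_BR | BAR_AR | BAR_AW) == 0x8427c000u);	/* bar.brarw  */
	assert((BAR_BASE | BAR_BW | BAR_BR | BAR_AR | BAR_AW)
	       == 0x842fc000u);						/* bar.brwarw */
	return 0;
}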
index 89224530a0eed162e970bbc877576d7d899c1535..dabc8e46ce7b400c484de3118a4c7b3c4d6320d2 100644 (file)
@@ -3,12 +3,12 @@
 #ifndef __ASM_CSKY_CMPXCHG_H
 #define __ASM_CSKY_CMPXCHG_H
 
-#ifdef CONFIG_CPU_HAS_LDSTEX
+#ifdef CONFIG_SMP
 #include <asm/barrier.h>
 
 extern void __bad_xchg(void);
 
-#define __xchg(new, ptr, size)                                 \
+#define __xchg_relaxed(new, ptr, size)                         \
 ({                                                             \
        __typeof__(ptr) __ptr = (ptr);                          \
        __typeof__(new) __new = (new);                          \
@@ -16,7 +16,6 @@ extern void __bad_xchg(void);
        unsigned long tmp;                                      \
        switch (size) {                                         \
        case 4:                                                 \
-               smp_mb();                                       \
                asm volatile (                                  \
                "1:     ldex.w          %0, (%3) \n"            \
                "       mov             %1, %2   \n"            \
@@ -25,7 +24,6 @@ extern void __bad_xchg(void);
                        : "=&r" (__ret), "=&r" (tmp)            \
                        : "r" (__new), "r"(__ptr)               \
                        :);                                     \
-               smp_mb();                                       \
                break;                                          \
        default:                                                \
                __bad_xchg();                                   \
@@ -33,9 +31,10 @@ extern void __bad_xchg(void);
        __ret;                                                  \
 })
 
-#define xchg(ptr, x)   (__xchg((x), (ptr), sizeof(*(ptr))))
+#define xchg_relaxed(ptr, x) \
+               (__xchg_relaxed((x), (ptr), sizeof(*(ptr))))
 
-#define __cmpxchg(ptr, old, new, size)                         \
+#define __cmpxchg_relaxed(ptr, old, new, size)                 \
 ({                                                             \
        __typeof__(ptr) __ptr = (ptr);                          \
        __typeof__(new) __new = (new);                          \
@@ -44,7 +43,6 @@ extern void __bad_xchg(void);
        __typeof__(*(ptr)) __ret;                               \
        switch (size) {                                         \
        case 4:                                                 \
-               smp_mb();                                       \
                asm volatile (                                  \
                "1:     ldex.w          %0, (%3) \n"            \
                "       cmpne           %0, %4   \n"            \
@@ -56,7 +54,6 @@ extern void __bad_xchg(void);
                        : "=&r" (__ret), "=&r" (__tmp)          \
                        : "r" (__new), "r"(__ptr), "r"(__old)   \
                        :);                                     \
-               smp_mb();                                       \
                break;                                          \
        default:                                                \
                __bad_xchg();                                   \
@@ -64,8 +61,18 @@ extern void __bad_xchg(void);
        __ret;                                                  \
 })
 
-#define cmpxchg(ptr, o, n) \
-       (__cmpxchg((ptr), (o), (n), sizeof(*(ptr))))
+#define cmpxchg_relaxed(ptr, o, n) \
+       (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
+
+#define cmpxchg(ptr, o, n)                                     \
+({                                                             \
+       __typeof__(*(ptr)) __ret;                               \
+       __smp_release_fence();                                  \
+       __ret = cmpxchg_relaxed(ptr, o, n);                     \
+       __smp_acquire_fence();                                  \
+       __ret;                                                  \
+})
+
 #else
 #include <asm-generic/cmpxchg.h>
 #endif
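
cmpxchg() is now composed as release fence + relaxed cmpxchg + acquire fence instead of a single fully fenced primitive. A hypothetical caller showing the classic retry loop this ordering supports (illustrative only, not from the patch):

/* Increment *p unless it is <= 0; full-barrier cmpxchg() keeps
 * the read-modify-write ordered against surrounding accesses. */
static int inc_if_positive(int *p)
{
	int old, new;

	do {
		old = READ_ONCE(*p);
		if (old <= 0)
			return old;
		new = old + 1;
	} while (cmpxchg(p, old, new) != old);

	return new;
}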
diff --git a/arch/csky/include/asm/futex.h b/arch/csky/include/asm/futex.h
new file mode 100644 (file)
index 0000000..dbe2f99
--- /dev/null
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_FUTEX_H
+#define __ASM_CSKY_FUTEX_H
+
+#ifndef CONFIG_SMP
+#include <asm-generic/futex.h>
+#else
+#include <linux/atomic.h>
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)             \
+{                                                                      \
+       u32 tmp;                                                        \
+                                                                       \
+       __atomic_pre_full_fence();                                      \
+                                                                       \
+       __asm__ __volatile__ (                                          \
+       "1:     ldex.w  %[ov], %[u]                     \n"             \
+       "       "insn"                                  \n"             \
+       "2:     stex.w  %[t], %[u]                      \n"             \
+       "       bez     %[t], 1b                        \n"             \
+       "4:                                             \n"             \
+       "       .section .fixup,\"ax\"                  \n"             \
+       "       .balign 4                               \n"             \
+       "5:     mov %[r], %[e]                          \n"             \
+       "       jmpi 4b                                 \n"             \
+       "       .previous                               \n"             \
+       "       .section __ex_table,\"a\"               \n"             \
+       "       .balign 4                               \n"             \
+       "       .long   1b, 5b                          \n"             \
+       "       .long   2b, 5b                          \n"             \
+       "       .previous                               \n"             \
+       : [r] "+r" (ret), [ov] "=&r" (oldval),                          \
+         [u] "+m" (*uaddr), [t] "=&r" (tmp)                            \
+       : [op] "Jr" (oparg), [e] "jr" (-EFAULT)                         \
+       : "memory");                                                    \
+                                                                       \
+       __atomic_post_full_fence();                                     \
+}
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+       int oldval = 0, ret = 0;
+
+       if (!access_ok(uaddr, sizeof(u32)))
+               return -EFAULT;
+
+       switch (op) {
+       case FUTEX_OP_SET:
+               __futex_atomic_op("mov %[t], %[ov]",
+                                 ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_ADD:
+               __futex_atomic_op("add %[t], %[ov], %[op]",
+                                 ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_OR:
+               __futex_atomic_op("or %[t], %[ov], %[op]",
+                                 ret, oldval, uaddr, oparg);
+               break;
+       case FUTEX_OP_ANDN:
+               __futex_atomic_op("and %[t], %[ov], %[op]",
+                                 ret, oldval, uaddr, ~oparg);
+               break;
+       case FUTEX_OP_XOR:
+               __futex_atomic_op("xor %[t], %[ov], %[op]",
+                                 ret, oldval, uaddr, oparg);
+               break;
+       default:
+               ret = -ENOSYS;
+       }
+
+       if (!ret)
+               *oval = oldval;
+
+       return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+                             u32 oldval, u32 newval)
+{
+       int ret = 0;
+       u32 val, tmp;
+
+       if (!access_ok(uaddr, sizeof(u32)))
+               return -EFAULT;
+
+       __atomic_pre_full_fence();
+
+       __asm__ __volatile__ (
+       "1:     ldex.w  %[v], %[u]                      \n"
+       "       cmpne   %[v], %[ov]                     \n"
+       "       bt      4f                              \n"
+       "       mov     %[t], %[nv]                     \n"
+       "2:     stex.w  %[t], %[u]                      \n"
+       "       bez     %[t], 1b                        \n"
+       "4:                                             \n"
+       "       .section .fixup,\"ax\"                  \n"
+       "       .balign 4                               \n"
+       "5:     mov %[r], %[e]                          \n"
+       "       jmpi 4b                                 \n"
+       "       .previous                               \n"
+       "       .section __ex_table,\"a\"               \n"
+       "       .balign 4                               \n"
+       "       .long   1b, 5b                          \n"
+       "       .long   2b, 5b                          \n"
+       "       .previous                               \n"
+       : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr),
+         [t] "=&r" (tmp)
+       : [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "Jr" (-EFAULT)
+       : "memory");
+
+       __atomic_post_full_fence();
+
+       *uval = val;
+       return ret;
+}
+
+#endif /* CONFIG_SMP */
+#endif /* __ASM_CSKY_FUTEX_H */
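
One subtlety in the op dispatch above: FUTEX_OP_ANDN complements oparg once in C, so the asm template only ever needs a plain "and" inside the ldex/stex retry loop. A hypothetical wrapper:

/* Atomically clear bit 0 of a user word, reporting the old value. */
static int clear_user_bit0(u32 __user *uaddr, int *oldval)
{
	return arch_futex_atomic_op_inuser(FUTEX_OP_ANDN, 1, oldval, uaddr);
}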
index a65c6759f53753307774b38d7e22d02939f6bde5..d12179801ae3cff612749a3624c2bc2ed3b95404 100644 (file)
@@ -10,7 +10,7 @@
 
 #define FIXADDR_TOP    _AC(0xffffc000, UL)
 #define PKMAP_BASE     _AC(0xff800000, UL)
-#define VMALLOC_START  _AC(0xc0008000, UL)
+#define VMALLOC_START  (PAGE_OFFSET + LOWMEM_LIMIT + (PAGE_SIZE * 8))
 #define VMALLOC_END    (PKMAP_BASE - (PAGE_SIZE * 2))
 
 #ifdef CONFIG_HAVE_TCM
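
A quick consistency check: with the default 2 GiB split, the new VMALLOC_START formula reproduces the old hard-wired 0xc0008000. Constants are taken from the page.h hunk below; this is a standalone sketch, not kernel code:

#define PAGE_OFFSET	0x80000000UL	/* PAGE_OFFSET_80000000 default */
#define SSEG_SIZE	0x20000000UL
#define LOWMEM_LIMIT	(SSEG_SIZE * 2)
#define PAGE_SIZE	4096UL

_Static_assert(PAGE_OFFSET + LOWMEM_LIMIT + PAGE_SIZE * 8 == 0xc0008000UL,
	       "new VMALLOC_START formula matches the old constant");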
index b227d29393a8fc5b92c6ea753d7444f4ea6701f2..3767dbffd02f4f2fc3b7bb38a7c111fb4c7d1b05 100644 (file)
 #include <linux/sched.h>
 #include <abi/ckmmu.h>
 
-#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
-       setup_pgd(__pa(pgd), false)
-
-#define TLBMISS_HANDLER_SETUP_PGD_KERNEL(pgd) \
-       setup_pgd(__pa(pgd), true)
-
 #define ASID_MASK              ((1 << CONFIG_CPU_ASID_BITS) - 1)
 #define cpu_asid(mm)           (atomic64_read(&mm->context.asid) & ASID_MASK)
 
@@ -36,7 +30,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
        if (prev != next)
                check_and_switch_context(next, cpu);
 
-       TLBMISS_HANDLER_SETUP_PGD(next->pgd);
+       setup_pgd(next->pgd);
        write_mmu_entryhi(next->context.asid.counter);
 
        flush_icache_deferred(next);
index 9b98bf31d57cec0aae47a219dbe6b3fd2db8c263..3b91fc3cf36f9e5b12d1dfdade4a59684f9729b1 100644 (file)
@@ -24,7 +24,7 @@
  * address region. We use them mapping kernel 1GB direct-map address area and
  * for more than 1GB of memory we use highmem.
  */
-#define PAGE_OFFSET    0x80000000
+#define PAGE_OFFSET    CONFIG_PAGE_OFFSET
 #define SSEG_SIZE      0x20000000
 #define LOWMEM_LIMIT   (SSEG_SIZE * 2)
 
index d58d8146b729c4f8259a2bc1ce2cd805e5b39f41..33878c4aaa60de0ca694fc1986dec96cc3c8abe3 100644 (file)
@@ -71,7 +71,7 @@ do {                                                  \
 } while (0)
 
 extern void pagetable_init(void);
-extern void pre_mmu_init(void);
+extern void mmu_init(unsigned long min_pfn, unsigned long max_pfn);
 extern void pre_trap_init(void);
 
 #endif /* __ASM_CSKY_PGALLOC_H */
index 2002cb7f105361e1fd57fa8037afbb44da239325..6ec97af0d1ff6d1fde0439860b01f4cac6e92bcd 100644 (file)
@@ -14,7 +14,7 @@
 #define PGDIR_SIZE             (1UL << PGDIR_SHIFT)
 #define PGDIR_MASK             (~(PGDIR_SIZE-1))
 
-#define USER_PTRS_PER_PGD      (0x80000000UL/PGDIR_SIZE)
+#define USER_PTRS_PER_PGD      (PAGE_OFFSET/PGDIR_SIZE)
 #define FIRST_USER_ADDRESS     0UL
 
 /*
index 4800f6563abbfb74ec346741d358cb36daea8eae..3b4be4cb2ad07e31e1d3df6a8f7658649a9890e0 100644 (file)
@@ -28,7 +28,7 @@ extern struct cpuinfo_csky cpu_data[];
  * for a 64 bit kernel expandable to 8192EB, of which the current CSKY
  * implementations will "only" be able to use 1TB ...
  */
-#define TASK_SIZE       0x7fff8000UL
+#define TASK_SIZE      (PAGE_OFFSET - (PAGE_SIZE * 8))
 
 #ifdef __KERNEL__
 #define STACK_TOP       TASK_SIZE
index 79ede9b1a6467f7d56fedc63803db1248d157714..66d5e0f4f0e09488e5cc387a9b7aea7bc28e4d24 100644 (file)
@@ -10,7 +10,7 @@ typedef struct {
 
 #define KERNEL_DS              ((mm_segment_t) { 0xFFFFFFFF })
 
-#define USER_DS                        ((mm_segment_t) { 0x80000000UL })
+#define USER_DS                        ((mm_segment_t) { PAGE_OFFSET })
 #define get_fs()               (current_thread_info()->addr_limit)
 #define set_fs(x)              (current_thread_info()->addr_limit = (x))
 #define uaccess_kernel()       (get_fs().seg == KERNEL_DS.seg)
index 7cf3f2b34ceaff1c00f8120d84680697e9a76fc5..69f5aa249c5f9004610b189130ce93c8a58a558b 100644 (file)
@@ -6,8 +6,6 @@
 #include <linux/spinlock_types.h>
 #include <asm/barrier.h>
 
-#ifdef CONFIG_QUEUED_RWLOCKS
-
 /*
  * Ticket-based spin-locking.
  */
@@ -88,169 +86,4 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 
 #include <asm/qrwlock.h>
 
-/* See include/linux/spinlock.h */
-#define smp_mb__after_spinlock()       smp_mb()
-
-#else /* CONFIG_QUEUED_RWLOCKS */
-
-/*
- * Test-and-set spin-locking.
- */
-static inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-       u32 *p = &lock->lock;
-       u32 tmp;
-
-       asm volatile (
-               "1:     ldex.w          %0, (%1) \n"
-               "       bnez            %0, 1b   \n"
-               "       movi            %0, 1    \n"
-               "       stex.w          %0, (%1) \n"
-               "       bez             %0, 1b   \n"
-               : "=&r" (tmp)
-               : "r"(p)
-               : "cc");
-       smp_mb();
-}
-
-static inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-       smp_mb();
-       WRITE_ONCE(lock->lock, 0);
-}
-
-static inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-       u32 *p = &lock->lock;
-       u32 tmp;
-
-       asm volatile (
-               "1:     ldex.w          %0, (%1) \n"
-               "       bnez            %0, 2f   \n"
-               "       movi            %0, 1    \n"
-               "       stex.w          %0, (%1) \n"
-               "       bez             %0, 1b   \n"
-               "       movi            %0, 0    \n"
-               "2:                              \n"
-               : "=&r" (tmp)
-               : "r"(p)
-               : "cc");
-
-       if (!tmp)
-               smp_mb();
-
-       return !tmp;
-}
-
-#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0)
-
-/*
- * read lock/unlock/trylock
- */
-static inline void arch_read_lock(arch_rwlock_t *lock)
-{
-       u32 *p = &lock->lock;
-       u32 tmp;
-
-       asm volatile (
-               "1:     ldex.w          %0, (%1) \n"
-               "       blz             %0, 1b   \n"
-               "       addi            %0, 1    \n"
-               "       stex.w          %0, (%1) \n"
-               "       bez             %0, 1b   \n"
-               : "=&r" (tmp)
-               : "r"(p)
-               : "cc");
-       smp_mb();
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *lock)
-{
-       u32 *p = &lock->lock;
-       u32 tmp;
-
-       smp_mb();
-       asm volatile (
-               "1:     ldex.w          %0, (%1) \n"
-               "       subi            %0, 1    \n"
-               "       stex.w          %0, (%1) \n"
-               "       bez             %0, 1b   \n"
-               : "=&r" (tmp)
-               : "r"(p)
-               : "cc");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *lock)
-{
-       u32 *p = &lock->lock;
-       u32 tmp;
-
-       asm volatile (
-               "1:     ldex.w          %0, (%1) \n"
-               "       blz             %0, 2f   \n"
-               "       addi            %0, 1    \n"
-               "       stex.w          %0, (%1) \n"
-               "       bez             %0, 1b   \n"
-               "       movi            %0, 0    \n"
-               "2:                              \n"
-               : "=&r" (tmp)
-               : "r"(p)
-               : "cc");
-
-       if (!tmp)
-               smp_mb();
-
-       return !tmp;
-}
-
-/*
- * write lock/unlock/trylock
- */
-static inline void arch_write_lock(arch_rwlock_t *lock)
-{
-       u32 *p = &lock->lock;
-       u32 tmp;
-
-       asm volatile (
-               "1:     ldex.w          %0, (%1) \n"
-               "       bnez            %0, 1b   \n"
-               "       subi            %0, 1    \n"
-               "       stex.w          %0, (%1) \n"
-               "       bez             %0, 1b   \n"
-               : "=&r" (tmp)
-               : "r"(p)
-               : "cc");
-       smp_mb();
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *lock)
-{
-       smp_mb();
-       WRITE_ONCE(lock->lock, 0);
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *lock)
-{
-       u32 *p = &lock->lock;
-       u32 tmp;
-
-       asm volatile (
-               "1:     ldex.w          %0, (%1) \n"
-               "       bnez            %0, 2f   \n"
-               "       subi            %0, 1    \n"
-               "       stex.w          %0, (%1) \n"
-               "       bez             %0, 1b   \n"
-               "       movi            %0, 0    \n"
-               "2:                              \n"
-               : "=&r" (tmp)
-               : "r"(p)
-               : "cc");
-
-       if (!tmp)
-               smp_mb();
-
-       return !tmp;
-}
-
-#endif /* CONFIG_QUEUED_RWLOCKS */
 #endif /* __ASM_CSKY_SPINLOCK_H */
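
With the test-and-set fallback gone, the remaining arch_spin_lock is the ticket scheme. A conceptual C11 model of what the csky asm implements (names and types are illustrative, not the kernel API):

#include <stdatomic.h>

typedef struct {
	atomic_ushort next;	/* ticket dispenser */
	atomic_ushort owner;	/* ticket currently being served */
} ticket_lock_t;

static void ticket_lock(ticket_lock_t *l)
{
	unsigned short me = atomic_fetch_add_explicit(&l->next, 1,
						      memory_order_relaxed);
	while (atomic_load_explicit(&l->owner, memory_order_acquire) != me)
		;	/* spin until our ticket comes up */
}

static void ticket_unlock(ticket_lock_t *l)
{
	atomic_fetch_add_explicit(&l->owner, 1, memory_order_release);
}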
index 88b82438b1820c0134e035a8b03367cb88ae94a5..8ff0f6ff3a006eb398a542052cc26a9949622815 100644 (file)
@@ -22,16 +22,6 @@ typedef struct {
 
 #define __ARCH_SPIN_LOCK_UNLOCKED      { { 0 } }
 
-#ifdef CONFIG_QUEUED_RWLOCKS
 #include <asm-generic/qrwlock_types.h>
 
-#else /* CONFIG_NR_CPUS > 2 */
-
-typedef struct {
-       u32 lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED                { 0 }
-
-#endif /* CONFIG_QUEUED_RWLOCKS */
 #endif /* __ASM_CSKY_SPINLOCK_TYPES_H */
index 3821ef9b75672d8a5af90839ffa7f95cfdb4da50..f03db1d8d0603913f219f96a93511b6020760978 100644 (file)
  */
 ENTRY(csky_cmpxchg)
        USPTOKSP
+
+       RD_MEH  a3
+       WR_MEH  a3
+
        mfcr    a3, epc
        addi    a3, TRAP0_SIZE
 
index 5a5cabd076e1664bafa95848e6dc197a8f50d16b..d5f6d04b21a8bd07cde6a215ee07ce85f79eaf4e 100644 (file)
@@ -49,6 +49,7 @@ ENTRY(csky_\name)
 
        RD_PGDR r6
        RD_MEH  a3
+       WR_MEH  a3
 #ifdef CONFIG_CPU_HAS_TLBI
        tlbi.vaas a3
        sync.is
@@ -64,10 +65,11 @@ ENTRY(csky_\name)
        WR_MCIR a2
 #endif
        bclri   r6, 0
+       lrw     a2, PAGE_OFFSET
+       add     r6, a2
        lrw     a2, va_pa_offset
        ld.w    a2, (a2, 0)
        subu    r6, a2
-       bseti   r6, 31
 
        mov     a2, a3
        lsri    a2, _PGDIR_SHIFT
@@ -75,10 +77,11 @@ ENTRY(csky_\name)
        addu    r6, a2
        ldw     r6, (r6)
 
+       lrw     a2, PAGE_OFFSET
+       add     r6, a2
        lrw     a2, va_pa_offset
        ld.w    a2, (a2, 0)
        subu    r6, a2
-       bseti   r6, 31
 
        lsri    a3, PTE_INDX_SHIFT
        lrw     a2, PTE_INDX_MSK
@@ -314,6 +317,9 @@ ENTRY(csky_trap)
 ENTRY(csky_get_tls)
        USPTOKSP
 
+       RD_MEH  a0
+       WR_MEH  a0
+
        /* increase epc for continue */
        mfcr    a0, epc
        addi    a0, TRAP0_SIZE
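
The refill path drops the old "bseti r6, 31" trick, which hard-coded PAGE_OFFSET == 0x80000000. A C model of the conversion the asm now applies to each page-table pointer, given that va_pa_offset holds the physical RAM base:

static inline unsigned long tlb_entry_pa_to_va(unsigned long pa)
{
	/* valid for either PAGE_OFFSET Kconfig choice */
	return pa + PAGE_OFFSET - va_pa_offset;
}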
index 17ed9d2504807dfa385f5da0ef380b635a388c99..7e3e4f15b0523b8f6124776a8291f07354f1dee6 100644 (file)
@@ -21,10 +21,16 @@ END(_start)
 ENTRY(_start_smp_secondary)
        SETUP_MMU
 
-       /* copy msa1 from CPU0 */
-       lrw     r6, secondary_msa1
+#ifdef CONFIG_PAGE_OFFSET_80000000
+       lrw     r6, secondary_msa1
        ld.w    r6, (r6, 0)
        mtcr    r6, cr<31, 15>
+#endif
+
+       lrw     r6, secondary_pgd
+       ld.w    r6, (r6, 0)
+       mtcr    r6, cr<28, 15>
+       mtcr    r6, cr<29, 15>
 
        /* set stack point */
        lrw     r6, secondary_stack
index 1a29f1157449a96e1190b9e65c5017c35c0b44af..55d5a537948373e1c0a3cc29aedee509c4db8917 100644 (file)
@@ -1319,7 +1319,7 @@ int csky_pmu_device_probe(struct platform_device *pdev,
                pr_notice("[perf] PMU request irq fail!\n");
        }
 
-       ret = cpuhp_setup_state(CPUHP_AP_PERF_ONLINE, "AP_PERF_ONLINE",
+       ret = cpuhp_setup_state(CPUHP_AP_PERF_CSKY_ONLINE, "AP_PERF_ONLINE",
                                csky_pmu_starting_cpu,
                                csky_pmu_dying_cpu);
        if (ret) {
index d822144906ac1362d361a959c7c2f1cfec0aa6a8..e5bd4e01b861d895f0d94c6350fbc594e6236965 100644 (file)
@@ -363,9 +363,10 @@ void show_regs(struct pt_regs *fp)
 
        pr_info("PC: 0x%08lx (%pS)\n", (long)fp->pc, (void *)fp->pc);
        pr_info("LR: 0x%08lx (%pS)\n", (long)fp->lr, (void *)fp->lr);
-       pr_info("SP: 0x%08lx\n", (long)fp);
-       pr_info("orig_a0: 0x%08lx\n", fp->orig_a0);
+       pr_info("SP: 0x%08lx\n", (long)fp->usp);
        pr_info("PSR: 0x%08lx\n", (long)fp->sr);
+       pr_info("orig_a0: 0x%08lx\n", fp->orig_a0);
+       pr_info("PT_REGS: 0x%08lx\n", (long)fp);
 
        pr_info(" a0: 0x%08lx   a1: 0x%08lx   a2: 0x%08lx   a3: 0x%08lx\n",
                fp->a0, fp->a1, fp->a2, fp->a3);
index e4cab16056d6067ac2e41490ddad7f6f2f7db15d..e93bc6f74432ba259b48fab50499e9a6b51e2c1f 100644 (file)
@@ -45,13 +45,17 @@ static void __init csky_memblock_init(void)
 
        if (size >= lowmem_size) {
                max_low_pfn = min_low_pfn + lowmem_size;
+#ifdef CONFIG_PAGE_OFFSET_80000000
                write_mmu_msa1(read_mmu_msa0() + SSEG_SIZE);
+#endif
        } else if (size > sseg_size) {
                max_low_pfn = min_low_pfn + sseg_size;
        }
 
        max_zone_pfn[ZONE_NORMAL] = max_low_pfn;
 
+       mmu_init(min_low_pfn, max_low_pfn);
+
 #ifdef CONFIG_HIGHMEM
        max_zone_pfn[ZONE_HIGHMEM] = max_pfn;
 
@@ -101,16 +105,26 @@ void __init setup_arch(char **cmdline_p)
 unsigned long va_pa_offset;
 EXPORT_SYMBOL(va_pa_offset);
 
+static inline unsigned long read_mmu_msa(void)
+{
+#ifdef CONFIG_PAGE_OFFSET_80000000
+       return read_mmu_msa0();
+#endif
+
+#ifdef CONFIG_PAGE_OFFSET_A0000000
+       return read_mmu_msa1();
+#endif
+}
+
 asmlinkage __visible void __init csky_start(unsigned int unused,
                                            void *dtb_start)
 {
        /* Clean up bss section */
        memset(__bss_start, 0, __bss_stop - __bss_start);
 
-       va_pa_offset = read_mmu_msa0() & ~(SSEG_SIZE - 1);
+       va_pa_offset = read_mmu_msa() & ~(SSEG_SIZE - 1);
 
        pre_trap_init();
-       pre_mmu_init();
 
        if (dtb_start == NULL)
                early_init_dt_scan(__dtb_start);
index 041d0de6a1b6771de342bf1ed74292f93f87d872..0f9f5eef9338695b0b43dab09d198c70b2962395 100644 (file)
@@ -203,8 +203,8 @@ volatile unsigned int secondary_hint;
 volatile unsigned int secondary_hint2;
 volatile unsigned int secondary_ccr;
 volatile unsigned int secondary_stack;
-
-unsigned long secondary_msa1;
+volatile unsigned int secondary_msa1;
+volatile unsigned int secondary_pgd;
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
@@ -216,6 +216,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
        secondary_hint2 = mfcr("cr<21, 1>");
        secondary_ccr  = mfcr("cr18");
        secondary_msa1 = read_mmu_msa1();
+       secondary_pgd = mfcr("cr<29, 15>");
 
        /*
         * Because other CPUs are in reset status, we must flush data
@@ -262,8 +263,6 @@ void csky_start_secondary(void)
 
        flush_tlb_all();
        write_mmu_pagemask(0);
-       TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
-       TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);
 
 #ifdef CONFIG_CPU_HAS_FPU
        init_fpu();
index f03033e17c29e76462bc0ea166957ee4a3119779..e8b1a4a497980a2bef01497fa07426267d85b75b 100644 (file)
@@ -33,6 +33,7 @@ SECTIONS
 
        .text : AT(ADDR(.text) - LOAD_OFFSET) {
                _text = .;
+               VBR_BASE
                IRQENTRY_TEXT
                SOFTIRQENTRY_TEXT
                TEXT_TEXT
@@ -104,7 +105,6 @@ SECTIONS
 
        EXCEPTION_TABLE(L1_CACHE_BYTES)
        BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES)
-       VBR_BASE
        _end = . ;
 
        STABS_DEBUG
index 081b178b41b14c5c2cac1ea2fd1f78067d60b363..94eac13b9c97b46669c682df5f01d98de699aa0d 100644 (file)
@@ -59,7 +59,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
 
        si_code = SEGV_MAPERR;
 
-#ifndef CONFIG_CPU_HAS_TLBI
        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
@@ -84,10 +83,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;
 
-               unsigned long pgd_base;
-
-               pgd_base = (unsigned long)__va(get_pgd());
-               pgd = (pgd_t *)pgd_base + offset;
+               pgd = get_pgd() + offset;
                pgd_k = init_mm.pgd + offset;
 
                if (!pgd_present(*pgd_k))
@@ -110,7 +106,6 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
                        goto no_context;
                return;
        }
-#endif
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
        /*
index af627128314f81867a10af034e1923936a59f195..7742f1441a67d326d64867fc7c7ef14cd52ac293 100644 (file)
 #include <asm/mmu_context.h>
 #include <asm/sections.h>
 #include <asm/tlb.h>
+#include <asm/cacheflush.h>
 
 pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
 pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
+pte_t kernel_pte_tables[(PTRS_PER_PGD - USER_PTRS_PER_PGD)*PTRS_PER_PTE] __page_aligned_bss;
+
 EXPORT_SYMBOL(invalid_pte_table);
 unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
                                                __page_aligned_bss;
@@ -130,20 +133,32 @@ void pgd_init(unsigned long *p)
 
        for (i = 0; i < PTRS_PER_PGD; i++)
                p[i] = __pa(invalid_pte_table);
+
+       flush_tlb_all();
+       local_icache_inv_all(NULL);
 }
 
-void __init pre_mmu_init(void)
+void __init mmu_init(unsigned long min_pfn, unsigned long max_pfn)
 {
-       /*
-        * Setup page-table and enable TLB-hardrefill
-        */
+       int i;
+
+       for (i = 0; i < USER_PTRS_PER_PGD; i++)
+               swapper_pg_dir[i].pgd = __pa(invalid_pte_table);
+
+       for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++)
+               swapper_pg_dir[i].pgd =
+                       __pa(kernel_pte_tables + (PTRS_PER_PTE * (i - USER_PTRS_PER_PGD)));
+
+       for (i = min_pfn; i < max_pfn; i++)
+               set_pte(&kernel_pte_tables[i - PFN_DOWN(va_pa_offset)], pfn_pte(i, PAGE_KERNEL));
+
        flush_tlb_all();
-       pgd_init((unsigned long *)swapper_pg_dir);
-       TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir);
-       TLBMISS_HANDLER_SETUP_PGD_KERNEL(swapper_pg_dir);
+       local_icache_inv_all(NULL);
 
        /* Setup page mask to 4k */
        write_mmu_pagemask(0);
+
+       setup_pgd(swapper_pg_dir);
 }
 
 void __init fixrange_init(unsigned long start, unsigned long end,
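
The rewritten mmu_init() builds a static direct map: kernel PGD slots point into one contiguous kernel_pte_tables[] array, and every lowmem pfn gets a PAGE_KERNEL pte. The index math, spelled out as a sketch (helper name illustrative):

static pte_t *lowmem_pte(unsigned long va)
{
	unsigned long pfn = PFN_DOWN(va - PAGE_OFFSET + va_pa_offset);

	/* slot 0 is the first RAM page: the loop above stores
	 * pfn i at index i - PFN_DOWN(va_pa_offset) */
	return &kernel_pte_tables[pfn - PFN_DOWN(va_pa_offset)];
}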
index b5e14d513e6229a12698683d66062a4820482ba6..2b9cbaf41cd00b6534438c0d12d0040d341c16e4 100644 (file)
@@ -97,4 +97,8 @@ config OFFSET
         hex "Load offset"
        default 0
 
+config BOOT_LINK_OFFSET
+       hex     "zImage link offset"
+       default 0x200000
+
 endmenu
index d1733805ea67a5aa68a06631ea71512b0c132efc..f9731fe8c7d09d30c9c8f9a012b746fee7f97bc9 100644 (file)
@@ -61,7 +61,7 @@
                compatible = "renesas,h8s-intc", "renesas,h8300-intc";
                #interrupt-cells = <2>;
                interrupt-controller;
-               reg = <0xfffe00 24>;
+               reg = <0xfffe00 24>, <0xffff30 6>;
        };
 
        bsc: memory-controller@fffec0 {
@@ -79,7 +79,7 @@
        timer8: timer@ffffb0 {
                compatible = "renesas,8bit-timer";
                reg = <0xffffb0 10>;
-               interrupts = <72 0>;
+               interrupts = <72 0>, <73 0>, <74 0>;
                clocks = <&fclk>;
                clock-names = "fck";
        };
                clocks = <&fclk>;
                clock-names = "fck";
        };
+       ethernet: ethernet@f80000 {
+               compatible = "smsc,lan91c94";
+               reg = <0xf80000 0xfbffff>;
+               reg-io-width = <1>;
+               interrupts = <16 0>;
+       };
 };
index 595398b9d0180a805c0c70fc5a4d0501cdf76504..e1d4d9b7f6b40c04078e29c3592ac5f0f7d50319 100644 (file)
@@ -8,7 +8,7 @@
 
        chosen {
                bootargs = "earlyprintk=h8300-sim";
-               stdout-path = <&sci0>;
+               stdout-path = &sci0;
        };
        aliases {
                serial0 = &sci0;
index 932cc3c5a81bcdd2753681d671c5214c19d2fff5..4848e40e607ecc1da6d29f5dbc6a892d4e93d595 100644 (file)
@@ -8,7 +8,7 @@
 
        chosen {
                bootargs = "earlyprintk=h8300-sim";
-               stdout-path = <&sci0>;
+               stdout-path = &sci0;
        };
        aliases {
                serial0 = &sci0;
index 23791dcf6c259894f2333c99e9e6a79b872d13ac..bcf2edb8fff944b07dbb35e8674bc07d082ebaf4 100644 (file)
@@ -1,9 +1,7 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_USELIB is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSFS_SYSCALL is not set
-# CONFIG_KALLSYMS is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
@@ -12,17 +10,17 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_EVENTFD is not set
 # CONFIG_AIO is not set
 # CONFIG_ADVISE_SYSCALLS is not set
+# CONFIG_KALLSYMS is not set
 CONFIG_EMBEDDED=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLOB=y
+CONFIG_BOOT_LINK_OFFSET=0x400000
+CONFIG_H8S_EDOSK2674=y
 # CONFIG_BLOCK is not set
-CONFIG_H8S_SIM=y
-CONFIG_H8300_BUILTIN_DTB="h8s_sim"
 # CONFIG_BINFMT_SCRIPT is not set
 CONFIG_BINFMT_FLAT=y
 # CONFIG_COREDUMP is not set
-# CONFIG_UEVENT_HELPER is not set
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FW_LOADER is not set
@@ -32,9 +30,7 @@ CONFIG_BINFMT_FLAT=y
 # CONFIG_VT is not set
 # CONFIG_UNIX98_PTYS is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 # CONFIG_USB_SUPPORT is not set
index 7fc9c2f0acc00b066977b0d83e5a60811e891201..1b90399758f3a593c4d17774c34a13b86e9260dd 100644 (file)
@@ -1,9 +1,7 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_USELIB is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSFS_SYSCALL is not set
-# CONFIG_KALLSYMS is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
@@ -12,17 +10,17 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_EVENTFD is not set
 # CONFIG_AIO is not set
 # CONFIG_ADVISE_SYSCALLS is not set
+# CONFIG_KALLSYMS is not set
 CONFIG_EMBEDDED=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLOB=y
-# CONFIG_BLOCK is not set
 CONFIG_H8300H_SIM=y
 CONFIG_H8300_BUILTIN_DTB="h8300h_sim"
+# CONFIG_BLOCK is not set
 # CONFIG_BINFMT_SCRIPT is not set
 CONFIG_BINFMT_FLAT=y
 # CONFIG_COREDUMP is not set
-# CONFIG_UEVENT_HELPER is not set
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FW_LOADER is not set
@@ -32,9 +30,7 @@ CONFIG_BINFMT_FLAT=y
 # CONFIG_VT is not set
 # CONFIG_UNIX98_PTYS is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_EARLYCON=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 # CONFIG_USB_SUPPORT is not set
index 23791dcf6c259894f2333c99e9e6a79b872d13ac..4d46adcc21a483c2de70688ddab77f898181232a 100644 (file)
@@ -1,9 +1,7 @@
 # CONFIG_LOCALVERSION_AUTO is not set
-# CONFIG_USELIB is not set
 CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_UID16 is not set
 # CONFIG_SYSFS_SYSCALL is not set
-# CONFIG_KALLSYMS is not set
 # CONFIG_BASE_FULL is not set
 # CONFIG_FUTEX is not set
 # CONFIG_EPOLL is not set
@@ -12,17 +10,17 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 # CONFIG_EVENTFD is not set
 # CONFIG_AIO is not set
 # CONFIG_ADVISE_SYSCALLS is not set
+# CONFIG_KALLSYMS is not set
 CONFIG_EMBEDDED=y
 # CONFIG_VM_EVENT_COUNTERS is not set
 # CONFIG_COMPAT_BRK is not set
 CONFIG_SLOB=y
-# CONFIG_BLOCK is not set
 CONFIG_H8S_SIM=y
 CONFIG_H8300_BUILTIN_DTB="h8s_sim"
+# CONFIG_BLOCK is not set
 # CONFIG_BINFMT_SCRIPT is not set
 CONFIG_BINFMT_FLAT=y
 # CONFIG_COREDUMP is not set
-# CONFIG_UEVENT_HELPER is not set
 # CONFIG_STANDALONE is not set
 # CONFIG_PREVENT_FIRMWARE_BUILD is not set
 # CONFIG_FW_LOADER is not set
@@ -32,9 +30,7 @@ CONFIG_BINFMT_FLAT=y
 # CONFIG_VT is not set
 # CONFIG_UNIX98_PTYS is not set
 # CONFIG_LEGACY_PTYS is not set
-# CONFIG_DEVKMEM is not set
 CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
 # CONFIG_HW_RANDOM is not set
 # CONFIG_HWMON is not set
 # CONFIG_USB_SUPPORT is not set
index 0281f92eea3d98de9a5a67104ac09f3363e9a81f..15280af7251c16a738373c6d94c4613ae4fc71a6 100644 (file)
@@ -24,7 +24,7 @@
 #include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <linux/of_address.h>
-#include <linux/clk-provider.h>
+#include <linux/of_clk.h>
 #include <linux/memblock.h>
 #include <linux/screen_info.h>
 #include <linux/clocksource.h>
index 2d1abc37fd08b0a78856190fdb00e41f4a264fca..df873779bb5e00e6a12d3c034eafec00de8f7cfb 100644 (file)
 ;; c   = er1(r1l)
 ;; count = er2
 memset:
-       btst    #0,r0l
+       mov.l   er4,@-sp
+       mov.l   er0,er4
+       btst    #0,r4l
        beq     2f
 
        ;; odd address
 1:
-       mov.b   r1l,@er0
-       adds    #1,er0
+       mov.b   r1l,@er4
+       adds    #1,er4
        dec.l   #1,er2
        beq     6f
 
@@ -46,8 +48,8 @@ memset:
        mov.b   r1l,r1h
        mov.w   r1,e1
 3:
-       mov.l   er1,@er0
-       adds    #4,er0
+       mov.l   er1,@er4
+       adds    #4,er4
        dec.l   #1,er2
        bne     3b
 4:
@@ -55,11 +57,12 @@ memset:
        and.b   #3,r3l
        beq     6f
 5:
-       mov.b   r1l,@er0
-       adds    #1,er0
+       mov.b   r1l,@er4
+       adds    #1,er4
        dec.b   r3l
        bne     5b
 6:
+       mov.l   @sp+,er4
        rts
 
 clear_user:
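
The bug being fixed: the old code advanced er0, the register that must carry memset's return value, so callers received a pointer past the end of the buffer. A C model of the corrected behaviour, with er4 in the cursor role:

#include <stddef.h>

void *memset_model(void *s, int c, size_t n)
{
	unsigned char *p = s;	/* cursor, as er4 after the fix */

	while (n--)
		*p++ = (unsigned char)c;

	return s;		/* original pointer, kept intact in er0 */
}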
index bfc00f2bd437236823e16abf22f68ca1bc6bebdb..fad4f7fcb12183a57d87fa8f904696b7fe83c762 100644 (file)
 439    common  faccessat2                      sys_faccessat2
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2
+442    common  watch_mount                     sys_watch_mount
index 7fe4e45c864c4f36ca7cdd6f410590541f5e6429..7e33c94f5bc325ec3bce6d81c84985fd3dfda30b 100644 (file)
 439    common  faccessat2                      sys_faccessat2
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2
+442    common  watch_mount                     sys_watch_mount
index a522adf194abe3a3cd982a92888b58365d3858f7..bc08378ad9b65054dea9f9a27a54903abec2b745 100644 (file)
 439    common  faccessat2                      sys_faccessat2
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2
+442    common  watch_mount                     sys_watch_mount
index 0f03ad223f332cfe755c019fada0fbbaf958149c..0272769fa0fde612df9339535c7c2448db8ceb79 100644 (file)
 439    n32     faccessat2                      sys_faccessat2
 440    n32     process_madvise                 sys_process_madvise
 441    n32     epoll_pwait2                    compat_sys_epoll_pwait2
+442    n32     watch_mount                     sys_watch_mount
index 91649690b52f1b25ac229ad4e224366861ab66b9..fdb16671dc2fcdcfbcf29e22389f9975e5967fdf 100644 (file)
 439    n64     faccessat2                      sys_faccessat2
 440    n64     process_madvise                 sys_process_madvise
 441    n64     epoll_pwait2                    sys_epoll_pwait2
+442    n64     watch_mount                     sys_watch_mount
index 4bad0c40aed6bcfa46294cd0be3d3566137085e0..28e99cf4b5f244ca4833f6ed529bf78d42c47515 100644 (file)
 439    o32     faccessat2                      sys_faccessat2
 440    o32     process_madvise                 sys_process_madvise
 441    o32     epoll_pwait2                    sys_epoll_pwait2                compat_sys_epoll_pwait2
+442    o32     watch_mount                     sys_watch_mount
index 40313a63507570b6088ebce45719b04134186fd7..f9a89cf00aa696134ef64322fef1505bcf5f8302 100644 (file)
@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="nds32le-linux-"
 CONFIG_SYSVIPC=y
 CONFIG_POSIX_MQUEUE=y
 CONFIG_HIGH_RES_TIMERS=y
index c356e484dcab3300504078cd17b3759f1db36c6b..af82e996f412ecf898de9097c7a084079a5108ff 100644 (file)
@@ -52,7 +52,7 @@ EXPORT_SYMBOL(elf_hwcap);
 
 /*
  * The following string table must sync with the HWCAP_xx bitmask,
- * which is defined in <asm/procinfo.h>
+ * which is defined above
  */
 static const char *hwcap_str[] = {
        "mfusr_pc",
index ac9d78ce3a818926dd61bd23cddfa10e495b7a1e..574a3d0a853980a9458ed236ad4fcb9bd242890d 100644 (file)
@@ -2,7 +2,7 @@
 // Copyright (C) 2005-2017 Andes Technology Corporation
 
 #include <linux/clocksource.h>
-#include <linux/clk-provider.h>
+#include <linux/of_clk.h>
 
 void __init time_init(void)
 {
index 6a9772ba73927696c05d5a866ef08696bd7c2bba..ee0d9ae192a5041a8a6494b93f9373f5876737fe 100644 (file)
@@ -25,17 +25,8 @@ extern void show_pte(struct mm_struct *mm, unsigned long addr);
 void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
 {
        unsigned long first;
-       mm_segment_t fs;
        int i;
 
-       /*
-        * We need to switch to kernel mode so that we can use __get_user
-        * to safely read from kernel space.  Note that we now dump the
-        * code first, just in case the backtrace kills us.
-        */
-       fs = get_fs();
-       set_fs(KERNEL_DS);
-
        pr_emerg("%s(0x%08lx to 0x%08lx)\n", lvl, bottom, top);
 
        for (first = bottom & ~31; first < top; first += 32) {
@@ -48,7 +39,9 @@ void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
                for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
                        if (p >= bottom && p < top) {
                                unsigned long val;
-                               if (__get_user(val, (unsigned long *)p) == 0)
+
+                               if (get_kernel_nofault(val,
+                                               (unsigned long *)p) == 0)
                                        sprintf(str + i * 9, " %08lx", val);
                                else
                                        sprintf(str + i * 9, " ????????");
@@ -56,46 +49,10 @@ void dump_mem(const char *lvl, unsigned long bottom, unsigned long top)
                }
                pr_emerg("%s%04lx:%s\n", lvl, first & 0xffff, str);
        }
-
-       set_fs(fs);
 }
 
 EXPORT_SYMBOL(dump_mem);
 
-static void dump_instr(struct pt_regs *regs)
-{
-       unsigned long addr = instruction_pointer(regs);
-       mm_segment_t fs;
-       char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
-       int i;
-
-       return;
-       /*
-        * We need to switch to kernel mode so that we can use __get_user
-        * to safely read from kernel space.  Note that we now dump the
-        * code first, just in case the backtrace kills us.
-        */
-       fs = get_fs();
-       set_fs(KERNEL_DS);
-
-       pr_emerg("Code: ");
-       for (i = -4; i < 1; i++) {
-               unsigned int val, bad;
-
-               bad = __get_user(val, &((u32 *) addr)[i]);
-
-               if (!bad) {
-                       p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
-               } else {
-                       p += sprintf(p, "bad PC value");
-                       break;
-               }
-       }
-       pr_emerg("Code: %s\n", str);
-
-       set_fs(fs);
-}
-
 #define LOOP_TIMES (100)
 static void __dump(struct task_struct *tsk, unsigned long *base_reg,
                   const char *loglvl)
@@ -179,7 +136,6 @@ void die(const char *str, struct pt_regs *regs, int err)
 
        if (!user_mode(regs) || in_interrupt()) {
                dump_mem("Stack: ", regs->sp, (regs->sp + PAGE_SIZE) & PAGE_MASK);
-               dump_instr(regs);
                dump_stack();
        }
 
index 6bcc31966b4460b28a863384f97ea5c97d7b027c..65deefb4ced11cf73574b3f21aa12a9e8c57f81c 100644 (file)
 439    common  faccessat2                      sys_faccessat2
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2                compat_sys_epoll_pwait2
+442    common  watch_mount                     sys_watch_mount
index 349bf3f0c3afa994c5cc0c0ccd88ad4c1df7c8ac..fbc48a500846b0668873a629b0fd52d16f71db71 100644 (file)
@@ -260,9 +260,16 @@ __secondary_hold_acknowledge:
 MachineCheck:
        EXCEPTION_PROLOG_0
 #ifdef CONFIG_PPC_CHRP
+#ifdef CONFIG_VMAP_STACK
+       mtspr   SPRN_SPRG_SCRATCH2,r1
+       mfspr   r1, SPRN_SPRG_THREAD
+       lwz     r1, RTAS_SP(r1)
+       cmpwi   cr1, r1, 0
+#else
        mfspr   r11, SPRN_SPRG_THREAD
        lwz     r11, RTAS_SP(r11)
        cmpwi   cr1, r11, 0
+#endif
        bne     cr1, 7f
 #endif /* CONFIG_PPC_CHRP */
        EXCEPTION_PROLOG_1 for_rtas=1
index f744eb5cba887d398e150e9ef9fbb1293d5fe08f..1d3be10021e9d65f58acf40d650198b93af0df7e 100644 (file)
 439    common  faccessat2                      sys_faccessat2
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2                compat_sys_epoll_pwait2
+442    common  watch_mount                     sys_watch_mount
index d443423495e565e49a8e57527f00dfdd4ffbea17..d1d94dffe24df9e93ed0645492dda7b75a2abaaa 100644 (file)
 439  common    faccessat2              sys_faccessat2                  sys_faccessat2
 440  common    process_madvise         sys_process_madvise             sys_process_madvise
 441  common    epoll_pwait2            sys_epoll_pwait2                compat_sys_epoll_pwait2
+442  common    watch_mount             sys_watch_mount                 sys_watch_mount
index 9df40ac0ebc05f584b43dc23777f027cddeff6b3..6bed66542aa8996180355ba6bb2f0f51d03dcd15 100644 (file)
 439    common  faccessat2                      sys_faccessat2
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2
+442    common  watch_mount                     sys_watch_mount
index c9c34dc52b7d87797dca0a1a0c29d42582b736d7..da6e6f0d71b2334004d511a16b5f59206bfc1009 100644 (file)
@@ -176,7 +176,7 @@ config SMP
          Management" code will be disabled if you say Y here.
 
          See also <file:Documentation/admin-guide/lockup-watchdogs.rst> and the SMP-HOWTO
-         available at <http://www.tldp.org/docs.html#howto>.
+         available at <https://www.tldp.org/docs.html#howto>.
 
          If you don't know what to do here, say N.
 
index a7a38fb4ece0f2308ea29125bfcdfc3aeea61ac2..6d74064add0a8828a3817ca54ffa71f605d2e5b1 100644 (file)
@@ -154,6 +154,10 @@ static off_t get_hdrs_offset(int kernelfd, const char *filename)
                offset -= LOOKBACK;
                /* skip a.out header */
                offset += AOUT_TEXT_OFFSET;
+               if (offset < 0) {
+                       errno = EINVAL;
+                       die("Calculated a negative offset; elftoaout probably generated an invalid image. Did you use a recent elftoaout?");
+               }
                if (lseek(kernelfd, offset, SEEK_SET) < 0)
                        die("lseek");
                if (read(kernelfd, buffer, BUFSIZE) != BUFSIZE)
index 8625946d8d00651d01c7275c99560dfc19e13195..597a22953bc5b54ef0eca2e7a74050af0dc5e8bd 100644 (file)
@@ -18,7 +18,7 @@
  *
  * When we spin, we try to use an operation that will cause the
  * current cpu strand to block, and therefore make the core fully
- * available to any other other runnable strands.  There are two
+ * available to any other runnable strands.  There are two
  * options, based upon cpu capabilities.
  *
  * On all cpus prior to SPARC-T4 we do three dummy reads of the
index 7e078bc73ef5616d8f0a94f070246d203c4f6e14..8fb09eec8c3e796a9a79aa0a7877842ceb7ea6d3 100644 (file)
@@ -8,7 +8,6 @@
 
 #include <asm/ptrace.h>
 #include <asm/processor.h>
-#include <asm/extable_64.h>
 #include <asm/spitfire.h>
 #include <asm/adi.h>
 
similarity index 92%
rename from arch/sparc/include/asm/extable_64.h
rename to arch/sparc/include/asm/extable.h
index 5a0171907b7e5dac71055aba5d48d7ef591f901b..554a9dc376fc8efabf4ee0aaf884bd90ece5abc8 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __ASM_EXTABLE64_H
-#define __ASM_EXTABLE64_H
+#ifndef __ASM_EXTABLE_H
+#define __ASM_EXTABLE_H
 /*
  * The exception table consists of pairs of addresses: the first is the
  * address of an instruction that is allowed to fault, and the second is
index 7708d015712b63e95ec7690f77e8937f25bae4a5..6067925972d9ddaf99dd83cd0845960cd64c908f 100644 (file)
@@ -113,7 +113,7 @@ extern unsigned long last_valid_pfn;
 extern void *srmmu_nocache_pool;
 #define __nocache_pa(VADDR) (((unsigned long)VADDR) - SRMMU_NOCACHE_VADDR + __pa((unsigned long)srmmu_nocache_pool))
 #define __nocache_va(PADDR) (__va((unsigned long)PADDR) - (unsigned long)srmmu_nocache_pool + SRMMU_NOCACHE_VADDR)
-#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
+#define __nocache_fix(VADDR) ((__typeof__(VADDR))__va(__nocache_pa(VADDR)))
 
 /* Accessing the MMU control register. */
 unsigned int srmmu_get_mmureg(void);
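The __nocache_fix() change above adds a __typeof__(VADDR) cast so the macro returns the same pointer type it was given, letting callers drop their own casts (see the srmmu.c hunk at the end of this section). A stand-alone illustration of the idiom, using a hypothetical translate() in place of the __va(__nocache_pa()) round trip:

    #include <stdio.h>

    static void *translate(void *p) { return p; }  /* stand-in translation */

    /* before: result is void *, so every caller had to cast */
    #define fix_untyped(p)  translate(p)
    /* after: __typeof__ propagates the argument's pointer type */
    #define fix_typed(p)    ((__typeof__(p))translate(p))

    int main(void)
    {
            int x = 42;
            int *ip = fix_typed(&x);        /* type-checked, no cast at the call site */

            printf("%d\n", *ip);
            return 0;
    }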
index 3c4bc2189092d350b7b6e2c794714f6c7249f692..b6242f7771e9e845ab9a24720e99f66a86c8ae51 100644 (file)
@@ -50,16 +50,12 @@ struct thread_struct {
        unsigned long   fsr;
        unsigned long   fpqdepth;
        struct fpq      fpqueue[16];
-       unsigned long flags;
        mm_segment_t current_ds;
 };
 
-#define SPARC_FLAG_KTHREAD      0x1    /* task is a kernel thread */
-#define SPARC_FLAG_UNALIGNED    0x2    /* is allowed to do unaligned accesses */
-
 #define INIT_THREAD  { \
-       .flags = SPARC_FLAG_KTHREAD, \
        .current_ds = KERNEL_DS, \
+       .kregs = (struct pt_regs *)(init_stack+THREAD_SIZE)-1 \
 }
 
 /* Do necessary setup to start up a newly executed thread. */
index 42cd4cd3892e32a9adacad7ba1d4fb208369691c..8047a9caab2fc86a35e684a98e2c1deff54e923a 100644 (file)
@@ -118,6 +118,7 @@ struct thread_info {
        .task           =       &tsk,                   \
        .current_ds     =       ASI_P,                  \
        .preempt_count  =       INIT_PREEMPT_COUNT,     \
+       .kregs          =       (struct pt_regs *)(init_stack+THREAD_SIZE)-1 \
 }
 
 /* how to get the thread information struct from C */
index dd85bc2c2cad5211c19695362bba874e0ce17b3c..390094200fc44212187f7909d74b3079ed639d6e 100644 (file)
@@ -1,6 +1,9 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 #ifndef ___ASM_SPARC_UACCESS_H
 #define ___ASM_SPARC_UACCESS_H
+
+#include <asm/extable.h>
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/uaccess_64.h>
 #else
index 0a2d3ebc4bb86d349bf5455bc05d7868f6da495e..4a12346bb69c335d06bc4e30b98244ced13a46e3 100644 (file)
@@ -13,9 +13,6 @@
 
 #include <asm/processor.h>
 
-#define ARCH_HAS_SORT_EXTABLE
-#define ARCH_HAS_SEARCH_EXTABLE
-
 /* Sparc is not segmented, however we need to be able to fool access_ok()
  * when doing system calls from kernel mode legitimately.
  *
 #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size)))
 #define access_ok(addr, size) __access_ok((unsigned long)(addr), size)
 
-/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue.  No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path.  This means when everything is well,
- * we don't even have to jump over them.  Further, they do not intrude
- * on our cache or tlb entries.
- *
- * There is a special way how to put a range of potentially faulting
- * insns (like twenty ldd/std's with now intervening other instructions)
- * You specify address of first in insn and 0 in fixup and in the next
- * exception_table_entry you specify last potentially faulting insn + 1
- * and in fixup the routine which should handle the fault.
- * That fixup code will get
- * (faulting_insn_address - first_insn_in_the_range_address)/4
- * in %g2 (ie. index of the faulting instruction in the range).
- */
-
-struct exception_table_entry
-{
-        unsigned long insn, fixup;
-};
-
-/* Returns 0 if exception not found and fixup otherwise.  */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
-
 /* Uh, these should become the main single-value transfer routines..
  * They automatically use the right size if we just have the right
  * pointer type..
@@ -252,12 +219,7 @@ static inline unsigned long __clear_user(void __user *addr, unsigned long size)
        unsigned long ret;
 
        __asm__ __volatile__ (
-               ".section __ex_table,#alloc\n\t"
-               ".align 4\n\t"
-               ".word 1f,3\n\t"
-               ".previous\n\t"
                "mov %2, %%o1\n"
-               "1:\n\t"
                "call __bzero\n\t"
                " mov %1, %%o0\n\t"
                "mov %%o0, %0\n"
index 698cf69f74e9984d54bca056bcd6470df7b88c1e..30eb4c6414d1baf9ae792726bf8c652d6085e61e 100644 (file)
@@ -10,7 +10,6 @@
 #include <linux/string.h>
 #include <asm/asi.h>
 #include <asm/spitfire.h>
-#include <asm/extable_64.h>
 
 #include <asm/processor.h>
 
index be30c8d4cc7373490ed5f8c2ff716aae22f56574..6044b82b976751a8ccbe43ac424bafe8de36af42 100644 (file)
@@ -515,7 +515,7 @@ continue_boot:
 
                /* I want a kernel stack NOW! */
                set     init_thread_union, %g1
-               set     (THREAD_SIZE - STACKFRAME_SZ), %g2
+               set     (THREAD_SIZE - STACKFRAME_SZ - TRACEREG_SZ), %g2
                add     %g1, %g2, %sp
                mov     0, %fp                  /* And for good luck */
 
index c5ff2472b3d9d4e53385da0536802b387f123f75..72a5bdc833ea22f0800663c354cba188fad83243 100644 (file)
@@ -706,7 +706,7 @@ tlb_fixup_done:
        wr      %g0, ASI_P, %asi
        mov     1, %g1
        sllx    %g1, THREAD_SHIFT, %g1
-       sub     %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
+       sub     %g1, (STACKFRAME_SZ + STACK_BIAS + TRACEREG_SZ), %g1
        add     %g6, %g1, %sp
 
        /* Set per-cpu pointer initially to zero, this makes
index 5d45b6d766d6a2a350c30957769b1d097dbe2888..9c2b720bfd20d784907171668c4db7ce7d8e9fa1 100644 (file)
@@ -552,9 +552,8 @@ static void pci_of_scan_bus(struct pci_pbm_info *pbm,
                pci_info(bus, "scan_bus[%pOF] bus no %d\n",
                         node, bus->number);
 
-       child = NULL;
        prev_devfn = -1;
-       while ((child = of_get_next_child(node, child)) != NULL) {
+       for_each_child_of_node(node, child) {
                if (ofpci_verbose)
                        pci_info(bus, "  * %pOF\n", child);
                reg = of_get_property(child, "reg", &reglen);
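for_each_child_of_node() above replaces the open-coded of_get_next_child() loop and manages the child's reference count across iterations; a reference must still be dropped when leaving the loop early. A minimal sketch of the idiom; count_children() is illustrative, not from this patch:

    #include <linux/errno.h>
    #include <linux/of.h>

    static int count_children(struct device_node *parent)
    {
            struct device_node *child;
            int n = 0;

            for_each_child_of_node(parent, child) {
                    if (!of_device_is_available(child)) {
                            of_node_put(child);     /* balance the ref on early exit */
                            return -ENODEV;
                    }
                    n++;
            }
            return n;
    }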
index a023637359154e9bbd874a574bd9f95ba50c5b10..f75caecff115cb5b905cada262ebd461672257d3 100644 (file)
@@ -216,16 +216,6 @@ void flush_thread(void)
                clear_thread_flag(TIF_USEDFPU);
 #endif
        }
-
-       /* This task is no longer a kernel thread. */
-       if (current->thread.flags & SPARC_FLAG_KTHREAD) {
-               current->thread.flags &= ~SPARC_FLAG_KTHREAD;
-
-               /* We must fixup kregs as well. */
-               /* XXX This was not fixed for ti for a while, worked. Unused? */
-               current->thread.kregs = (struct pt_regs *)
-                   (task_stack_page(current) + (THREAD_SIZE - TRACEREG_SZ));
-       }
 }
 
 static inline struct sparc_stackf __user *
@@ -313,7 +303,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
                extern int nwindows;
                unsigned long psr;
                memset(new_stack, 0, STACKFRAME_SZ + TRACEREG_SZ);
-               p->thread.flags |= SPARC_FLAG_KTHREAD;
                p->thread.current_ds = KERNEL_DS;
                ti->kpc = (((unsigned long) ret_from_kernel_thread) - 0x8);
                childregs->u_regs[UREG_G1] = sp; /* function */
@@ -325,7 +314,6 @@ int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long arg,
        }
        memcpy(new_stack, (char *)regs - STACKFRAME_SZ, STACKFRAME_SZ + TRACEREG_SZ);
        childregs->u_regs[UREG_FP] = sp;
-       p->thread.flags &= ~SPARC_FLAG_KTHREAD;
        p->thread.current_ds = USER_DS;
        ti->kpc = (((unsigned long) ret_from_fork) - 0x8);
        ti->kpsr = current->thread.fork_kpsr | PSR_PIL;
index eea43a1aef1b9a6fd1e4139dbf118d8e4eafc2ee..c8e0dd99f3700e73851878ca44102f1c7d24fe8f 100644 (file)
@@ -266,7 +266,6 @@ static __init void leon_patch(void)
 }
 
 struct tt_entry *sparc_ttable;
-static struct pt_regs fake_swapper_regs;
 
 /* Called from head_32.S - before we have setup anything
  * in the kernel. Be very careful with what you do here.
@@ -363,8 +362,6 @@ void __init setup_arch(char **cmdline_p)
                (*(linux_dbvec->teach_debugger))();
        }
 
-       init_task.thread.kregs = &fake_swapper_regs;
-
        /* Run-time patch instructions to match the cpu model */
        per_cpu_patch();
 
index d87244197d5cbb56fec92223c3b5c7ead0352b63..48abee4eee29d8589d9452029f109e102f63f91a 100644 (file)
@@ -165,8 +165,6 @@ extern int root_mountflags;
 
 char reboot_command[COMMAND_LINE_SIZE];
 
-static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
-
 static void __init per_cpu_patch(void)
 {
        struct cpuid_patch_entry *p;
@@ -661,8 +659,6 @@ void __init setup_arch(char **cmdline_p)
        rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
 #endif
 
-       task_thread_info(&init_task)->kregs = &fake_swapper_regs;
-
 #ifdef CONFIG_IP_PNP
        if (!ic_set_manually) {
                phandle chosen = prom_finddevice("/chosen");
index 40d8c7cd82984b44ddf9f95074835961114547eb..631b8295774bd6621d708ecfb2d65d674587ca12 100644 (file)
 439    common  faccessat2                      sys_faccessat2
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2                compat_sys_epoll_pwait2
+442    common  watch_mount                     sys_watch_mount
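With number 442 wired into each table above, userspace can reach the call through syscall(2) until a libc wrapper exists. The sketch below assumes the five-argument prototype (dfd, path, at_flags, watch_fd, watch_id) from the mount-notification series; that prototype is an assumption, not something these tables define:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_watch_mount
    #define __NR_watch_mount 442
    #endif

    int main(void)
    {
            /* assumed signature: watch_mount(dfd, path, at_flags, watch_fd, watch_id) */
            long ret = syscall(__NR_watch_mount, AT_FDCWD, "/", 0, -1, 0);

            if (ret == -1)
                    perror("watch_mount");
            return ret == -1 ? 1 : 0;
    }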
index 83db94c0b43189e4c938d30878368ee2cbf41c08..ef5c5207c9ffbbee5ef2b339cb0d073d1cc55d2a 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/uaccess.h>
 #include <linux/smp.h>
 #include <linux/perf_event.h>
+#include <linux/extable.h>
 
 #include <asm/setup.h>
 
@@ -213,10 +214,10 @@ static inline int ok_for_kernel(unsigned int insn)
 
 static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
 {
-       unsigned long g2 = regs->u_regs [UREG_G2];
-       unsigned long fixup = search_extables_range(regs->pc, &g2);
+       const struct exception_table_entry *entry;
 
-       if (!fixup) {
+       entry = search_exception_tables(regs->pc);
+       if (!entry) {
                unsigned long address = compute_effective_address(regs, insn);
                if(address < PAGE_SIZE) {
                        printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference in mna handler");
@@ -232,9 +233,8 @@ static void kernel_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
                die_if_kernel("Oops", regs);
                /* Not reached */
        }
-       regs->pc = fixup;
+       regs->pc = entry->fixup;
        regs->npc = regs->pc + 4;
-       regs->u_regs [UREG_G2] = g2;
 }
 
 asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
@@ -274,103 +274,9 @@ asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn)
        }
 }
 
-static inline int ok_for_user(struct pt_regs *regs, unsigned int insn,
-                             enum direction dir)
-{
-       unsigned int reg;
-       int size = ((insn >> 19) & 3) == 3 ? 8 : 4;
-
-       if ((regs->pc | regs->npc) & 3)
-               return 0;
-
-       /* Must access_ok() in all the necessary places. */
-#define WINREG_ADDR(regnum) \
-       ((void __user *)(((unsigned long *)regs->u_regs[UREG_FP])+(regnum)))
-
-       reg = (insn >> 25) & 0x1f;
-       if (reg >= 16) {
-               if (!access_ok(WINREG_ADDR(reg - 16), size))
-                       return -EFAULT;
-       }
-       reg = (insn >> 14) & 0x1f;
-       if (reg >= 16) {
-               if (!access_ok(WINREG_ADDR(reg - 16), size))
-                       return -EFAULT;
-       }
-       if (!(insn & 0x2000)) {
-               reg = (insn & 0x1f);
-               if (reg >= 16) {
-                       if (!access_ok(WINREG_ADDR(reg - 16), size))
-                               return -EFAULT;
-               }
-       }
-#undef WINREG_ADDR
-       return 0;
-}
-
-static void user_mna_trap_fault(struct pt_regs *regs, unsigned int insn)
+asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
 {
        send_sig_fault(SIGBUS, BUS_ADRALN,
                       (void __user *)safe_compute_effective_address(regs, insn),
                       0, current);
 }
-
-asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn)
-{
-       enum direction dir;
-
-       if(!(current->thread.flags & SPARC_FLAG_UNALIGNED) ||
-          (((insn >> 30) & 3) != 3))
-               goto kill_user;
-       dir = decode_direction(insn);
-       if(!ok_for_user(regs, insn, dir)) {
-               goto kill_user;
-       } else {
-               int err, size = decode_access_size(insn);
-               unsigned long addr;
-
-               if(floating_point_load_or_store_p(insn)) {
-                       printk("User FPU load/store unaligned unsupported.\n");
-                       goto kill_user;
-               }
-
-               addr = compute_effective_address(regs, insn);
-               perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
-               switch(dir) {
-               case load:
-                       err = do_int_load(fetch_reg_addr(((insn>>25)&0x1f),
-                                                        regs),
-                                         size, (unsigned long *) addr,
-                                         decode_signedness(insn));
-                       break;
-
-               case store:
-                       err = do_int_store(((insn>>25)&0x1f), size,
-                                          (unsigned long *) addr, regs);
-                       break;
-
-               case both:
-                       /*
-                        * This was supported in 2.4. However, we question
-                        * the value of SWAP instruction across word boundaries.
-                        */
-                       printk("Unaligned SWAP unsupported.\n");
-                       err = -EFAULT;
-                       break;
-
-               default:
-                       unaligned_panic("Impossible user unaligned trap.");
-                       goto out;
-               }
-               if (err)
-                       goto kill_user;
-               else
-                       advance(regs);
-               goto out;
-       }
-
-kill_user:
-       user_mna_trap_fault(regs, insn);
-out:
-       ;
-}
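Both sparc32 conversions above land on the generic extable flow: search_exception_tables() returns the entry matching the faulting pc, and the handler redirects execution to entry->fixup; the old range encoding and its %g2 side channel are gone. A condensed sketch of that flow; fixup_kernel_fault() is an illustrative name:

    #include <linux/extable.h>
    #include <linux/types.h>
    #include <asm/ptrace.h>

    static bool fixup_kernel_fault(struct pt_regs *regs)
    {
            const struct exception_table_entry *entry;

            entry = search_exception_tables(regs->pc);
            if (!entry)
                    return false;   /* no fixup: caller falls through to die_if_kernel() */

            regs->pc = entry->fixup;
            regs->npc = regs->pc + 4;       /* keep the delay-slot pc consistent */
            return true;
    }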
index 7db5aabe9708576109bd028c241532150339ff59..e27afd233bf52628f89e471638673f81842eaa4b 100644 (file)
@@ -428,7 +428,7 @@ static int process_dreg_info(struct vio_driver_state *vio,
                             struct vio_dring_register *pkt)
 {
        struct vio_dring_state *dr;
-       int i, len;
+       int i;
 
        viodbg(HS, "GOT DRING_REG INFO ident[%llx] "
               "ndesc[%u] dsz[%u] opt[0x%x] ncookies[%u]\n",
@@ -482,9 +482,7 @@ static int process_dreg_info(struct vio_driver_state *vio,
               pkt->num_descr, pkt->descr_size, pkt->options,
               pkt->num_cookies);
 
-       len = (sizeof(*pkt) +
-              (dr->ncookies * sizeof(struct ldc_trans_cookie)));
-       if (send_ctrl(vio, &pkt->tag, len) < 0)
+       if (send_ctrl(vio, &pkt->tag, struct_size(pkt, cookies, dr->ncookies)) < 0)
                goto send_nack;
 
        vio->dr_state |= VIO_DR_STATE_RXREG;
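The send_ctrl() change above swaps an open-coded sizeof(*pkt) plus n * sizeof(cookie) for struct_size(), which sizes a structure ending in a flexible array member and saturates on overflow. A self-contained sketch with made-up types:

    #include <linux/overflow.h>
    #include <linux/types.h>

    struct cookie {                         /* illustrative stand-in */
            u64 addr;
            u64 size;
    };

    struct dring_pkt {
            u32 tag;
            u32 ncookies;
            struct cookie cookies[];        /* flexible array member */
    };

    static size_t dring_pkt_len(const struct dring_pkt *pkt, u32 n)
    {
            /* sizeof(*pkt) + n * sizeof(pkt->cookies[0]), checked for overflow */
            return struct_size(pkt, cookies, n);
    }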
index 7488d130faf730d6e96414ce987ff4a5e8b37ca6..781e39b3c009f9b27e886f6a6e07e6d2f5545108 100644 (file)
@@ -155,13 +155,6 @@ cpout:     retl                                            ! get outta here
         .text;                                  \
         .align  4
 
-#define EXT(start,end)                         \
-        .section __ex_table,ALLOC;             \
-        .align  4;                              \
-        .word   start, 0, end, cc_fault;         \
-        .text;                                  \
-        .align  4
-
        /* This aligned version executes typically in 8.5 superscalar cycles, this
         * is the best I can do.  I say 8.5 because the final add will pair with
         * the next ldd in the main unrolled loop.  Thus the pipe is always full.
@@ -169,20 +162,20 @@ cpout:    retl                                            ! get outta here
         * please check the fixup code below as well.
         */
 #define CSUMCOPY_BIGCHUNK_ALIGNED(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7)  \
-       ldd     [src + off + 0x00], t0;                                                 \
-       ldd     [src + off + 0x08], t2;                                                 \
+       EX(ldd  [src + off + 0x00], t0);                                                \
+       EX(ldd  [src + off + 0x08], t2);                                                \
        addxcc  t0, sum, sum;                                                           \
-       ldd     [src + off + 0x10], t4;                                                 \
+       EX(ldd  [src + off + 0x10], t4);                                                \
        addxcc  t1, sum, sum;                                                           \
-       ldd     [src + off + 0x18], t6;                                                 \
+       EX(ldd  [src + off + 0x18], t6);                                                \
        addxcc  t2, sum, sum;                                                           \
-       std     t0, [dst + off + 0x00];                                                 \
+       EX(std  t0, [dst + off + 0x00]);                                                \
        addxcc  t3, sum, sum;                                                           \
-       std     t2, [dst + off + 0x08];                                                 \
+       EX(std  t2, [dst + off + 0x08]);                                                \
        addxcc  t4, sum, sum;                                                           \
-       std     t4, [dst + off + 0x10];                                                 \
+       EX(std  t4, [dst + off + 0x10]);                                                \
        addxcc  t5, sum, sum;                                                           \
-       std     t6, [dst + off + 0x18];                                                 \
+       EX(std  t6, [dst + off + 0x18]);                                                \
        addxcc  t6, sum, sum;                                                           \
        addxcc  t7, sum, sum;
 
@@ -191,39 +184,39 @@ cpout:    retl                                            ! get outta here
         * Viking MXCC into streaming mode.  Ho hum...
         */
 #define CSUMCOPY_BIGCHUNK(src, dst, sum, off, t0, t1, t2, t3, t4, t5, t6, t7)  \
-       ldd     [src + off + 0x00], t0;                                         \
-       ldd     [src + off + 0x08], t2;                                         \
-       ldd     [src + off + 0x10], t4;                                         \
-       ldd     [src + off + 0x18], t6;                                         \
-       st      t0, [dst + off + 0x00];                                         \
+       EX(ldd  [src + off + 0x00], t0);                                        \
+       EX(ldd  [src + off + 0x08], t2);                                        \
+       EX(ldd  [src + off + 0x10], t4);                                        \
+       EX(ldd  [src + off + 0x18], t6);                                        \
+       EX(st   t0, [dst + off + 0x00]);                                        \
        addxcc  t0, sum, sum;                                                   \
-       st      t1, [dst + off + 0x04];                                         \
+       EX(st   t1, [dst + off + 0x04]);                                        \
        addxcc  t1, sum, sum;                                                   \
-       st      t2, [dst + off + 0x08];                                         \
+       EX(st   t2, [dst + off + 0x08]);                                        \
        addxcc  t2, sum, sum;                                                   \
-       st      t3, [dst + off + 0x0c];                                         \
+       EX(st   t3, [dst + off + 0x0c]);                                        \
        addxcc  t3, sum, sum;                                                   \
-       st      t4, [dst + off + 0x10];                                         \
+       EX(st   t4, [dst + off + 0x10]);                                        \
        addxcc  t4, sum, sum;                                                   \
-       st      t5, [dst + off + 0x14];                                         \
+       EX(st   t5, [dst + off + 0x14]);                                        \
        addxcc  t5, sum, sum;                                                   \
-       st      t6, [dst + off + 0x18];                                         \
+       EX(st   t6, [dst + off + 0x18]);                                        \
        addxcc  t6, sum, sum;                                                   \
-       st      t7, [dst + off + 0x1c];                                         \
+       EX(st   t7, [dst + off + 0x1c]);                                        \
        addxcc  t7, sum, sum;
 
        /* Yuck, 6 superscalar cycles... */
 #define CSUMCOPY_LASTCHUNK(src, dst, sum, off, t0, t1, t2, t3) \
-       ldd     [src - off - 0x08], t0;                         \
-       ldd     [src - off - 0x00], t2;                         \
+       EX(ldd  [src - off - 0x08], t0);                        \
+       EX(ldd  [src - off - 0x00], t2);                        \
        addxcc  t0, sum, sum;                                   \
-       st      t0, [dst - off - 0x08];                         \
+       EX(st   t0, [dst - off - 0x08]);                        \
        addxcc  t1, sum, sum;                                   \
-       st      t1, [dst - off - 0x04];                         \
+       EX(st   t1, [dst - off - 0x04]);                        \
        addxcc  t2, sum, sum;                                   \
-       st      t2, [dst - off - 0x00];                         \
+       EX(st   t2, [dst - off - 0x00]);                        \
        addxcc  t3, sum, sum;                                   \
-       st      t3, [dst - off + 0x04];
+       EX(st   t3, [dst - off + 0x04]);
 
        /* Handle the end cruft code out of band for better cache patterns. */
 cc_end_cruft:
@@ -331,7 +324,6 @@ __csum_partial_copy_sparc_generic:
        CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
        CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
        CSUMCOPY_BIGCHUNK(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-10:    EXT(5b, 10b)                    ! note for exception handling
        sub     %g1, 128, %g1           ! detract from length
        addx    %g0, %g7, %g7           ! add in last carry bit
        andcc   %g1, 0xffffff80, %g0    ! more to csum?
@@ -356,8 +348,7 @@ cctbl:      CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x68,%g2,%g3,%g4,%g5)
        CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x28,%g2,%g3,%g4,%g5)
        CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x18,%g2,%g3,%g4,%g5)
        CSUMCOPY_LASTCHUNK(%o0,%o1,%g7,0x08,%g2,%g3,%g4,%g5)
-12:    EXT(cctbl, 12b)                 ! note for exception table handling
-       addx    %g0, %g7, %g7
+12:    addx    %g0, %g7, %g7
        andcc   %o3, 0xf, %g0           ! check for low bits set
 ccte:  bne     cc_end_cruft            ! something left, handle it out of band
         andcc  %o3, 8, %g0             ! begin checks for that code
@@ -367,7 +358,6 @@ ccdbl:      CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x00,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o
        CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x20,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
        CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x40,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
        CSUMCOPY_BIGCHUNK_ALIGNED(%o0,%o1,%g7,0x60,%o4,%o5,%g2,%g3,%g4,%g5,%o2,%o3)
-11:    EXT(ccdbl, 11b)                 ! note for exception table handling
        sub     %g1, 128, %g1           ! detract from length
        addx    %g0, %g7, %g7           ! add in last carry bit
        andcc   %g1, 0xffffff80, %g0    ! more to csum?
index dc72f2b970b7b3328b0ccc1566c2d64951669abf..954572c78539aeff5a84fedd211d3dfb4465bfea 100644 (file)
 /* Work around cpp -rob */
 #define ALLOC #alloc
 #define EXECINSTR #execinstr
+
+#define EX_ENTRY(l1, l2)                       \
+       .section __ex_table,ALLOC;              \
+       .align  4;                              \
+       .word   l1, l2;                         \
+       .text;
+
 #define EX(x,y,a,b)                            \
 98:    x,y;                                    \
        .section .fixup,ALLOC,EXECINSTR;        \
        .align  4;                              \
-99:    ba fixupretl;                           \
-        a, b, %g3;                             \
-       .section __ex_table,ALLOC;              \
-       .align  4;                              \
-       .word   98b, 99b;                       \
-       .text;                                  \
-       .align  4
+99:    retl;                                   \
+        a, b, %o0;                             \
+       EX_ENTRY(98b, 99b)
 
 #define EX2(x,y,c,d,e,a,b)                     \
 98:    x,y;                                    \
        .section .fixup,ALLOC,EXECINSTR;        \
        .align  4;                              \
 99:    c, d, e;                                \
-       ba fixupretl;                           \
-        a, b, %g3;                             \
-       .section __ex_table,ALLOC;              \
-       .align  4;                              \
-       .word   98b, 99b;                       \
-       .text;                                  \
-       .align  4
+       retl;                                   \
+        a, b, %o0;                             \
+       EX_ENTRY(98b, 99b)
 
 #define EXO2(x,y)                              \
 98:    x, y;                                   \
-       .section __ex_table,ALLOC;              \
-       .align  4;                              \
-       .word   98b, 97f;                       \
-       .text;                                  \
-       .align  4
+       EX_ENTRY(98b, 97f)
 
-#define EXT(start,end,handler)                 \
-       .section __ex_table,ALLOC;              \
-       .align  4;                              \
-       .word   start, 0, end, handler;         \
-       .text;                                  \
-       .align  4
+#define LD(insn, src, offset, reg, label)      \
+98:    insn [%src + (offset)], %reg;           \
+       .section .fixup,ALLOC,EXECINSTR;        \
+99:    ba      label;                          \
+        mov    offset, %g5;                    \
+       EX_ENTRY(98b, 99b)
 
-/* Please do not change following macros unless you change logic used
- * in .fixup at the end of this file as well
- */
+#define ST(insn, dst, offset, reg, label)      \
+98:    insn %reg, [%dst + (offset)];           \
+       .section .fixup,ALLOC,EXECINSTR;        \
+99:    ba      label;                          \
+        mov    offset, %g5;                    \
+       EX_ENTRY(98b, 99b)
 
 /* Both these macros have to start with exactly the same insn */
+/* left: g7 + (g1 % 128) - offset */
 #define MOVE_BIGCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
-       ldd     [%src + (offset) + 0x00], %t0; \
-       ldd     [%src + (offset) + 0x08], %t2; \
-       ldd     [%src + (offset) + 0x10], %t4; \
-       ldd     [%src + (offset) + 0x18], %t6; \
-       st      %t0, [%dst + (offset) + 0x00]; \
-       st      %t1, [%dst + (offset) + 0x04]; \
-       st      %t2, [%dst + (offset) + 0x08]; \
-       st      %t3, [%dst + (offset) + 0x0c]; \
-       st      %t4, [%dst + (offset) + 0x10]; \
-       st      %t5, [%dst + (offset) + 0x14]; \
-       st      %t6, [%dst + (offset) + 0x18]; \
-       st      %t7, [%dst + (offset) + 0x1c];
-
+       LD(ldd, src, offset + 0x00, t0, bigchunk_fault) \
+       LD(ldd, src, offset + 0x08, t2, bigchunk_fault) \
+       LD(ldd, src, offset + 0x10, t4, bigchunk_fault) \
+       LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \
+       ST(st, dst, offset + 0x00, t0, bigchunk_fault)  \
+       ST(st, dst, offset + 0x04, t1, bigchunk_fault)  \
+       ST(st, dst, offset + 0x08, t2, bigchunk_fault)  \
+       ST(st, dst, offset + 0x0c, t3, bigchunk_fault)  \
+       ST(st, dst, offset + 0x10, t4, bigchunk_fault)  \
+       ST(st, dst, offset + 0x14, t5, bigchunk_fault)  \
+       ST(st, dst, offset + 0x18, t6, bigchunk_fault)  \
+       ST(st, dst, offset + 0x1c, t7, bigchunk_fault)
+
+/* left: g7 + (g1 % 128) - offset */
 #define MOVE_BIGALIGNCHUNK(src, dst, offset, t0, t1, t2, t3, t4, t5, t6, t7) \
-       ldd     [%src + (offset) + 0x00], %t0; \
-       ldd     [%src + (offset) + 0x08], %t2; \
-       ldd     [%src + (offset) + 0x10], %t4; \
-       ldd     [%src + (offset) + 0x18], %t6; \
-       std     %t0, [%dst + (offset) + 0x00]; \
-       std     %t2, [%dst + (offset) + 0x08]; \
-       std     %t4, [%dst + (offset) + 0x10]; \
-       std     %t6, [%dst + (offset) + 0x18];
+       LD(ldd, src, offset + 0x00, t0, bigchunk_fault) \
+       LD(ldd, src, offset + 0x08, t2, bigchunk_fault) \
+       LD(ldd, src, offset + 0x10, t4, bigchunk_fault) \
+       LD(ldd, src, offset + 0x18, t6, bigchunk_fault) \
+       ST(std, dst, offset + 0x00, t0, bigchunk_fault) \
+       ST(std, dst, offset + 0x08, t2, bigchunk_fault) \
+       ST(std, dst, offset + 0x10, t4, bigchunk_fault) \
+       ST(std, dst, offset + 0x18, t6, bigchunk_fault)
 
+       .section .fixup,#alloc,#execinstr
+bigchunk_fault:
+       sub     %g7, %g5, %o0
+       and     %g1, 127, %g1
+       retl
+        add    %o0, %g1, %o0
+
+/* left: offset + 16 + (g1 % 16) */
 #define MOVE_LASTCHUNK(src, dst, offset, t0, t1, t2, t3) \
-       ldd     [%src - (offset) - 0x10], %t0; \
-       ldd     [%src - (offset) - 0x08], %t2; \
-       st      %t0, [%dst - (offset) - 0x10]; \
-       st      %t1, [%dst - (offset) - 0x0c]; \
-       st      %t2, [%dst - (offset) - 0x08]; \
-       st      %t3, [%dst - (offset) - 0x04];
+       LD(ldd, src, -(offset + 0x10), t0, lastchunk_fault)     \
+       LD(ldd, src, -(offset + 0x08), t2, lastchunk_fault)     \
+       ST(st, dst, -(offset + 0x10), t0, lastchunk_fault)      \
+       ST(st, dst, -(offset + 0x0c), t1, lastchunk_fault)      \
+       ST(st, dst, -(offset + 0x08), t2, lastchunk_fault)      \
+       ST(st, dst, -(offset + 0x04), t3, lastchunk_fault)
 
-#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
-       lduh    [%src + (offset) + 0x00], %t0; \
-       lduh    [%src + (offset) + 0x02], %t1; \
-       lduh    [%src + (offset) + 0x04], %t2; \
-       lduh    [%src + (offset) + 0x06], %t3; \
-       sth     %t0, [%dst + (offset) + 0x00]; \
-       sth     %t1, [%dst + (offset) + 0x02]; \
-       sth     %t2, [%dst + (offset) + 0x04]; \
-       sth     %t3, [%dst + (offset) + 0x06];
+       .section .fixup,#alloc,#execinstr
+lastchunk_fault:
+       and     %g1, 15, %g1
+       retl
+        sub    %g1, %g5, %o0
 
+/* left: o3 + (o2 % 16) - offset */
+#define MOVE_HALFCHUNK(src, dst, offset, t0, t1, t2, t3) \
+       LD(lduh, src, offset + 0x00, t0, halfchunk_fault)       \
+       LD(lduh, src, offset + 0x02, t1, halfchunk_fault)       \
+       LD(lduh, src, offset + 0x04, t2, halfchunk_fault)       \
+       LD(lduh, src, offset + 0x06, t3, halfchunk_fault)       \
+       ST(sth, dst, offset + 0x00, t0, halfchunk_fault)        \
+       ST(sth, dst, offset + 0x02, t1, halfchunk_fault)        \
+       ST(sth, dst, offset + 0x04, t2, halfchunk_fault)        \
+       ST(sth, dst, offset + 0x06, t3, halfchunk_fault)
+
+/* left: o3 + (o2 % 16) + offset + 2 */
 #define MOVE_SHORTCHUNK(src, dst, offset, t0, t1) \
-       ldub    [%src - (offset) - 0x02], %t0; \
-       ldub    [%src - (offset) - 0x01], %t1; \
-       stb     %t0, [%dst - (offset) - 0x02]; \
-       stb     %t1, [%dst - (offset) - 0x01];
+       LD(ldub, src, -(offset + 0x02), t0, halfchunk_fault)    \
+       LD(ldub, src, -(offset + 0x01), t1, halfchunk_fault)    \
+       ST(stb, dst, -(offset + 0x02), t0, halfchunk_fault)     \
+       ST(stb, dst, -(offset + 0x01), t1, halfchunk_fault)
+
+       .section .fixup,#alloc,#execinstr
+halfchunk_fault:
+       and     %o2, 15, %o2
+       sub     %o3, %g5, %o3
+       retl
+        add    %o2, %o3, %o0
+
+/* left: offset + 2 + (o2 % 2) */
+#define MOVE_LAST_SHORTCHUNK(src, dst, offset, t0, t1) \
+       LD(ldub, src, -(offset + 0x02), t0, last_shortchunk_fault)      \
+       LD(ldub, src, -(offset + 0x01), t1, last_shortchunk_fault)      \
+       ST(stb, dst, -(offset + 0x02), t0, last_shortchunk_fault)       \
+       ST(stb, dst, -(offset + 0x01), t1, last_shortchunk_fault)
+
+       .section .fixup,#alloc,#execinstr
+last_shortchunk_fault:
+       and     %o2, 1, %o2
+       retl
+        sub    %o2, %g5, %o0
 
        .text
        .align  4
@@ -182,8 +218,6 @@ __copy_user:        /* %o0=dst %o1=src %o2=len */
        MOVE_BIGCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-80:
-       EXT(5b, 80b, 50f)
        subcc   %g7, 128, %g7
        add     %o1, 128, %o1
        bne     5b
@@ -201,7 +235,6 @@ __copy_user:        /* %o0=dst %o1=src %o2=len */
        jmpl    %o5 + %lo(copy_user_table_end), %g0
         add    %o0, %g7, %o0
 
-copy_user_table:
        MOVE_LASTCHUNK(o1, o0, 0x60, g2, g3, g4, g5)
        MOVE_LASTCHUNK(o1, o0, 0x50, g2, g3, g4, g5)
        MOVE_LASTCHUNK(o1, o0, 0x40, g2, g3, g4, g5)
@@ -210,7 +243,6 @@ copy_user_table:
        MOVE_LASTCHUNK(o1, o0, 0x10, g2, g3, g4, g5)
        MOVE_LASTCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
 copy_user_table_end:
-       EXT(copy_user_table, copy_user_table_end, 51f)
        be      copy_user_last7
         andcc  %g1, 4, %g0
 
@@ -250,8 +282,6 @@ ldd_std:
        MOVE_BIGALIGNCHUNK(o1, o0, 0x20, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGALIGNCHUNK(o1, o0, 0x40, o2, o3, o4, o5, g2, g3, g4, g5)
        MOVE_BIGALIGNCHUNK(o1, o0, 0x60, o2, o3, o4, o5, g2, g3, g4, g5)
-81:
-       EXT(ldd_std, 81b, 52f)
        subcc   %g7, 128, %g7
        add     %o1, 128, %o1
        bne     ldd_std
@@ -290,8 +320,6 @@ cannot_optimize:
 10:
        MOVE_HALFCHUNK(o1, o0, 0x00, g2, g3, g4, g5)
        MOVE_HALFCHUNK(o1, o0, 0x08, g2, g3, g4, g5)
-82:
-       EXT(10b, 82b, 53f)
        subcc   %o3, 0x10, %o3
        add     %o1, 0x10, %o1
        bne     10b
@@ -308,8 +336,6 @@ byte_chunk:
        MOVE_SHORTCHUNK(o1, o0, -0x0c, g2, g3)
        MOVE_SHORTCHUNK(o1, o0, -0x0e, g2, g3)
        MOVE_SHORTCHUNK(o1, o0, -0x10, g2, g3)
-83:
-       EXT(byte_chunk, 83b, 54f)
        subcc   %o3, 0x10, %o3
        add     %o1, 0x10, %o1
        bne     byte_chunk
@@ -325,16 +351,14 @@ short_end:
        add     %o1, %o3, %o1
        jmpl    %o5 + %lo(short_table_end), %g0
         andcc  %o2, 1, %g0
-84:
-       MOVE_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
-       MOVE_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
-       MOVE_SHORTCHUNK(o1, o0, 0x08, g2, g3)
-       MOVE_SHORTCHUNK(o1, o0, 0x06, g2, g3)
-       MOVE_SHORTCHUNK(o1, o0, 0x04, g2, g3)
-       MOVE_SHORTCHUNK(o1, o0, 0x02, g2, g3)
-       MOVE_SHORTCHUNK(o1, o0, 0x00, g2, g3)
+       MOVE_LAST_SHORTCHUNK(o1, o0, 0x0c, g2, g3)
+       MOVE_LAST_SHORTCHUNK(o1, o0, 0x0a, g2, g3)
+       MOVE_LAST_SHORTCHUNK(o1, o0, 0x08, g2, g3)
+       MOVE_LAST_SHORTCHUNK(o1, o0, 0x06, g2, g3)
+       MOVE_LAST_SHORTCHUNK(o1, o0, 0x04, g2, g3)
+       MOVE_LAST_SHORTCHUNK(o1, o0, 0x02, g2, g3)
+       MOVE_LAST_SHORTCHUNK(o1, o0, 0x00, g2, g3)
 short_table_end:
-       EXT(84b, short_table_end, 55f)
        be      1f
         nop
        EX(ldub [%o1], %g2, add %g0, 1)
@@ -363,123 +387,8 @@ short_aligned_end:
        .section .fixup,#alloc,#execinstr
        .align  4
 97:
-       mov     %o2, %g3
-fixupretl:
        retl
-        mov    %g3, %o0
-
-/* exception routine sets %g2 to (broken_insn - first_insn)>>2 */
-50:
-/* This magic counts how many bytes are left when crash in MOVE_BIGCHUNK
- * happens. This is derived from the amount ldd reads, st stores, etc.
- * x = g2 % 12;
- * g3 = g1 + g7 - ((g2 / 12) * 32 + (x < 4) ? 0 : (x - 4) * 4);
- * o0 += (g2 / 12) * 32;
- */
-       cmp     %g2, 12
-       add     %o0, %g7, %o0
-       bcs     1f
-        cmp    %g2, 24
-       bcs     2f
-        cmp    %g2, 36
-       bcs     3f
-        nop
-       sub     %g2, 12, %g2
-       sub     %g7, 32, %g7
-3:     sub     %g2, 12, %g2
-       sub     %g7, 32, %g7
-2:     sub     %g2, 12, %g2
-       sub     %g7, 32, %g7
-1:     cmp     %g2, 4
-       bcs,a   60f
-        clr    %g2
-       sub     %g2, 4, %g2
-       sll     %g2, 2, %g2
-60:    and     %g1, 0x7f, %g3
-       sub     %o0, %g7, %o0
-       add     %g3, %g7, %g3
-       ba      fixupretl
-        sub    %g3, %g2, %g3
-51:
-/* i = 41 - g2; j = i % 6;
- * g3 = (g1 & 15) + (i / 6) * 16 + (j < 4) ? (j + 1) * 4 : 16;
- * o0 -= (i / 6) * 16 + 16;
- */
-       neg     %g2
-       and     %g1, 0xf, %g1
-       add     %g2, 41, %g2
-       add     %o0, %g1, %o0
-1:     cmp     %g2, 6
-       bcs,a   2f
-        cmp    %g2, 4
-       add     %g1, 16, %g1
-       b       1b
-        sub    %g2, 6, %g2
-2:     bcc,a   2f
-        mov    16, %g2
-       inc     %g2
-       sll     %g2, 2, %g2
-2:     add     %g1, %g2, %g3
-       ba      fixupretl
-        sub    %o0, %g3, %o0
-52:
-/* g3 = g1 + g7 - (g2 / 8) * 32 + (g2 & 4) ? (g2 & 3) * 8 : 0;
-   o0 += (g2 / 8) * 32 */
-       andn    %g2, 7, %g4
-       add     %o0, %g7, %o0
-       andcc   %g2, 4, %g0
-       and     %g2, 3, %g2
-       sll     %g4, 2, %g4
-       sll     %g2, 3, %g2
-       bne     60b
-        sub    %g7, %g4, %g7
-       ba      60b
-        clr    %g2
-53:
-/* g3 = o3 + (o2 & 15) - (g2 & 8) - (g2 & 4) ? (g2 & 3) * 2 : 0;
-   o0 += (g2 & 8) */
-       and     %g2, 3, %g4
-       andcc   %g2, 4, %g0
-       and     %g2, 8, %g2
-       sll     %g4, 1, %g4
-       be      1f
-        add    %o0, %g2, %o0
-       add     %g2, %g4, %g2
-1:     and     %o2, 0xf, %g3
-       add     %g3, %o3, %g3
-       ba      fixupretl
-        sub    %g3, %g2, %g3
-54:
-/* g3 = o3 + (o2 & 15) - (g2 / 4) * 2 - (g2 & 2) ? (g2 & 1) : 0;
-   o0 += (g2 / 4) * 2 */
-       srl     %g2, 2, %o4
-       and     %g2, 1, %o5
-       srl     %g2, 1, %g2
-       add     %o4, %o4, %o4
-       and     %o5, %g2, %o5
-       and     %o2, 0xf, %o2
-       add     %o0, %o4, %o0
-       sub     %o3, %o5, %o3
-       sub     %o2, %o4, %o2
-       ba      fixupretl
-        add    %o2, %o3, %g3
-55:
-/* i = 27 - g2;
-   g3 = (o2 & 1) + i / 4 * 2 + !(i & 3);
-   o0 -= i / 4 * 2 + 1 */
-       neg     %g2
-       and     %o2, 1, %o2
-       add     %g2, 27, %g2
-       srl     %g2, 2, %o5
-       andcc   %g2, 3, %g0
-       mov     1, %g2
-       add     %o5, %o5, %o5
-       be,a    1f
-        clr    %g2
-1:     add     %g2, %o5, %g3
-       sub     %o0, %g3, %o0
-       ba      fixupretl
-        add    %g3, %o2, %g3
+        mov    %o2, %o0
 
        .globl  __copy_user_end
 __copy_user_end:
index b89d42b29e344a4ad9bfff0335c84a20c0e25bd3..eaff68213fdf58b86fb297d2ab844ad858da5dc0 100644 (file)
@@ -19,7 +19,7 @@
 98:    x,y;                                    \
        .section .fixup,ALLOC,EXECINSTR;        \
        .align  4;                              \
-99:    ba 30f;                                 \
+99:    retl;                                   \
         a, b, %o0;                             \
        .section __ex_table,ALLOC;              \
        .align  4;                              \
        .text;                                  \
        .align  4
 
-#define EXT(start,end,handler)                         \
+#define STORE(source, base, offset, n)         \
+98:    std source, [base + offset + n];        \
+       .section .fixup,ALLOC,EXECINSTR;        \
+       .align  4;                              \
+99:    ba 30f;                                 \
+        sub %o3, n - offset, %o3;              \
        .section __ex_table,ALLOC;              \
        .align  4;                              \
-       .word   start, 0, end, handler;         \
+       .word   98b, 99b;                       \
        .text;                                  \
-       .align  4
+       .align  4;
+
+#define STORE_LAST(source, base, offset, n)    \
+       EX(std source, [base - offset - n],     \
+          add %o1, offset + n);
 
 /* Please don't change these macros, unless you change the logic
  * in the .fixup section below as well.
  * Store 64 bytes at (BASE + OFFSET) using value SOURCE. */
-#define ZERO_BIG_BLOCK(base, offset, source)    \
-       std     source, [base + offset + 0x00]; \
-       std     source, [base + offset + 0x08]; \
-       std     source, [base + offset + 0x10]; \
-       std     source, [base + offset + 0x18]; \
-       std     source, [base + offset + 0x20]; \
-       std     source, [base + offset + 0x28]; \
-       std     source, [base + offset + 0x30]; \
-       std     source, [base + offset + 0x38];
+#define ZERO_BIG_BLOCK(base, offset, source)   \
+       STORE(source, base, offset, 0x00);      \
+       STORE(source, base, offset, 0x08);      \
+       STORE(source, base, offset, 0x10);      \
+       STORE(source, base, offset, 0x18);      \
+       STORE(source, base, offset, 0x20);      \
+       STORE(source, base, offset, 0x28);      \
+       STORE(source, base, offset, 0x30);      \
+       STORE(source, base, offset, 0x38);
 
 #define ZERO_LAST_BLOCKS(base, offset, source) \
-       std     source, [base - offset - 0x38]; \
-       std     source, [base - offset - 0x30]; \
-       std     source, [base - offset - 0x28]; \
-       std     source, [base - offset - 0x20]; \
-       std     source, [base - offset - 0x18]; \
-       std     source, [base - offset - 0x10]; \
-       std     source, [base - offset - 0x08]; \
-       std     source, [base - offset - 0x00];
+       STORE_LAST(source, base, offset, 0x38); \
+       STORE_LAST(source, base, offset, 0x30); \
+       STORE_LAST(source, base, offset, 0x28); \
+       STORE_LAST(source, base, offset, 0x20); \
+       STORE_LAST(source, base, offset, 0x18); \
+       STORE_LAST(source, base, offset, 0x10); \
+       STORE_LAST(source, base, offset, 0x08); \
+       STORE_LAST(source, base, offset, 0x00);
 
        .text
        .align 4
@@ -68,8 +77,6 @@ __bzero_begin:
        .globl  memset
        EXPORT_SYMBOL(__bzero)
        EXPORT_SYMBOL(memset)
-       .globl  __memset_start, __memset_end
-__memset_start:
 memset:
        mov     %o0, %g1
        mov     1, %g4
@@ -122,8 +129,6 @@ __bzero:
        ZERO_BIG_BLOCK(%o0, 0x00, %g2)
        subcc   %o3, 128, %o3
        ZERO_BIG_BLOCK(%o0, 0x40, %g2)
-11:
-       EXT(10b, 11b, 20f)
        bne     10b
         add    %o0, 128, %o0
 
@@ -138,7 +143,6 @@ __bzero:
        jmp     %o4
         add    %o0, %o2, %o0
 
-12:
        ZERO_LAST_BLOCKS(%o0, 0x48, %g2)
        ZERO_LAST_BLOCKS(%o0, 0x08, %g2)
 13:
@@ -181,37 +185,13 @@ __bzero:
 5:
        retl
         clr    %o0
-__memset_end:
 
        .section .fixup,#alloc,#execinstr
        .align  4
-20:
-       cmp     %g2, 8
-       bleu    1f
-        and    %o1, 0x7f, %o1
-       sub     %g2, 9, %g2
-       add     %o3, 64, %o3
-1:
-       sll     %g2, 3, %g2
-       add     %o3, %o1, %o0
-       b 30f
-        sub    %o0, %g2, %o0
-21:
-       mov     8, %o0
-       and     %o1, 7, %o1
-       sub     %o0, %g2, %o0
-       sll     %o0, 3, %o0
-       b 30f
-        add    %o0, %o1, %o0
 30:
-/* %o4 is faulting address, %o5 is %pc where fault occurred */
-       save    %sp, -104, %sp
-       mov     %i5, %o0
-       mov     %i7, %o1
-       call    lookup_fault
-        mov    %i4, %o2
-       ret
-        restore
+       and     %o1, 0x7f, %o1
+       retl
+        add    %o3, %o1, %o0
 
        .globl __bzero_end
 __bzero_end:
index 68db1f859b02857e979539d0b4408a6b0d26fe64..871354aa3c002b875defade17825d7600066fee4 100644 (file)
@@ -8,7 +8,7 @@ ccflags-y := -Werror
 obj-$(CONFIG_SPARC64)   += ultra.o tlb.o tsb.o
 obj-y                   += fault_$(BITS).o
 obj-y                   += init_$(BITS).o
-obj-$(CONFIG_SPARC32)   += extable.o srmmu.o iommu.o io-unit.o
+obj-$(CONFIG_SPARC32)   += srmmu.o iommu.o io-unit.o
 obj-$(CONFIG_SPARC32)   += srmmu_access.o
 obj-$(CONFIG_SPARC32)   += hypersparc.o viking.o tsunami.o swift.o
 obj-$(CONFIG_SPARC32)   += leon_mm.o
diff --git a/arch/sparc/mm/extable.c b/arch/sparc/mm/extable.c
deleted file mode 100644 (file)
index 241b406..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * linux/arch/sparc/mm/extable.c
- */
-
-#include <linux/module.h>
-#include <linux/extable.h>
-#include <linux/uaccess.h>
-
-void sort_extable(struct exception_table_entry *start,
-                 struct exception_table_entry *finish)
-{
-}
-
-/* Caller knows they are in a range if ret->fixup == 0 */
-const struct exception_table_entry *
-search_extable(const struct exception_table_entry *base,
-              const size_t num,
-              unsigned long value)
-{
-       int i;
-
-       /* Single insn entries are encoded as:
-        *      word 1: insn address
-        *      word 2: fixup code address
-        *
-        * Range entries are encoded as:
-        *      word 1: first insn address
-        *      word 2: 0
-        *      word 3: last insn address + 4 bytes
-        *      word 4: fixup code address
-        *
-        * Deleted entries are encoded as:
-        *      word 1: unused
-        *      word 2: -1
-        *
-        * See asm/uaccess.h for more details.
-        */
-
-       /* 1. Try to find an exact match. */
-       for (i = 0; i < num; i++) {
-               if (base[i].fixup == 0) {
-                       /* A range entry, skip both parts. */
-                       i++;
-                       continue;
-               }
-
-               /* A deleted entry; see trim_init_extable */
-               if (base[i].fixup == -1)
-                       continue;
-
-               if (base[i].insn == value)
-                       return &base[i];
-       }
-
-       /* 2. Try to find a range match. */
-       for (i = 0; i < (num - 1); i++) {
-               if (base[i].fixup)
-                       continue;
-
-               if (base[i].insn <= value && base[i + 1].insn > value)
-                       return &base[i];
-
-               i++;
-       }
-
-        return NULL;
-}
-
-#ifdef CONFIG_MODULES
-/* We could memmove them around; easier to mark the trimmed ones. */
-void trim_init_extable(struct module *m)
-{
-       unsigned int i;
-       bool range;
-
-       for (i = 0; i < m->num_exentries; i += range ? 2 : 1) {
-               range = m->extable[i].fixup == 0;
-
-               if (within_module_init(m->extable[i].insn, m)) {
-                       m->extable[i].fixup = -1;
-                       if (range)
-                               m->extable[i+1].fixup = -1;
-               }
-               if (range)
-                       i++;
-       }
-}
-#endif /* CONFIG_MODULES */
-
-/* Special extable search, which handles ranges.  Returns fixup */
-unsigned long search_extables_range(unsigned long addr, unsigned long *g2)
-{
-       const struct exception_table_entry *entry;
-
-       entry = search_exception_tables(addr);
-       if (!entry)
-               return 0;
-
-       /* Inside range?  Fix g2 and return correct fixup */
-       if (!entry->fixup) {
-               *g2 = (addr - entry->insn) / 4;
-               return (entry + 1)->fixup;
-       }
-
-       return entry->fixup;
-}
index 40ce087dfecf24709ea6e5a5e6694970cbac1340..de2031c2b2d7982ebe39af78654e463c1cbfc5cd 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/kdebug.h>
 #include <linux/uaccess.h>
+#include <linux/extable.h>
 
 #include <asm/page.h>
 #include <asm/openprom.h>
@@ -54,54 +55,6 @@ static void __noreturn unhandled_fault(unsigned long address,
        die_if_kernel("Oops", regs);
 }
 
-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
-                           unsigned long address)
-{
-       struct pt_regs regs;
-       unsigned long g2;
-       unsigned int insn;
-       int i;
-
-       i = search_extables_range(ret_pc, &g2);
-       switch (i) {
-       case 3:
-               /* load & store will be handled by fixup */
-               return 3;
-
-       case 1:
-               /* store will be handled by fixup, load will bump out */
-               /* for _to_ macros */
-               insn = *((unsigned int *) pc);
-               if ((insn >> 21) & 1)
-                       return 1;
-               break;
-
-       case 2:
-               /* load will be handled by fixup, store will bump out */
-               /* for _from_ macros */
-               insn = *((unsigned int *) pc);
-               if (!((insn >> 21) & 1) || ((insn>>19)&0x3f) == 15)
-                       return 2;
-               break;
-
-       default:
-               break;
-       }
-
-       memset(&regs, 0, sizeof(regs));
-       regs.pc = pc;
-       regs.npc = pc + 4;
-       __asm__ __volatile__(
-               "rd %%psr, %0\n\t"
-               "nop\n\t"
-               "nop\n\t"
-               "nop\n" : "=r" (regs.psr));
-       unhandled_fault(address, current, &regs);
-
-       /* Not reached */
-       return 0;
-}
-
 static inline void
 show_signal_msg(struct pt_regs *regs, int sig, int code,
                unsigned long address, struct task_struct *tsk)
@@ -162,8 +115,6 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
        struct vm_area_struct *vma;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
-       unsigned int fixup;
-       unsigned long g2;
        int from_user = !(regs->psr & PSR_PS);
        int code;
        vm_fault_t fault;
@@ -281,30 +232,19 @@ bad_area_nosemaphore:
 
        /* Is this in ex_table? */
 no_context:
-       g2 = regs->u_regs[UREG_G2];
        if (!from_user) {
-               fixup = search_extables_range(regs->pc, &g2);
-               /* Values below 10 are reserved for other things */
-               if (fixup > 10) {
-                       extern const unsigned int __memset_start[];
-                       extern const unsigned int __memset_end[];
+               const struct exception_table_entry *entry;
 
+               entry = search_exception_tables(regs->pc);
 #ifdef DEBUG_EXCEPTIONS
-                       printk("Exception: PC<%08lx> faddr<%08lx>\n",
-                              regs->pc, address);
-                       printk("EX_TABLE: insn<%08lx> fixup<%08x> g2<%08lx>\n",
-                               regs->pc, fixup, g2);
+               printk("Exception: PC<%08lx> faddr<%08lx>\n",
+                      regs->pc, address);
+               printk("EX_TABLE: insn<%08lx> fixup<%08x>\n",
+                       regs->pc, entry->fixup);
 #endif
-                       if ((regs->pc >= (unsigned long)__memset_start &&
-                            regs->pc < (unsigned long)__memset_end)) {
-                               regs->u_regs[UREG_I4] = address;
-                               regs->u_regs[UREG_I5] = regs->pc;
-                       }
-                       regs->u_regs[UREG_G2] = g2;
-                       regs->pc = fixup;
-                       regs->npc = regs->pc + 4;
-                       return;
-               }
+               regs->pc = entry->fixup;
+               regs->npc = regs->pc + 4;
+               return;
        }
 
        unhandled_fault(address, tsk, regs);
index ce750a99eea96bb968bea7b2f4414dac752a7922..ee55f10806343aa07fa405b4452fc591c5ab22b6 100644 (file)
@@ -1,7 +1,5 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /* fault_32.c - visible as they are called from assembler */
-asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
-                            unsigned long address);
 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
                                unsigned long address);
 
index a03caa5f6628df8432d7a39323e52abba8d67cf8..6766e961ffe7993035e08993b226f0e373731cc2 100644 (file)
@@ -689,7 +689,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
                pgdp = pgd_offset_k(start);
                p4dp = p4d_offset(pgdp, start);
                pudp = pud_offset(p4dp, start);
-               if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
+               if (pud_none(*__nocache_fix(pudp))) {
                        pmdp = __srmmu_get_nocache(
                            SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
                        if (pmdp == NULL)
@@ -698,7 +698,7 @@ static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
                        pud_set(__nocache_fix(pudp), pmdp);
                }
                pmdp = pmd_offset(__nocache_fix(pudp), start);
-               if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+               if (srmmu_pmd_none(*__nocache_fix(pmdp))) {
                        ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
                        if (ptep == NULL)
                                early_pgtable_allocfail("pte");
@@ -810,11 +810,11 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
                p4dp = p4d_offset(pgdp, start);
                pudp = pud_offset(p4dp, start);
                if (what == 2) {
-                       *(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
+                       *__nocache_fix(pgdp) = __pgd(probed);
                        start += PGDIR_SIZE;
                        continue;
                }
-               if (pud_none(*(pud_t *)__nocache_fix(pudp))) {
+               if (pud_none(*__nocache_fix(pudp))) {
                        pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
                                                   SRMMU_PMD_TABLE_SIZE);
                        if (pmdp == NULL)
@@ -822,13 +822,13 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
                        memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
                        pud_set(__nocache_fix(pudp), pmdp);
                }
-               pmdp = pmd_offset(__nocache_fix(pgdp), start);
+               pmdp = pmd_offset(__nocache_fix(pudp), start);
                if (what == 1) {
                        *(pmd_t *)__nocache_fix(pmdp) = __pmd(probed);
                        start += PMD_SIZE;
                        continue;
                }
-               if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
+               if (srmmu_pmd_none(*__nocache_fix(pmdp))) {
                        ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
                        if (ptep == NULL)
                                early_pgtable_allocfail("pte");
@@ -836,7 +836,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
                        pmd_set(__nocache_fix(pmdp), ptep);
                }
                ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
-               *(pte_t *)__nocache_fix(ptep) = __pte(probed);
+               *__nocache_fix(ptep) = __pte(probed);
                start += PAGE_SIZE;
        }
 }
@@ -850,7 +850,7 @@ static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base
        unsigned long big_pte;
 
        big_pte = KERNEL_PTE(phys_base >> 4);
-       *(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
+       *__nocache_fix(pgdp) = __pgd(big_pte);
 }
 
 /* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
@@ -940,7 +940,7 @@ void __init srmmu_paging_init(void)
        srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa(srmmu_context_table);
 
        for (i = 0; i < num_contexts; i++)
-               srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
+               srmmu_ctxd_set(__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
 
        flush_cache_all();
        srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
index 874aeacde2dd157cd1d8f41e34f74598a4ef3f5a..02a349afaf9c3705602fc67afcbd100b53e96a70 100644 (file)
 439    i386    faccessat2              sys_faccessat2
 440    i386    process_madvise         sys_process_madvise
 441    i386    epoll_pwait2            sys_epoll_pwait2                compat_sys_epoll_pwait2
+442    i386    watch_mount             sys_watch_mount
index 78672124d28be0da465300ee9a301bab59c1ac1d..d9bcc4e02588911d3a3c2e642b225415e304e4b9 100644 (file)
 439    common  faccessat2              sys_faccessat2
 440    common  process_madvise         sys_process_madvise
 441    common  epoll_pwait2            sys_epoll_pwait2
+442    common  watch_mount             sys_watch_mount
 
 #
 # Due to a historical design error, certain syscalls are numbered differently
index 37ce1489364ee27a1ee297aeb37bc5dd515d6575..97beddc9d6459bf31e58270b1a7b49982d14fc41 100644 (file)
@@ -467,7 +467,7 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
          then enter your normal kernel breakpoints once the MMU was mapped
          to the kernel mappings (0XC0000000).
 
-         This unfortunately won't work for U-Boot and likely also wont
+         This unfortunately won't work for U-Boot and likely also won't
          work for using KEXEC to have a hot kernel ready for doing a
          KDUMP.
 
index 46116a28eeed3bd0b250ab06874f6238103720a4..327e4a3c48ba54b694ffa683ef0b064785e813e2 100644 (file)
 439    common  faccessat2                      sys_faccessat2
 440    common  process_madvise                 sys_process_madvise
 441    common  epoll_pwait2                    sys_epoll_pwait2
+442    common  watch_mount                     sys_watch_mount
index 96e5fcd7f071b606c62d946354080aada4f4b8b7..7663a9b94b8002ad710231ec726dfd4cc7c23513 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/blk-mq.h>
+#include <linux/blk-pm.h>
 #include <linux/highmem.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
@@ -424,11 +425,11 @@ EXPORT_SYMBOL(blk_cleanup_queue);
 /**
  * blk_queue_enter() - try to increase q->q_usage_counter
  * @q: request queue pointer
- * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PREEMPT
+ * @flags: BLK_MQ_REQ_NOWAIT and/or BLK_MQ_REQ_PM
  */
 int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 {
-       const bool pm = flags & BLK_MQ_REQ_PREEMPT;
+       const bool pm = flags & BLK_MQ_REQ_PM;
 
        while (true) {
                bool success = false;
@@ -440,7 +441,8 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
                         * responsible for ensuring that that counter is
                         * globally visible before the queue is unfrozen.
                         */
-                       if (pm || !blk_queue_pm_only(q)) {
+                       if ((pm && queue_rpm_status(q) != RPM_SUSPENDED) ||
+                           !blk_queue_pm_only(q)) {
                                success = true;
                        } else {
                                percpu_ref_put(&q->q_usage_counter);
@@ -465,8 +467,7 @@ int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags)
 
                wait_event(q->mq_freeze_wq,
                           (!q->mq_freeze_depth &&
-                           (pm || (blk_pm_request_resume(q),
-                                   !blk_queue_pm_only(q)))) ||
+                           blk_pm_resume_queue(pm, q)) ||
                           blk_queue_dying(q));
                if (blk_queue_dying(q))
                        return -ENODEV;
@@ -630,7 +631,7 @@ struct request *blk_get_request(struct request_queue *q, unsigned int op,
        struct request *req;
 
        WARN_ON_ONCE(op & REQ_NOWAIT);
-       WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PREEMPT));
+       WARN_ON_ONCE(flags & ~(BLK_MQ_REQ_NOWAIT | BLK_MQ_REQ_PM));
 
        req = blk_mq_alloc_request(q, op, flags);
        if (!IS_ERR(req) && q->mq_ops->initialize_rq_fn)
index 3094542e12ae0fa468f242a897b1d08b35fbe83f..9336a6f8d6efd55acbe4277c3fcb30e3042bddf6 100644 (file)
@@ -297,7 +297,6 @@ static const char *const rqf_name[] = {
        RQF_NAME(MIXED_MERGE),
        RQF_NAME(MQ_INFLIGHT),
        RQF_NAME(DONTPREP),
-       RQF_NAME(PREEMPT),
        RQF_NAME(FAILED),
        RQF_NAME(QUIET),
        RQF_NAME(ELVPRIV),
index c338c9bc5a2c53d22331f45801a3b5dfbc299dda..f285a9123a8b081deeae72282306948aeec928b4 100644 (file)
@@ -294,8 +294,8 @@ static struct request *blk_mq_rq_ctx_init(struct blk_mq_alloc_data *data,
        rq->mq_hctx = data->hctx;
        rq->rq_flags = 0;
        rq->cmd_flags = data->cmd_flags;
-       if (data->flags & BLK_MQ_REQ_PREEMPT)
-               rq->rq_flags |= RQF_PREEMPT;
+       if (data->flags & BLK_MQ_REQ_PM)
+               rq->rq_flags |= RQF_PM;
        if (blk_queue_io_stat(data->q))
                rq->rq_flags |= RQF_IO_STAT;
        INIT_LIST_HEAD(&rq->queuelist);
index b85234d758f7b2d6d75734fe9d74a0e5e03bdcc2..17bd020268d421434a3724ad63552f2df49da0f4 100644 (file)
@@ -67,6 +67,10 @@ int blk_pre_runtime_suspend(struct request_queue *q)
 
        WARN_ON_ONCE(q->rpm_status != RPM_ACTIVE);
 
+       spin_lock_irq(&q->queue_lock);
+       q->rpm_status = RPM_SUSPENDING;
+       spin_unlock_irq(&q->queue_lock);
+
        /*
         * Increase the pm_only counter before checking whether any
         * non-PM blk_queue_enter() calls are in progress to avoid that any
@@ -89,15 +93,14 @@ int blk_pre_runtime_suspend(struct request_queue *q)
        /* Switch q_usage_counter back to per-cpu mode. */
        blk_mq_unfreeze_queue(q);
 
-       spin_lock_irq(&q->queue_lock);
-       if (ret < 0)
+       if (ret < 0) {
+               spin_lock_irq(&q->queue_lock);
+               q->rpm_status = RPM_ACTIVE;
                pm_runtime_mark_last_busy(q->dev);
-       else
-               q->rpm_status = RPM_SUSPENDING;
-       spin_unlock_irq(&q->queue_lock);
+               spin_unlock_irq(&q->queue_lock);
 
-       if (ret)
                blk_clear_pm_only(q);
+       }
 
        return ret;
 }
index ea5507d23e75976e0c234c1a4951fd443e86811c..a2283cc9f716dc89622a5966fd6e894d8ddefee9 100644 (file)
@@ -6,11 +6,14 @@
 #include <linux/pm_runtime.h>
 
 #ifdef CONFIG_PM
-static inline void blk_pm_request_resume(struct request_queue *q)
+static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
 {
-       if (q->dev && (q->rpm_status == RPM_SUSPENDED ||
-                      q->rpm_status == RPM_SUSPENDING))
-               pm_request_resume(q->dev);
+       if (!q->dev || !blk_queue_pm_only(q))
+               return 1;       /* Nothing to do */
+       if (pm && q->rpm_status != RPM_SUSPENDED)
+               return 1;       /* Request allowed */
+       pm_request_resume(q->dev);
+       return 0;
 }
 
 static inline void blk_pm_mark_last_busy(struct request *rq)
@@ -44,8 +47,9 @@ static inline void blk_pm_put_request(struct request *rq)
                --rq->q->nr_pending;
 }
 #else
-static inline void blk_pm_request_resume(struct request_queue *q)
+static inline int blk_pm_resume_queue(const bool pm, struct request_queue *q)
 {
+       return 1;
 }
 
 static inline void blk_pm_mark_last_busy(struct request *rq)
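
blk_pm_resume_queue() folds the old "pm || !blk_queue_pm_only(q)" test and the pm_request_resume() kick into one predicate. A hedged userspace model of the decision (names invented for illustration):

#include <stdbool.h>

enum rpm_status { RPM_ACTIVE, RPM_RESUMING, RPM_SUSPENDING, RPM_SUSPENDED };

/* Returns nonzero when blk_queue_enter() may proceed; otherwise the real
 * helper kicks pm_request_resume() and the caller keeps waiting. */
static int may_enter_model(bool pm_request, bool has_dev, bool pm_only,
			   enum rpm_status status)
{
	if (!has_dev || !pm_only)
		return 1;	/* no runtime PM, or queue not PM-gated */
	if (pm_request && status != RPM_SUSPENDED)
		return 1;	/* PM requests pass unless fully suspended */
	return 0;		/* would trigger a runtime resume and wait */
}

The extra RPM_SUSPENDED check is what lets BLK_MQ_REQ_PM requests through during suspend/resume transitions without letting them race a fully suspended device.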
index 6514f9ebc943f4e7e8cd2f3ceba36da0762c269a..bffe4c6f4a9e204d215119278d419bb491d2d7c8 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/ctype.h>
 #include <linux/err.h>
 #include <linux/seq_file.h>
+#include <linux/uidgid.h>
 #include <keys/system_keyring.h>
 #include "blacklist.h"
 
@@ -37,7 +38,7 @@ static int blacklist_vet_description(const char *desc)
 found_colon:
        desc++;
        for (; *desc; desc++) {
-               if (!isxdigit(*desc))
+               if (!isxdigit(*desc) || isupper(*desc))
                        return -EINVAL;
                n++;
        }
@@ -78,7 +79,7 @@ static struct key_type key_type_blacklist = {
 
 /**
  * mark_hash_blacklisted - Add a hash to the system blacklist
- * @hash - The hash as a hex string with a type prefix (eg. "tbs:23aa429783")
+ * @hash: The hash as a hex string with a type prefix (eg. "tbs:23aa429783")
  */
 int mark_hash_blacklisted(const char *hash)
 {
@@ -156,13 +157,12 @@ static int __init blacklist_init(void)
 
        blacklist_keyring =
                keyring_alloc(".blacklist",
-                             KUIDT_INIT(0), KGIDT_INIT(0),
-                             current_cred(),
+                             GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
                              (KEY_POS_ALL & ~KEY_POS_SETATTR) |
                              KEY_USR_VIEW | KEY_USR_READ |
                              KEY_USR_SEARCH,
                              KEY_ALLOC_NOT_IN_QUOTA |
-                             KEY_FLAG_KEEP,
+                             KEY_ALLOC_SET_KEEP,
                              NULL, NULL);
        if (IS_ERR(blacklist_keyring))
                panic("Can't allocate system blacklist keyring\n");
index 798291177186c361ffdb6797096bdb0f219228ae..4b693da488f14bcc66b452789f2eab92da959f9f 100644 (file)
@@ -11,6 +11,7 @@
 #include <linux/cred.h>
 #include <linux/err.h>
 #include <linux/slab.h>
+#include <linux/uidgid.h>
 #include <linux/verification.h>
 #include <keys/asymmetric-type.h>
 #include <keys/system_keyring.h>
@@ -98,7 +99,7 @@ static __init int system_trusted_keyring_init(void)
 
        builtin_trusted_keys =
                keyring_alloc(".builtin_trusted_keys",
-                             KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
+                             GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
                              ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
                              KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH),
                              KEY_ALLOC_NOT_IN_QUOTA,
@@ -109,7 +110,7 @@ static __init int system_trusted_keyring_init(void)
 #ifdef CONFIG_SECONDARY_TRUSTED_KEYRING
        secondary_trusted_keys =
                keyring_alloc(".secondary_trusted_keys",
-                             KUIDT_INIT(0), KGIDT_INIT(0), current_cred(),
+                             GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
                              ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
                               KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH |
                               KEY_USR_WRITE),
index 33e77d846caa8709ba9998606ce406524a4d21cf..ad8af3d70ac04d6f2b2ef6db6778aa0e6085cbd1 100644 (file)
@@ -152,7 +152,8 @@ EXPORT_SYMBOL_GPL(asymmetric_key_generate_id);
 
 /**
  * asymmetric_key_id_same - Return true if two asymmetric keys IDs are the same.
- * @kid_1, @kid_2: The key IDs to compare
+ * @kid1: The key ID to compare
+ * @kid2: The key ID to compare
  */
 bool asymmetric_key_id_same(const struct asymmetric_key_id *kid1,
                            const struct asymmetric_key_id *kid2)
@@ -168,7 +169,8 @@ EXPORT_SYMBOL_GPL(asymmetric_key_id_same);
 /**
  * asymmetric_key_id_partial - Return true if two asymmetric keys IDs
  * partially match
- * @kid_1, @kid_2: The key IDs to compare
+ * @kid1: The key ID to compare
+ * @kid2: The key ID to compare
  */
 bool asymmetric_key_id_partial(const struct asymmetric_key_id *kid1,
                               const struct asymmetric_key_id *kid2)
index 6565fdc2d4cac2bc49bfa629edde67b875573cb7..e17f7ce4fb434832a3867ce2254de6ccc7f06ae7 100644 (file)
@@ -41,10 +41,9 @@ struct pkcs7_signed_info {
         *
         * This contains the generated digest of _either_ the Content Data or
         * the Authenticated Attributes [RFC2315 9.3].  If the latter, one of
-        * the attributes contains the digest of the the Content Data within
-        * it.
+        * the attributes contains the digest of the Content Data within it.
         *
-        * THis also contains the issuing cert serial number and issuer's name
+        * This also contains the issuing cert serial number and issuer's name
         * [PKCS#7 or CMS ver 1] or issuing cert's SKID [CMS ver 3].
         */
        struct public_key_signature *sig;
index 61af3c4d82ccf96b622d4cad6044ea985426d159..b531df2013c41d6c5d7ccdb1e00f296a94fc0c9a 100644 (file)
@@ -16,7 +16,7 @@
 #include <crypto/public_key.h>
 #include "pkcs7_parser.h"
 
-/**
+/*
  * Check the trust on one PKCS#7 SignedInfo block.
  */
 static int pkcs7_validate_trust_one(struct pkcs7_message *pkcs7,
index ce49820caa97fb885766798060677361742f3645..0b4d07aa88111e332d16faeb2f97d8b594585741 100644 (file)
@@ -141,11 +141,10 @@ int pkcs7_get_digest(struct pkcs7_message *pkcs7, const u8 **buf, u32 *len,
        *buf = sinfo->sig->digest;
        *len = sinfo->sig->digest_size;
 
-       for (i = 0; i < HASH_ALGO__LAST; i++)
-               if (!strcmp(hash_algo_name[i], sinfo->sig->hash_algo)) {
-                       *hash_algo = i;
-                       break;
-               }
+       i = match_string(hash_algo_name, HASH_ALGO__LAST,
+                        sinfo->sig->hash_algo);
+       if (i >= 0)
+               *hash_algo = i;
 
        return 0;
 }
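
match_string() returns the index of the first matching entry or a negative errno, so the open-coded scan collapses into one call plus a sign check. A hedged userspace model of its semantics:

#include <string.h>
#include <stddef.h>

/* Model of the kernel's match_string(): index on hit, -1 on miss
 * (the kernel returns -EINVAL); a NULL entry terminates the scan. */
static int match_string_model(const char * const *array, size_t n,
			      const char *string)
{
	size_t i;

	for (i = 0; i < n; i++) {
		if (!array[i])
			break;
		if (!strcmp(array[i], string))
			return (int)i;
	}
	return -1;
}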
index 65a3886f68c9e391722b99d0ebe4d45fc3de7fe9..5f0472c18bcbd791bbc74a5aff8fe79ff2a76fd1 100644 (file)
@@ -3607,7 +3607,7 @@ static int idt77252_init_one(struct pci_dev *pcidev,
 
        if ((err = dma_set_mask_and_coherent(&pcidev->dev, DMA_BIT_MASK(32)))) {
                printk("idt77252: can't enable DMA for PCI device at %s\n", pci_name(pcidev));
-               return err;
+               goto err_out_disable_pdev;
        }
 
        card = kzalloc(sizeof(struct idt77252_dev), GFP_KERNEL);
index 95fd1549f87de38dcbad0846e923b06ee25bdbd2..8456d8384ac8e14842c603554516aede25239f6c 100644 (file)
@@ -366,6 +366,8 @@ int devm_platform_get_irqs_affinity(struct platform_device *dev,
                return -ERANGE;
 
        nvec = platform_irq_count(dev);
+       if (nvec < 0)
+               return nvec;
 
        if (nvec < minvec)
                return -ENOSPC;
index 41ff2071d7eff38633963fe44ffc8dc10ae7995b..88ce5f0ffc4babd74a8004cd9f9efc469b37d036 100644 (file)
@@ -437,38 +437,31 @@ int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver
                tlv = (struct intel_tlv *)skb->data;
                switch (tlv->type) {
                case INTEL_TLV_CNVI_TOP:
-                       version->cnvi_top =
-                               __le32_to_cpu(get_unaligned_le32(tlv->val));
+                       version->cnvi_top = get_unaligned_le32(tlv->val);
                        break;
                case INTEL_TLV_CNVR_TOP:
-                       version->cnvr_top =
-                               __le32_to_cpu(get_unaligned_le32(tlv->val));
+                       version->cnvr_top = get_unaligned_le32(tlv->val);
                        break;
                case INTEL_TLV_CNVI_BT:
-                       version->cnvi_bt =
-                               __le32_to_cpu(get_unaligned_le32(tlv->val));
+                       version->cnvi_bt = get_unaligned_le32(tlv->val);
                        break;
                case INTEL_TLV_CNVR_BT:
-                       version->cnvr_bt =
-                               __le32_to_cpu(get_unaligned_le32(tlv->val));
+                       version->cnvr_bt = get_unaligned_le32(tlv->val);
                        break;
                case INTEL_TLV_DEV_REV_ID:
-                       version->dev_rev_id =
-                               __le16_to_cpu(get_unaligned_le16(tlv->val));
+                       version->dev_rev_id = get_unaligned_le16(tlv->val);
                        break;
                case INTEL_TLV_IMAGE_TYPE:
                        version->img_type = tlv->val[0];
                        break;
                case INTEL_TLV_TIME_STAMP:
-                       version->timestamp =
-                               __le16_to_cpu(get_unaligned_le16(tlv->val));
+                       version->timestamp = get_unaligned_le16(tlv->val);
                        break;
                case INTEL_TLV_BUILD_TYPE:
                        version->build_type = tlv->val[0];
                        break;
                case INTEL_TLV_BUILD_NUM:
-                       version->build_num =
-                               __le32_to_cpu(get_unaligned_le32(tlv->val));
+                       version->build_num = get_unaligned_le32(tlv->val);
                        break;
                case INTEL_TLV_SECURE_BOOT:
                        version->secure_boot = tlv->val[0];
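
get_unaligned_le32()/get_unaligned_le16() already return CPU-order values, so the removed __le32_to_cpu()/__le16_to_cpu() wrappers swapped the bytes a second time on big-endian hosts (and were no-ops on little-endian ones, which is why the bug went unnoticed). A hedged userspace model:

#include <stdint.h>

/* Model of get_unaligned_le32(): assemble four bytes little-endian
 * first, so the result is already in CPU byte order on any host. */
static uint32_t get_unaligned_le32_model(const void *p)
{
	const uint8_t *b = p;

	return (uint32_t)b[0] | ((uint32_t)b[1] << 8) |
	       ((uint32_t)b[2] << 16) | ((uint32_t)b[3] << 24);
}
/* Wrapping this in a le32-to-cpu conversion would re-swap the bytes
 * on big-endian machines - exactly the double swap removed above. */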
index 5f9f027956317ef3a5bb71f6753f39d7522023cf..9872ef18f9fea91f1d460f489777e8697aff83d9 100644 (file)
@@ -442,15 +442,15 @@ static int btmtksdio_rx_packet(struct btmtksdio_dev *bdev, u16 rx_size)
        }
 
        switch ((&pkts[i])->lsize) {
-               case 1:
-                       dlen = skb->data[(&pkts[i])->loff];
-                       break;
-               case 2:
-                       dlen = get_unaligned_le16(skb->data +
+       case 1:
+               dlen = skb->data[(&pkts[i])->loff];
+               break;
+       case 2:
+               dlen = get_unaligned_le16(skb->data +
                                                  (&pkts[i])->loff);
-                       break;
-               default:
-                       goto err_kfree_skb;
+               break;
+       default:
+               goto err_kfree_skb;
        }
 
        pad_size = skb->len - (&pkts[i])->hlen -  dlen;
index f85a55add9be598f5a2d6fa589cc3c4f2de8a932..25114f0d1319971c429e49f60ad1bf6bb72147ef 100644 (file)
@@ -94,6 +94,53 @@ out:
 }
 EXPORT_SYMBOL_GPL(qca_read_soc_version);
 
+static int qca_read_fw_build_info(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+       struct edl_event_hdr *edl;
+       char cmd, build_label[QCA_FW_BUILD_VER_LEN];
+       int build_lbl_len, err = 0;
+
+       bt_dev_dbg(hdev, "QCA read fw build info");
+
+       cmd = EDL_GET_BUILD_INFO_CMD;
+       skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN,
+                               &cmd, 0, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               bt_dev_err(hdev, "Reading QCA fw build info failed (%d)",
+                          err);
+               return err;
+       }
+
+       edl = (struct edl_event_hdr *)(skb->data);
+       if (!edl) {
+               bt_dev_err(hdev, "QCA read fw build info with no header");
+               err = -EILSEQ;
+               goto out;
+       }
+
+       if (edl->cresp != EDL_CMD_REQ_RES_EVT ||
+           edl->rtype != EDL_GET_BUILD_INFO_CMD) {
+               bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp,
+                          edl->rtype);
+               err = -EIO;
+               goto out;
+       }
+
+       build_lbl_len = edl->data[0];
+       if (build_lbl_len <= QCA_FW_BUILD_VER_LEN - 1) {
+               memcpy(build_label, edl->data + 1, build_lbl_len);
+               *(build_label + build_lbl_len) = '\0';
+       }
+
+       hci_set_fw_info(hdev, "%s", build_label);
+
+out:
+       kfree_skb(skb);
+       return err;
+}
+
 static int qca_send_reset(struct hci_dev *hdev)
 {
        struct sk_buff *skb;
@@ -517,6 +564,19 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
                        return err;
        }
 
+       /* WCN399x supports the Microsoft vendor extension with 0xFD70 as the
+        * VsMsftOpCode.
+        */
+       switch (soc_type) {
+       case QCA_WCN3990:
+       case QCA_WCN3991:
+       case QCA_WCN3998:
+               hci_set_msft_opcode(hdev, 0xFD70);
+               break;
+       default:
+               break;
+       }
+
        /* Perform HCI reset */
        err = qca_send_reset(hdev);
        if (err < 0) {
@@ -524,6 +584,13 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate,
                return err;
        }
 
+       if (soc_type == QCA_WCN3991) {
+               /* get fw build info */
+               err = qca_read_fw_build_info(hdev);
+               if (err < 0)
+                       return err;
+       }
+
        bt_dev_info(hdev, "QCA setup on UART is completed");
 
        return 0;
index e73b8f8775bd73da607f30dee3948270d7e487fb..b19add7675a469ac1eca963cf2811d36da9e978d 100644 (file)
@@ -11,6 +11,7 @@
 #define EDL_PATCH_CMD_LEN              (1)
 #define EDL_PATCH_VER_REQ_CMD          (0x19)
 #define EDL_PATCH_TLV_REQ_CMD          (0x1E)
+#define EDL_GET_BUILD_INFO_CMD         (0x20)
 #define EDL_NVM_ACCESS_SET_REQ_CMD     (0x01)
 #define MAX_SIZE_PER_TLV_SEGMENT       (243)
 #define QCA_PRE_SHUTDOWN_CMD           (0xFC08)
index 98d53764871f5b5fcc791dd36d367bf21b56f866..2acb719e596f59e930c520b5030f0c6d0e5a55cb 100644 (file)
@@ -142,12 +142,16 @@ static int btqcomsmd_probe(struct platform_device *pdev)
 
        btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
                                                   btqcomsmd_cmd_callback, btq);
-       if (IS_ERR(btq->cmd_channel))
-               return PTR_ERR(btq->cmd_channel);
+       if (IS_ERR(btq->cmd_channel)) {
+               ret = PTR_ERR(btq->cmd_channel);
+               goto destroy_acl_channel;
+       }
 
        hdev = hci_alloc_dev();
-       if (!hdev)
-               return -ENOMEM;
+       if (!hdev) {
+               ret = -ENOMEM;
+               goto destroy_cmd_channel;
+       }
 
        hci_set_drvdata(hdev, btq);
        btq->hdev = hdev;
@@ -161,14 +165,21 @@ static int btqcomsmd_probe(struct platform_device *pdev)
        hdev->set_bdaddr = qca_set_bdaddr_rome;
 
        ret = hci_register_dev(hdev);
-       if (ret < 0) {
-               hci_free_dev(hdev);
-               return ret;
-       }
+       if (ret < 0)
+               goto hci_free_dev;
 
        platform_set_drvdata(pdev, btq);
 
        return 0;
+
+hci_free_dev:
+       hci_free_dev(hdev);
+destroy_cmd_channel:
+       rpmsg_destroy_ept(btq->cmd_channel);
+destroy_acl_channel:
+       rpmsg_destroy_ept(btq->acl_channel);
+
+       return ret;
 }
 
 static int btqcomsmd_remove(struct platform_device *pdev)
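
The probe now uses the usual kernel unwind pattern: each failure jumps to a label that releases everything acquired before it, in reverse order, so the ACL and CMD endpoints are no longer leaked. A hedged userspace sketch of the shape (malloc/free stand in for the channel and hdev allocations):

#include <stdlib.h>

static int probe_model(void)
{
	void *acl, *cmd, *hdev;
	int ret;

	acl = malloc(1);		/* stands in for the ACL channel */
	if (!acl)
		return -1;

	cmd = malloc(1);		/* stands in for the CMD channel */
	if (!cmd) {
		ret = -1;
		goto destroy_acl;
	}

	hdev = malloc(1);		/* stands in for hci_alloc_dev() */
	if (!hdev) {
		ret = -1;
		goto destroy_cmd;
	}

	return 0;			/* success: resources stay live */

destroy_cmd:
	free(cmd);
destroy_acl:
	free(acl);
	return ret;
}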
index a4f7cace66b067f987e7f90539fd04c52b09f34d..1abf6a4d672734f6ea70ff6159b3d95c0dc3c910 100644 (file)
@@ -658,6 +658,12 @@ out_free:
                }
        }
 
+       /* RTL8822CE supports the Microsoft vendor extension and uses 0xFCF0
+        * for VsMsftOpCode.
+        */
+       if (lmp_subver == RTL_ROM_LMP_8822B)
+               hci_set_msft_opcode(hdev, 0xFCF0);
+
        return btrtl_dev;
 
 err_free:
@@ -708,13 +714,24 @@ int btrtl_setup_realtek(struct hci_dev *hdev)
 
        ret = btrtl_download_firmware(hdev, btrtl_dev);
 
-       btrtl_free(btrtl_dev);
-
        /* Enable controller to do both LE scan and BR/EDR inquiry
         * simultaneously.
         */
        set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
 
+       /* Enable central-peripheral role (able to create new connections with
+        * an existing connection in slave role).
+        */
+       switch (btrtl_dev->ic_info->lmp_subver) {
+       case RTL_ROM_LMP_8822B:
+               set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks);
+               break;
+       default:
+               rtl_dev_dbg(hdev, "Central-peripheral role not enabled.");
+               break;
+       }
+
+       btrtl_free(btrtl_dev);
        return ret;
 }
 EXPORT_SYMBOL_GPL(btrtl_setup_realtek);
index 03b83aa912779b31c539aba19feb1d3718d042fc..9ff920de8d2600c3da2faff47854d2f9e552265e 100644 (file)
@@ -506,7 +506,6 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = {
 #define BTUSB_HW_RESET_ACTIVE  12
 #define BTUSB_TX_WAIT_VND_EVT  13
 #define BTUSB_WAKEUP_DISABLE   14
-#define BTUSB_USE_ALT1_FOR_WBS 15
 
 struct btusb_data {
        struct hci_dev       *hdev;
@@ -1736,15 +1735,12 @@ static void btusb_work(struct work_struct *work)
                                new_alts = data->sco_num;
                        }
                } else if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP) {
-                       /* Check if Alt 6 is supported for Transparent audio */
-                       if (btusb_find_altsetting(data, 6)) {
-                               data->usb_alt6_packet_flow = true;
-                               new_alts = 6;
-                       } else if (test_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags)) {
-                               new_alts = 1;
-                       } else {
-                               bt_dev_err(hdev, "Device does not support ALT setting 6");
-                       }
+                       /* Bluetooth USB spec recommends alt 6 (63 bytes), but
+                        * many adapters do not support it.  Alt 1 appears to
+                        * work for all adapters that do not have alt 6, and
+                        * which work with WBS at all.
+                        */
+                       new_alts = btusb_find_altsetting(data, 6) ? 6 : 1;
                }
 
                if (btusb_switch_alt_setting(hdev, new_alts) < 0)
@@ -1903,7 +1899,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
                    le16_to_cpu(rp->lmp_subver) == 0x1012 &&
                    le16_to_cpu(rp->hci_rev) == 0x0810 &&
                    le16_to_cpu(rp->hci_ver) == BLUETOOTH_VER_4_0) {
-                       bt_dev_warn(hdev, "CSR: detected a fake CSR dongle using a Barrot 8041a02 chip, this chip is very buggy and may have issues\n");
+                       bt_dev_warn(hdev, "CSR: detected a fake CSR dongle using a Barrot 8041a02 chip, this chip is very buggy and may have issues");
 
                        pm_runtime_allow(&data->udev->dev);
 
@@ -1911,7 +1907,7 @@ static int btusb_setup_csr(struct hci_dev *hdev)
                        if (ret >= 0)
                                msleep(200);
                        else
-                               bt_dev_err(hdev, "Failed to suspend the device for Barrot 8041a02 receive-issue workaround\n");
+                               bt_dev_err(hdev, "Failed to suspend the device for Barrot 8041a02 receive-issue workaround");
 
                        pm_runtime_forbid(&data->udev->dev);
 
@@ -2924,7 +2920,10 @@ finish:
         * extension are using 0xFC1E for VsMsftOpCode.
         */
        switch (ver.hw_variant) {
+       case 0x11:      /* JfP */
        case 0x12:      /* ThP */
+       case 0x13:      /* HrP */
+       case 0x14:      /* CcP */
                hci_set_msft_opcode(hdev, 0xFC1E);
                break;
        }
@@ -3725,7 +3724,7 @@ static int marvell_config_oob_wake(struct hci_dev *hdev)
 
        skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
        if (!skb) {
-               bt_dev_err(hdev, "%s: No memory\n", __func__);
+               bt_dev_err(hdev, "%s: No memory", __func__);
                return -ENOMEM;
        }
 
@@ -3734,7 +3733,7 @@ static int marvell_config_oob_wake(struct hci_dev *hdev)
 
        ret = btusb_send_frame(hdev, skb);
        if (ret) {
-               bt_dev_err(hdev, "%s: configuration failed\n", __func__);
+               bt_dev_err(hdev, "%s: configuration failed", __func__);
                kfree_skb(skb);
                return ret;
        }
@@ -4264,6 +4263,20 @@ static bool btusb_prevent_wake(struct hci_dev *hdev)
        return !device_may_wakeup(&data->udev->dev);
 }
 
+static int btusb_shutdown_qca(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+
+       skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               bt_dev_err(hdev, "HCI reset during shutdown failed");
+               return PTR_ERR(skb);
+       }
+       kfree_skb(skb);
+
+       return 0;
+}
+
 static int btusb_probe(struct usb_interface *intf,
                       const struct usb_device_id *id)
 {
@@ -4523,6 +4536,7 @@ static int btusb_probe(struct usb_interface *intf,
 
        if (id->driver_info & BTUSB_QCA_WCN6855) {
                data->setup_on_usb = btusb_setup_qca;
+               hdev->shutdown = btusb_shutdown_qca;
                hdev->set_bdaddr = btusb_set_bdaddr_wcn6855;
                hdev->cmd_timeout = btusb_qca_cmd_timeout;
                set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);
@@ -4548,10 +4562,6 @@ static int btusb_probe(struct usb_interface *intf,
                 * (DEVICE_REMOTE_WAKEUP)
                 */
                set_bit(BTUSB_WAKEUP_DISABLE, &data->flags);
-               if (btusb_find_altsetting(data, 1))
-                       set_bit(BTUSB_USE_ALT1_FOR_WBS, &data->flags);
-               else
-                       bt_dev_err(hdev, "Device does not support ALT setting 1");
        }
 
        if (!reset)
index 8ea5ca8d71d6d2260965cfc1c9f4ae0e0a645d13..3764ceb6fa0d52a454bd2954f8c19c4e10ba060a 100644 (file)
@@ -654,6 +654,7 @@ static const struct h4_recv_pkt bcm_recv_pkts[] = {
        { H4_RECV_ACL,      .recv = hci_recv_frame },
        { H4_RECV_SCO,      .recv = hci_recv_frame },
        { H4_RECV_EVENT,    .recv = hci_recv_frame },
+       { H4_RECV_ISO,      .recv = hci_recv_frame },
        { BCM_RECV_LM_DIAG, .recv = hci_recv_diag  },
        { BCM_RECV_NULL,    .recv = hci_recv_diag  },
        { BCM_RECV_TYPE49,  .recv = hci_recv_diag  },
index f83d67eafc9f0e8739dfa6743c086ce3360ee2af..8be4d807d1370030e0055efd9011ecece9592dda 100644 (file)
@@ -127,10 +127,9 @@ int hci_uart_tx_wakeup(struct hci_uart *hu)
        if (!test_bit(HCI_UART_PROTO_READY, &hu->flags))
                goto no_schedule;
 
-       if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) {
-               set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+       set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state);
+       if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state))
                goto no_schedule;
-       }
 
        BT_DBG("");
 
@@ -174,10 +173,10 @@ restart:
                kfree_skb(skb);
        }
 
+       clear_bit(HCI_UART_SENDING, &hu->tx_state);
        if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state))
                goto restart;
 
-       clear_bit(HCI_UART_SENDING, &hu->tx_state);
        wake_up_bit(&hu->tx_state, HCI_UART_SENDING);
 }
 
index ef96ad06fa54e2f9ce7e14e1d388f2ae9980fc70..9e03402ef1b378c2f613df7697f1708beba1f9b9 100644 (file)
@@ -83,9 +83,9 @@ static void hci_uart_write_work(struct work_struct *work)
                        hci_uart_tx_complete(hu, hci_skb_pkt_type(skb));
                        kfree_skb(skb);
                }
-       } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
 
-       clear_bit(HCI_UART_SENDING, &hu->tx_state);
+               clear_bit(HCI_UART_SENDING, &hu->tx_state);
+       } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state));
 }
 
 /* ------- Interface to HCI layer ------ */
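
Both hunks close the same lost-wakeup window: the requester now publishes HCI_UART_TX_WAKEUP before trying to claim HCI_UART_SENDING, and the worker drops SENDING before re-checking WAKEUP, so a request can no longer fall between the worker's last check and its release of the sending claim. A hedged C11 model of the ordering (not the kernel's bitops API):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool sending, tx_wakeup;

/* Requester (hci_uart_tx_wakeup): publish first, then try to claim.
 * Returns true when we must schedule the tx work ourselves. */
static bool request_send(void)
{
	atomic_store(&tx_wakeup, true);
	return !atomic_exchange(&sending, true);
}

/* Worker (end of hci_uart_write_work): release the claim before the
 * re-check, so any request that lost the claim is still observed. */
static bool finish_send(void)
{
	/* ... frames were transmitted, tx_wakeup consumed ... */
	atomic_store(&sending, false);
	return atomic_load(&tx_wakeup);	/* true: restart the loop */
}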
index 47114c2a7cb5448739c108990cf476403b3f1d4a..03e8065f26795431a59df85a9748d1ec0cf1acc2 100644 (file)
@@ -25,8 +25,8 @@
 #define TCORB  6
 #define _8TCNT 8
 
-#define CMIEA  6
-#define CMFA   6
+#define OVIE   5
+#define OVF    5
 
 #define FLAG_STARTED (1 << 3)
 
@@ -40,6 +40,7 @@ struct timer8_priv {
        void __iomem *mapbase;
        unsigned long flags;
        unsigned int rate;
+       uint16_t cnt;
 };
 
 static irqreturn_t timer8_interrupt(int irq, void *dev_id)
@@ -51,7 +52,8 @@ static irqreturn_t timer8_interrupt(int irq, void *dev_id)
 
        p->ced.event_handler(&p->ced);
 
-       bclr(CMFA, p->mapbase + _8TCSR);
+       iowrite16be(p->cnt, p->mapbase + _8TCNT);
+       bclr(OVF, p->mapbase + _8TCSR);
 
        return IRQ_HANDLED;
 }
@@ -60,16 +62,14 @@ static void timer8_set_next(struct timer8_priv *p, unsigned long delta)
 {
        if (delta >= 0x10000)
                pr_warn("delta out of range\n");
-       bclr(CMIEA, p->mapbase + _8TCR);
-       iowrite16be(delta, p->mapbase + TCORA);
-       iowrite16be(0x0000, p->mapbase + _8TCNT);
-       bclr(CMFA, p->mapbase + _8TCSR);
-       bset(CMIEA, p->mapbase + _8TCR);
+       p->cnt = 0x10000 - delta;
+       iowrite16be(p->cnt, p->mapbase + _8TCNT);
+       bclr(OVF, p->mapbase + _8TCSR);
+       bset(OVIE, p->mapbase + _8TCR);
 }
 
 static int timer8_enable(struct timer8_priv *p)
 {
-       iowrite16be(0xffff, p->mapbase + TCORA);
        iowrite16be(0x0000, p->mapbase + _8TCNT);
        iowrite16be(0x0c02, p->mapbase + _8TCR);
 
@@ -177,7 +177,7 @@ static int __init h8300_8timer_init(struct device_node *node)
        }
 
        ret = -EINVAL;
-       irq = irq_of_parse_and_map(node, 0);
+       irq = irq_of_parse_and_map(node, 2);
        if (!irq) {
                pr_err("failed to get irq for clockevent\n");
                goto unmap_reg;
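
The clockevent now preloads the timer's 16-bit counter with 0x10000 - delta and waits for the overflow interrupt (OVF/OVIE) instead of programming a compare match on TCORA, so the counter overflows after exactly delta ticks. A hedged arithmetic sketch:

#include <stdint.h>

/* Preload so a 16-bit up-counter overflows after 'delta' ticks, e.g.
 * delta = 1000 -> 0xfc18; counting from 0xfc18 up through 0xffff and
 * wrapping to 0x0000 takes exactly 1000 increments. */
static uint16_t timer8_preload_model(uint32_t delta)
{
	return (uint16_t)(0x10000u - delta);	/* caller keeps delta < 0x10000 */
}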
index b971505b87152398cd76a16a8fb3a05fb9767c89..08d71dafa001578b1ab96d422b569555432c9cad 100644 (file)
@@ -86,12 +86,12 @@ static struct dw_edma_chunk *dw_edma_alloc_chunk(struct dw_edma_desc *desc)
 
        if (desc->chunk) {
                /* Create and add new element into the linked list */
-               desc->chunks_alloc++;
-               list_add_tail(&chunk->list, &desc->chunk->list);
                if (!dw_edma_alloc_burst(chunk)) {
                        kfree(chunk);
                        return NULL;
                }
+               desc->chunks_alloc++;
+               list_add_tail(&chunk->list, &desc->chunk->list);
        } else {
                /* List head */
                chunk->burst = NULL;
index 266423a2cabc7ddfa773d7e7e198f904fa77069c..4dbb03c545e48abaff38677818be3b579cdbff5e 100644 (file)
@@ -434,7 +434,7 @@ int idxd_register_driver(void)
        return 0;
 
 drv_fail:
-       for (; i > 0; i--)
+       while (--i >= 0)
                driver_unregister(&idxd_drvs[i]->drv);
        return rc;
 }
@@ -1840,7 +1840,7 @@ int idxd_register_bus_type(void)
        return 0;
 
 bus_err:
-       for (; i > 0; i--)
+       while (--i >= 0)
                bus_unregister(idxd_bus_types[i]);
        return rc;
 }
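
With i holding the index of the registration that just failed, the old "for (; i > 0; i--)" unwound entries i down to 1 — touching the never-registered slot i and skipping slot 0 — while "while (--i >= 0)" visits exactly i-1 down to 0. A hedged sketch of the corrected shape:

/* Roll back the first 'i' successful registrations after slot 'i' failed. */
static void unwind_model(void (*unregister_fn)(int idx), int i)
{
	while (--i >= 0)	/* visits i-1, i-2, ..., 0 and nothing else */
		unregister_fn(i);
}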
index f133ae8dece16b47827b1872cdd7c4eaa4f0e1c3..6ad8afbb95f2b3d4dc33cda0d4bb4bbd1e2550d8 100644 (file)
@@ -1007,6 +1007,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
        return 0;
 
 err_free:
+       mtk_hsdma_hw_deinit(hsdma);
        of_dma_controller_free(pdev->dev.of_node);
 err_unregister:
        dma_async_device_unregister(dd);
index 584c931e807af3a824a1bd0adefec70195c7e6bc..d29d01e730aa09171eecc60cfd298943b9783c9a 100644 (file)
@@ -350,7 +350,7 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
 
        ret = dma_async_device_register(ddev);
        if (ret)
-               return ret;
+               goto disable_xdmac;
 
        ret = of_dma_controller_register(dev->of_node,
                                         of_dma_simple_xlate, mdev);
@@ -363,6 +363,8 @@ static int milbeaut_xdmac_probe(struct platform_device *pdev)
 
 unregister_dmac:
        dma_async_device_unregister(ddev);
+disable_xdmac:
+       disable_xdmac(mdev);
        return ret;
 }
 
index d5773d474d8f5c04b382959e648413636239bbd3..88579857ca1d6c08a342c606001188a56157cf03 100644 (file)
@@ -630,7 +630,7 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
                             GFP_NOWAIT);
 
        if (!async_desc)
-               goto err_out;
+               return NULL;
 
        if (flags & DMA_PREP_FENCE)
                async_desc->flags |= DESC_FLAG_NWD;
@@ -670,10 +670,6 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
        }
 
        return vchan_tx_prep(&bchan->vc, &async_desc->vd, flags);
-
-err_out:
-       kfree(async_desc);
-       return NULL;
 }
 
 /**
index d2334f535de2a16f7312eb0da0484b9cd9363bb9..556c070a514cad28ba344cc4049d783976cde03c 100644 (file)
@@ -1416,7 +1416,7 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
        len = 1 << bit;
        ring->alloc_size = (len + (len - 1));
        dev_dbg(gpii->gpi_dev->dev,
-               "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%lu\n",
+               "#el:%u el_size:%u len:%u actual_len:%llu alloc_size:%zu\n",
                  elements, el_size, (elements * el_size), len,
                  ring->alloc_size);
 
@@ -1424,7 +1424,7 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
                                               ring->alloc_size,
                                               &ring->dma_handle, GFP_KERNEL);
        if (!ring->pre_aligned) {
-               dev_err(gpii->gpi_dev->dev, "could not alloc size:%lu mem for ring\n",
+               dev_err(gpii->gpi_dev->dev, "could not alloc size:%zu mem for ring\n",
                        ring->alloc_size);
                return -ENOMEM;
        }
@@ -1444,8 +1444,8 @@ static int gpi_alloc_ring(struct gpi_ring *ring, u32 elements,
        smp_wmb();
 
        dev_dbg(gpii->gpi_dev->dev,
-               "phy_pre:0x%0llx phy_alig:0x%0llx len:%u el_size:%u elements:%u\n",
-               ring->dma_handle, ring->phys_addr, ring->len,
+               "phy_pre:%pad phy_alig:%pa len:%u el_size:%u elements:%u\n",
+               &ring->dma_handle, &ring->phys_addr, ring->len,
                ring->el_size, ring->elements);
 
        return 0;
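
ring->alloc_size is a size_t, so %lu only matches it by accident on 64-bit builds; %zu is the portable specifier, and the kernel's %pad/%pa print dma_addr_t/phys_addr_t taken by reference regardless of their width. A hedged userspace sketch of the size_t half:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t alloc_size = 4096;

	/* %zu matches size_t on every ABI; %lu warns or misprints where
	 * size_t is not unsigned long. */
	printf("alloc_size:%zu\n", alloc_size);
	return 0;
}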
index 87157cbae1b8e782b414fd768f330b878b5b016d..298460438bb4d86ec1e2df62bc8b960b8fbf31c9 100644 (file)
@@ -4698,9 +4698,9 @@ static int pktdma_setup_resources(struct udma_dev *ud)
                ud->tchan_tpl.levels = 1;
        }
 
-       ud->tchan_tpl.levels = ud->tchan_tpl.levels;
-       ud->tchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
-       ud->tchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
+       ud->rchan_tpl.levels = ud->tchan_tpl.levels;
+       ud->rchan_tpl.start_idx[0] = ud->tchan_tpl.start_idx[0];
+       ud->rchan_tpl.start_idx[1] = ud->tchan_tpl.start_idx[1];
 
        ud->tchan_map = devm_kmalloc_array(dev, BITS_TO_LONGS(ud->tchan_cnt),
                                           sizeof(unsigned long), GFP_KERNEL);
index 715e491dfbc333395aa5ba5f3861e7d65b10368c..28785642a5c55933586601876338e86438cbf66e 100644 (file)
@@ -490,7 +490,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
 {
        struct fwnet_device *dev;
        int status;
-       __be64 guid;
 
        switch (ether_type) {
        case ETH_P_ARP:
@@ -512,7 +511,6 @@ static int fwnet_finish_incoming_packet(struct net_device *net,
         * Parse the encapsulation header. This actually does the job of
         * converting to an ethernet-like pseudo frame header.
         */
-       guid = cpu_to_be64(dev->card->guid);
        if (dev_hard_header(skb, net, ether_type,
                           is_broadcast ? net->broadcast : net->dev_addr,
                           NULL, skb->len) >= 0) {
index 86d71b0212b1bd74a3548f60cbd7c7a8dc464ca3..4d5421d14a410e8aedb9bb9ae94aa71051d2efc8 100644 (file)
@@ -85,6 +85,7 @@ static ssize_t get_modalias(char *buffer, size_t buffer_size)
                { "svn", DMI_SYS_VENDOR },
                { "pn",  DMI_PRODUCT_NAME },
                { "pvr", DMI_PRODUCT_VERSION },
+               { "sku", DMI_PRODUCT_SKU },
                { "rvn", DMI_BOARD_VENDOR },
                { "rn",  DMI_BOARD_NAME },
                { "rvr", DMI_BOARD_VERSION },
index 1cb7d73f7317bd26d0d7d1e4d552333f5e9169fa..e46646679281a4cf107c141fcc059f5f117cdc15 100644 (file)
@@ -4206,6 +4206,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
                case CHIP_NAVI14:
                case CHIP_NAVI12:
                case CHIP_SIENNA_CICHLID:
+               case CHIP_VANGOGH:
                        break;
                default:
                        goto disabled;
index e42175e1acf18b758c7ab64077c6f2496f0e4550..0f4cf8dc8f93d2a43cb75ebd06a03f3dee8c3f36 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/dma-buf.h>
 #include <linux/dma-fence-array.h>
 #include <linux/pci-p2pdma.h>
+#include <linux/pm_runtime.h>
 
 /**
  * amdgpu_gem_prime_mmap - &drm_driver.gem_prime_mmap implementation
@@ -151,9 +152,13 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
        if (attach->dev->driver == adev->dev->driver)
                return 0;
 
+       r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
+       if (r < 0)
+               goto out;
+
        r = amdgpu_bo_reserve(bo, false);
        if (unlikely(r != 0))
-               return r;
+               goto out;
 
        /*
         * We only create shared fences for internal use, but importers
@@ -165,11 +170,15 @@ static int amdgpu_dma_buf_attach(struct dma_buf *dmabuf,
         */
        r = __dma_resv_make_exclusive(bo->tbo.base.resv);
        if (r)
-               return r;
+               goto out;
 
        bo->prime_shared_count++;
        amdgpu_bo_unreserve(bo);
        return 0;
+
+out:
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+       return r;
 }
 
 /**
@@ -189,6 +198,9 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
 
        if (attach->dev->driver != adev->dev->driver && bo->prime_shared_count)
                bo->prime_shared_count--;
+
+       pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
+       pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
 }
 
 /**
index 5f4805e4d04ac840ededc9ca72d248cc514e95d6..ec03c0764768f82444e86198a469f4ee634060cc 100644 (file)
@@ -1243,6 +1243,8 @@ static const struct amdgpu_gfxoff_quirk amdgpu_gfxoff_quirk_list[] = {
        { 0x1002, 0x15dd, 0x103c, 0x83e7, 0xd3 },
        /* GFXOFF is unstable on C6 parts with a VBIOS 113-RAVEN-114 */
        { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc6 },
+       /* https://bugzilla.kernel.org/show_bug.cgi?id=207899 */
+       { 0x1002, 0x15dd, 0x103c, 0x83e9, 0xd6 },
        { 0, 0, 0, 0, 0 },
 };
 
index 6bee3677394ac011131369778f10a4dcd56668c1..61fd196a6c661a98dea7bf05912bda19525086e0 100644 (file)
@@ -336,6 +336,38 @@ static int nv_asic_mode1_reset(struct amdgpu_device *adev)
        return ret;
 }
 
+static int nv_asic_mode2_reset(struct amdgpu_device *adev)
+{
+       u32 i;
+       int ret = 0;
+
+       amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+       /* disable BM */
+       pci_clear_master(adev->pdev);
+
+       amdgpu_device_cache_pci_state(adev->pdev);
+
+       ret = amdgpu_dpm_mode2_reset(adev);
+       if (ret)
+               dev_err(adev->dev, "GPU mode2 reset failed\n");
+
+       amdgpu_device_load_pci_state(adev->pdev);
+
+       /* wait for asic to come out of reset */
+       for (i = 0; i < adev->usec_timeout; i++) {
+               u32 memsize = adev->nbio.funcs->get_memsize(adev);
+
+               if (memsize != 0xffffffff)
+                       break;
+               udelay(1);
+       }
+
+       amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
+       return ret;
+}
+
 static bool nv_asic_supports_baco(struct amdgpu_device *adev)
 {
        struct smu_context *smu = &adev->smu;
@@ -352,6 +384,7 @@ nv_asic_reset_method(struct amdgpu_device *adev)
        struct smu_context *smu = &adev->smu;
 
        if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 ||
+           amdgpu_reset_method == AMD_RESET_METHOD_MODE2 ||
            amdgpu_reset_method == AMD_RESET_METHOD_BACO)
                return amdgpu_reset_method;
 
@@ -360,6 +393,8 @@ nv_asic_reset_method(struct amdgpu_device *adev)
                                  amdgpu_reset_method);
 
        switch (adev->asic_type) {
+       case CHIP_VANGOGH:
+               return AMD_RESET_METHOD_MODE2;
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
@@ -377,7 +412,8 @@ static int nv_asic_reset(struct amdgpu_device *adev)
        int ret = 0;
        struct smu_context *smu = &adev->smu;
 
-       if (nv_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) {
+       switch (nv_asic_reset_method(adev)) {
+       case AMD_RESET_METHOD_BACO:
                dev_info(adev->dev, "BACO reset\n");
 
                ret = smu_baco_enter(smu);
@@ -386,9 +422,15 @@ static int nv_asic_reset(struct amdgpu_device *adev)
                ret = smu_baco_exit(smu);
                if (ret)
                        return ret;
-       } else {
+               break;
+       case AMD_RESET_METHOD_MODE2:
+               dev_info(adev->dev, "MODE2 reset\n");
+               ret = nv_asic_mode2_reset(adev);
+               break;
+       default:
                dev_info(adev->dev, "MODE1 reset\n");
                ret = nv_asic_mode1_reset(adev);
+               break;
        }
 
        return ret;
index 8cb4fcee9a2c3750143775c940435fb4ba947a65..9bf139b7774bd7b5d0d1d13a7526ae9ca8ef9c11 100644 (file)
@@ -731,6 +731,11 @@ static int vangogh_system_features_control(struct smu_context *smu, bool en)
                                                en ? RLC_STATUS_NORMAL : RLC_STATUS_OFF, NULL);
        else
                return 0;
+}
+
+static int vangogh_mode2_reset(struct smu_context *smu)
+{
+       return smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
 }
 
 static const struct pptable_funcs vangogh_ppt_funcs = {
@@ -761,6 +769,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
        .set_default_dpm_table = vangogh_set_default_dpm_tables,
        .set_fine_grain_gfx_freq_parameters = vangogh_set_fine_grain_gfx_freq_parameters,
        .system_features_control = vangogh_system_features_control,
+       .mode2_reset = vangogh_mode2_reset,
 };
 
 void vangogh_set_ppt_funcs(struct smu_context *smu)
index 6231048aa5aaa05faf1e10697a63753999fae798..73fe2bc5633c04771e11bc607361528a9fc1b22e 100644 (file)
@@ -28,6 +28,7 @@ config DRM_IMX_TVE
 config DRM_IMX_LDB
        tristate "Support for LVDS displays"
        depends on DRM_IMX && MFD_SYSCON
+       depends on COMMON_CLK
        select DRM_PANEL
        help
          Choose this to enable the internal LVDS Display Bridge (LDB)
index 2eb8df4697dfac6c0bd1e6a2098a62a296e0498c..c4dab79a93854af05abd2680c04d8768f7905410 100644 (file)
@@ -74,7 +74,7 @@ static int imx_pd_connector_get_modes(struct drm_connector *connector)
                        return ret;
 
                drm_mode_copy(mode, &imxpd->mode);
-               mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+               mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
                drm_mode_probed_add(connector, mode);
                num_modes++;
        }
index 9a7c49bc394f81aa34014900bc8a497eb386aa1d..32d5c514e28ad2978ac43f586d6dbcc60eedb084 100644 (file)
@@ -988,6 +988,8 @@ void msm_gem_free_object(struct drm_gem_object *obj)
                if (msm_obj->pages)
                        kvfree(msm_obj->pages);
 
+               put_iova_vmas(obj);
+
                /* dma_buf_detach() grabs resv lock, so we need to unlock
                 * prior to drm_prime_gem_destroy
                 */
@@ -997,11 +999,10 @@ void msm_gem_free_object(struct drm_gem_object *obj)
        } else {
                msm_gem_vunmap(obj);
                put_pages(obj);
+               put_iova_vmas(obj);
                msm_gem_unlock(obj);
        }
 
-       put_iova_vmas(obj);
-
        drm_gem_object_release(obj);
 
        kfree(msm_obj);
index b4a31d506fccf0cf733e91ea5c39ed39f959af45..e617f60afeea3d3e7ad36058feb2eb525cb14bb9 100644 (file)
@@ -310,10 +310,6 @@ static void ipu_di_sync_config_noninterlaced(struct ipu_di *di,
                        /* unused */
                } , {
                        /* unused */
-               } , {
-                       /* unused */
-               } , {
-                       /* unused */
                },
        };
        /* can't use #7 and #8 for line active and pixel active counters */
index 98f830c6ed5070ef0fb74a767efe6322d1dd3789..ccef04f27f12faecd0c8e236f5ba479735a01ae8 100644 (file)
@@ -343,7 +343,6 @@ static int cti_plat_create_connection(struct device *dev,
 {
        struct cti_trig_con *tc = NULL;
        int cpuid = -1, err = 0;
-       struct fwnode_handle *cs_fwnode = NULL;
        struct coresight_device *csdev = NULL;
        const char *assoc_name = "unknown";
        char cpu_name_str[16];
@@ -397,8 +396,9 @@ static int cti_plat_create_connection(struct device *dev,
                assoc_name = cpu_name_str;
        } else {
                /* associated device ? */
-               cs_fwnode = fwnode_find_reference(fwnode,
-                                                 CTI_DT_CSDEV_ASSOC, 0);
+               struct fwnode_handle *cs_fwnode = fwnode_find_reference(fwnode,
+                                                                       CTI_DT_CSDEV_ASSOC,
+                                                                       0);
                if (!IS_ERR(cs_fwnode)) {
                        assoc_name = cti_plat_get_csdev_or_node_name(cs_fwnode,
                                                                     &csdev);
index 19abf11c84c8a3c96df14b80df1d9d212cb7dc39..52a4ad32938a96e7e5f3475b3ce4c9cba55d0d30 100644 (file)
@@ -98,7 +98,7 @@ config IDE_GD_ATAPI
 
          For information about jumper settings and the question
          of when a ZIP drive uses a partition table, see
-         <http://www.win.tue.nl/~aeb/linux/zip/zip-1.html>.
+         <https://www.win.tue.nl/~aeb/linux/zip/zip-1.html>.
 
          If unsure, say N.
 
index 4c959ce41ba9a1db0b53fe9e3adaff96d16966a2..375c7b4946f1a6e291c24f8650d0fa96804a3e3b 100644 (file)
@@ -309,8 +309,7 @@ static struct pci_driver aec62xx_pci_driver = {
        .id_table       = aec62xx_pci_tbl,
        .probe          = aec62xx_init_one,
        .remove         = aec62xx_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init aec62xx_ide_init(void)
index 3265970aee34f70a947f2dc144c13a4b3a5f21de..a5043e048f919c72160381c55b66ba66d1221d83 100644 (file)
@@ -580,8 +580,7 @@ static struct pci_driver alim15x3_pci_driver = {
        .id_table       = alim15x3_pci_tbl,
        .probe          = alim15x3_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init ali15x3_ide_init(void)
index 7340597a373e387b462d379e2fcc9b3e6f8d7043..a9eb64b3e4806276473c2ed7fcd8018bab87edbd 100644 (file)
@@ -321,8 +321,7 @@ static struct pci_driver amd74xx_pci_driver = {
        .id_table       = amd74xx_pci_tbl,
        .probe          = amd74xx_probe,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init amd74xx_ide_init(void)
index e08b0aac08b9e2f160c714d33904fe87bbd192aa..0bc98d5abaf45368de1197f8dbb49739b1074d61 100644 (file)
@@ -190,8 +190,7 @@ static struct pci_driver atiixp_pci_driver = {
        .id_table       = atiixp_pci_tbl,
        .probe          = atiixp_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init atiixp_ide_init(void)
index 943bf944bf722d62b686ba0c923d8a0fb3989c4d..480898a761334d705aab9721c22dc5ff3d105362 100644 (file)
@@ -430,8 +430,7 @@ static struct pci_driver cmd64x_pci_driver = {
        .id_table       = cmd64x_pci_tbl,
        .probe          = cmd64x_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init cmd64x_ide_init(void)
index 89a4ff100b7a5fbd311fbb149edf28bc0115c438..ba0a5bc03d76700fe34fe1512bd54b8a22660d9a 100644 (file)
@@ -152,8 +152,7 @@ static struct pci_driver cs5520_pci_driver = {
        .name           = "Cyrix_IDE",
        .id_table       = cs5520_pci_tbl,
        .probe          = cs5520_init_one,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init cs5520_ide_init(void)
index 65371599b97672673af822f05167e0647ae617a0..5bb46e7130c8a231c5e713ac449e717f7e25c801 100644 (file)
@@ -273,8 +273,7 @@ static struct pci_driver cs5530_pci_driver = {
        .id_table       = cs5530_pci_tbl,
        .probe          = cs5530_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init cs5530_ide_init(void)
index 70fdbe3161f8f06be0dc41bc9c08d94f47e79979..c5b79f84d2748e8520512eb09c91adcaf6b46f0c 100644 (file)
@@ -194,8 +194,7 @@ static struct pci_driver cs5535_pci_driver = {
        .id_table       = cs5535_pci_tbl,
        .probe          = cs5535_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init cs5535_ide_init(void)
index 8b5ca145191b5b84477760e10a10ff30a4092396..827cc68439349678f82318a760c3b1a1b45d3755 100644 (file)
@@ -279,8 +279,7 @@ static struct pci_driver cs5536_pci_driver = {
        .id_table       = cs5536_pci_tbl,
        .probe          = cs5536_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 module_pci_driver(cs5536_pci_driver);
index bc01660ee8fd383c9e651759d2d21c02c3a10d03..511a870a352ca0da868d6bbb39f6e068d8dd6583 100644 (file)
@@ -212,8 +212,7 @@ static struct pci_driver cy82c693_pci_driver = {
        .id_table       = cy82c693_pci_tbl,
        .probe          = cy82c693_init_one,
        .remove         = cy82c693_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init cy82c693_ide_init(void)
index 300daabaa5753a46fa8226d3ce313865d225a753..a711b64dbd1ce3a0b6f4779fcea2fdeb511342ae 100644 (file)
@@ -123,39 +123,18 @@ delkin_cb_remove (struct pci_dev *dev)
        pci_disable_device(dev);
 }
 
-#ifdef CONFIG_PM
-static int delkin_cb_suspend(struct pci_dev *dev, pm_message_t state)
-{
-       pci_save_state(dev);
-       pci_disable_device(dev);
-       pci_set_power_state(dev, pci_choose_state(dev, state));
-
-       return 0;
-}
+#define delkin_cb_suspend NULL
 
-static int delkin_cb_resume(struct pci_dev *dev)
+static int __maybe_unused delkin_cb_resume(struct device *dev_d)
 {
+       struct pci_dev *dev = to_pci_dev(dev_d);
        struct ide_host *host = pci_get_drvdata(dev);
-       int rc;
-
-       pci_set_power_state(dev, PCI_D0);
-
-       rc = pci_enable_device(dev);
-       if (rc)
-               return rc;
-
-       pci_restore_state(dev);
-       pci_set_master(dev);
 
        if (host->init_chipset)
                host->init_chipset(dev);
 
        return 0;
 }
-#else
-#define delkin_cb_suspend NULL
-#define delkin_cb_resume NULL
-#endif
 
 static struct pci_device_id delkin_cb_pci_tbl[] = {
        { 0x1145, 0xf021, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
@@ -164,13 +143,14 @@ static struct pci_device_id delkin_cb_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, delkin_cb_pci_tbl);
 
+static SIMPLE_DEV_PM_OPS(delkin_cb_pm_ops, delkin_cb_suspend, delkin_cb_resume);
+
 static struct pci_driver delkin_cb_pci_driver = {
        .name           = "Delkin-ASKA-Workbit Cardbus IDE",
        .id_table       = delkin_cb_pci_tbl,
        .probe          = delkin_cb_probe,
        .remove         = delkin_cb_remove,
-       .suspend        = delkin_cb_suspend,
-       .resume         = delkin_cb_resume,
+       .driver.pm      = &delkin_cb_pm_ops,
 };
 
 module_pci_driver(delkin_cb_pci_driver);
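
Every IDE host-driver hunk in this run applies the same mechanical conversion: the deprecated .suspend/.resume hooks in struct pci_driver are dropped in favor of a struct dev_pm_ops hung off .driver.pm, with the suspend side left NULL so the PCI core's generic save/disable/power-down path runs. A minimal sketch of the shape, with all "foo" identifiers as placeholders rather than drivers from this series:

        #include <linux/module.h>
        #include <linux/pci.h>

        static int foo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        {
                return 0;       /* chipset setup would go here */
        }

        static void foo_remove(struct pci_dev *pdev)
        {
        }

        static int __maybe_unused foo_resume(struct device *dev_d)
        {
                struct pci_dev *pdev = to_pci_dev(dev_d);

                /* The PM core has already restored config space and
                 * re-enabled the device; only chipset reinit remains.
                 */
                dev_dbg(&pdev->dev, "chipset reinit would go here\n");
                return 0;
        }

        /* NULL suspend: the PCI core's generic save/disable/power-down
         * sequence replaces the hand-rolled legacy suspend bodies.
         */
        static SIMPLE_DEV_PM_OPS(foo_pm_ops, NULL, foo_resume);

        static struct pci_driver foo_pci_driver = {
                .name           = "foo",
                .probe          = foo_probe,
                .remove         = foo_remove,
                .driver.pm      = &foo_pm_ops,
        };
        module_pci_driver(foo_pci_driver);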
index 50c9a41467c881e7966d41f390e1359333daac9c..b3d44de777cbdbed949b4d3ff3585cebf46d342e 100644 (file)
@@ -13,7 +13,7 @@
  *
  *
  * HighPoint has its own drivers (open source except for the RAID part)
- * available from http://www.highpoint-tech.com/USA_new/service_support.htm 
+ * available from https://www.highpoint-tech.com/USA_new/service_support.htm
  * This may be useful to anyone wanting to work on this driver, however  do not
  * trust  them too much since the code tends to become less and less meaningful
  * as the time passes... :-/
@@ -1523,8 +1523,7 @@ static struct pci_driver hpt366_pci_driver = {
        .id_table       = hpt366_pci_tbl,
        .probe          = hpt366_init_one,
        .remove         = hpt366_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init hpt366_ide_init(void)
index 05e18d65814164c637f1cf39caeca648831f312a..9a4ec281b98549dcd0e49be2ec152c1297a6b39b 100644 (file)
@@ -320,10 +320,7 @@ static int do_drive_set_taskfiles(ide_drive_t *drive,
                u8 *gtf = (u8 *)(gtf_address + ix * REGS_PER_GTF);
                struct ide_cmd cmd;
 
-               DEBPRINT("(0x1f1-1f7): "
-                        "hex: %02x %02x %02x %02x %02x %02x %02x\n",
-                        gtf[0], gtf[1], gtf[2],
-                        gtf[3], gtf[4], gtf[5], gtf[6]);
+               DEBPRINT("(0x1f1-1f7): hex: %7ph\n", gtf);
 
                if (!ide_acpigtf) {
                        DEBPRINT("_GTF execution disabled\n");
index 2162bc80f09e02ff2daa35362436de1dbb802217..013ad33fbbc81ee507148df1aeffde66613957d2 100644 (file)
@@ -223,7 +223,6 @@ void ide_prep_sense(ide_drive_t *drive, struct request *rq)
        sense_rq->rq_disk = rq->rq_disk;
        sense_rq->cmd_flags = REQ_OP_DRV_IN;
        ide_req(sense_rq)->type = ATA_PRIV_SENSE;
-       sense_rq->rq_flags |= RQF_PREEMPT;
 
        req->cmd[0] = GPCMD_REQUEST_SENSE;
        req->cmd[4] = cmd_len;
index 1a53c7a752244bf0037acf556471788117a23bfe..4867b67b60d698c464bd9f99f248f2e846b9e524 100644 (file)
@@ -515,15 +515,10 @@ repeat:
                 * above to return us whatever is in the queue. Since we call
                 * ide_do_request() ourselves, we end up taking requests while
                 * the queue is blocked...
-                * 
-                * We let requests forced at head of queue with ide-preempt
-                * though. I hope that doesn't happen too much, hopefully not
-                * unless the subdriver triggers such a thing in its own PM
-                * state machine.
                 */
                if ((drive->dev_flags & IDE_DFLAG_BLOCKED) &&
                    ata_pm_request(rq) == 0 &&
-                   (rq->rq_flags & RQF_PREEMPT) == 0) {
+                   (rq->rq_flags & RQF_PM) == 0) {
                        /* there should be no pending command at this point */
                        ide_unlock_port(hwif);
                        goto plug_device;
index 673420db953f2f1f641e72fd399481e8eaf89750..cc677fbed6f118fa11dd9b3a916f5238c771248e 100644 (file)
@@ -181,8 +181,7 @@ static struct pci_driver generic_pci_driver = {
        .id_table       = generic_pci_tbl,
        .probe          = generic_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init generic_ide_init(void)
index 192e6c65d34e7a0aff073003c2e4039d681f87f2..82ab308f1aafe007b0e2420e0065eeefd19acf6f 100644 (file)
@@ -77,7 +77,7 @@ int generic_ide_resume(struct device *dev)
        }
 
        memset(&rqpm, 0, sizeof(rqpm));
-       rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PREEMPT);
+       rq = blk_get_request(drive->queue, REQ_OP_DRV_IN, BLK_MQ_REQ_PM);
        ide_req(rq)->type = ATA_PRIV_PM_RESUME;
        ide_req(rq)->special = &rqpm;
        rqpm.pm_step = IDE_PM_START_RESUME;
index b6f674ab4fb7c0d4119fa580965d44cc4da272da..d3b5147af7ddbb11176d999d1b971decf0de275f 100644 (file)
@@ -143,8 +143,7 @@ static struct pci_driver it8172_pci_driver = {
        .id_table       = it8172_pci_tbl,
        .probe          = it8172_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init it8172_ide_init(void)
index d0bf4430c437451f5e71c9bc290f4581e524c5bc..56bc08ce5805d0fa12ab996f0cedec727464b6da 100644 (file)
@@ -195,8 +195,7 @@ static struct pci_driver it8213_pci_driver = {
        .id_table       = it8213_pci_tbl,
        .probe          = it8213_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init it8213_ide_init(void)
index 36a64c8ea575dc33bcfe2b300a3c7d8eab165899..aad746007330cdc0d07cf3f9f3db8dce1c70ada2 100644 (file)
@@ -690,8 +690,7 @@ static struct pci_driver it821x_pci_driver = {
        .id_table       = it821x_pci_tbl,
        .probe          = it821x_init_one,
        .remove         = it821x_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init it821x_ide_init(void)
index ae6480dcbadf28e92860dd140302a864d57a5583..0fff50e712a250ea6c0ff1a587f8e36fcbbbffea 100644 (file)
@@ -154,8 +154,7 @@ static struct pci_driver jmicron_pci_driver = {
        .id_table       = jmicron_pci_tbl,
        .probe          = jmicron_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init jmicron_ide_init(void)
index 11a672aba6ee868660e085f6998ec6d054a3eaa7..25c99265e85b5a24fb35d29474d63d9a742bc052 100644 (file)
@@ -328,8 +328,7 @@ static struct pci_driver ns87415_pci_driver = {
        .id_table       = ns87415_pci_tbl,
        .probe          = ns87415_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init ns87415_ide_init(void)
index c374f82333c6d5b6d52b073c28923f49fdb45d28..9fa84e709c430991a4c385ceb779360118b60cb9 100644 (file)
@@ -157,8 +157,7 @@ static struct pci_driver opti621_pci_driver = {
        .id_table       = opti621_pci_tbl,
        .probe          = opti621_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init opti621_ide_init(void)
index 4fcafb9121e00de165b21b6ead4350bef9860073..7c276b8aeb5fef4b1274ad25ce07aa0960ffe06e 100644 (file)
@@ -535,8 +535,7 @@ static struct pci_driver pdc202new_pci_driver = {
        .id_table       = pdc202new_pci_tbl,
        .probe          = pdc202new_init_one,
        .remove         = pdc202new_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init pdc202new_ide_init(void)
index 5248ac064e6e04e17de49abf2c45bf10011cade7..a902028dd5d251cd4fc841d2d622e9baf69481c5 100644 (file)
@@ -340,8 +340,7 @@ static struct pci_driver pdc202xx_pci_driver = {
        .id_table       = pdc202xx_pci_tbl,
        .probe          = pdc202xx_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init pdc202xx_ide_init(void)
index a671cead6ae72b7e95bbd297053e22900f6806c9..2634768a4e664c6c27dcfb5f670dc34230beffa4 100644 (file)
@@ -453,8 +453,7 @@ static struct pci_driver piix_pci_driver = {
        .id_table       = piix_pci_tbl,
        .probe          = piix_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init piix_ide_init(void)
index a5b701818405ba6eb9b7d8fada5462cf9d1d53cb..91a197832d1f54b6eabe8decf99003f017c627b4 100644 (file)
@@ -222,46 +222,33 @@ static void sc1200_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
        sc1200_tunepio(drive, pio);
 }
 
-#ifdef CONFIG_PM
 struct sc1200_saved_state {
        u32 regs[8];
 };
 
-static int sc1200_suspend (struct pci_dev *dev, pm_message_t state)
+static int __maybe_unused sc1200_suspend(struct device *dev_d)
 {
-       printk("SC1200: suspend(%u)\n", state.event);
+       struct pci_dev *dev = to_pci_dev(dev_d);
+       struct ide_host *host = pci_get_drvdata(dev);
+       struct sc1200_saved_state *ss = host->host_priv;
+       unsigned int r;
 
        /*
-        * we only save state when going from full power to less
+        * save timing registers
+        * (this may be unnecessary if BIOS also does it)
         */
-       if (state.event == PM_EVENT_ON) {
-               struct ide_host *host = pci_get_drvdata(dev);
-               struct sc1200_saved_state *ss = host->host_priv;
-               unsigned int r;
-
-               /*
-                * save timing registers
-                * (this may be unnecessary if BIOS also does it)
-                */
-               for (r = 0; r < 8; r++)
-                       pci_read_config_dword(dev, 0x40 + r * 4, &ss->regs[r]);
-       }
+       for (r = 0; r < 8; r++)
+               pci_read_config_dword(dev, 0x40 + r * 4, &ss->regs[r]);
 
-       pci_disable_device(dev);
-       pci_set_power_state(dev, pci_choose_state(dev, state));
        return 0;
 }
 
-static int sc1200_resume (struct pci_dev *dev)
+static int __maybe_unused sc1200_resume(struct device *dev_d)
 {
+       struct pci_dev *dev = to_pci_dev(dev_d);
        struct ide_host *host = pci_get_drvdata(dev);
        struct sc1200_saved_state *ss = host->host_priv;
        unsigned int r;
-       int i;
-
-       i = pci_enable_device(dev);
-       if (i)
-               return i;
 
        /*
         * restore timing registers
@@ -272,7 +259,6 @@ static int sc1200_resume (struct pci_dev *dev)
 
        return 0;
 }
-#endif
 
 static const struct ide_port_ops sc1200_port_ops = {
        .set_pio_mode           = sc1200_set_pio_mode,
@@ -326,15 +312,14 @@ static const struct pci_device_id sc1200_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, sc1200_pci_tbl);
 
+static SIMPLE_DEV_PM_OPS(sc1200_pm_ops, sc1200_suspend, sc1200_resume);
+
 static struct pci_driver sc1200_pci_driver = {
        .name           = "SC1200_IDE",
        .id_table       = sc1200_pci_tbl,
        .probe          = sc1200_init_one,
        .remove         = ide_pci_remove,
-#ifdef CONFIG_PM
-       .suspend        = sc1200_suspend,
-       .resume         = sc1200_resume,
-#endif
+       .driver.pm      = &sc1200_pm_ops,
 };
 
 static int __init sc1200_ide_init(void)
index 458e72e034b09ab2f0d7f5e558069c50c34be117..cdc05b23e03b75ca867780a6d9a12822cf0233db 100644 (file)
@@ -434,8 +434,7 @@ static struct pci_driver svwks_pci_driver = {
        .id_table       = svwks_pci_tbl,
        .probe          = svwks_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init svwks_ide_init(void)
index fdc8e813170c32b4df99417c37d4940e0abbbc78..1a8fb033e4b33898f6a713eb95b166de25f6cdb3 100644 (file)
@@ -648,35 +648,18 @@ void ide_pci_remove(struct pci_dev *dev)
 }
 EXPORT_SYMBOL_GPL(ide_pci_remove);
 
-#ifdef CONFIG_PM
-int ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
-{
-       pci_save_state(dev);
-       pci_disable_device(dev);
-       pci_set_power_state(dev, pci_choose_state(dev, state));
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(ide_pci_suspend);
+#define ide_pci_suspend NULL
 
-int ide_pci_resume(struct pci_dev *dev)
+static int __maybe_unused ide_pci_resume(struct device *dev_d)
 {
+       struct pci_dev *dev = to_pci_dev(dev_d);
        struct ide_host *host = pci_get_drvdata(dev);
-       int rc;
-
-       pci_set_power_state(dev, PCI_D0);
-
-       rc = pci_enable_device(dev);
-       if (rc)
-               return rc;
-
-       pci_restore_state(dev);
-       pci_set_master(dev);
 
        if (host->init_chipset)
                host->init_chipset(dev);
 
        return 0;
 }
-EXPORT_SYMBOL_GPL(ide_pci_resume);
-#endif
+
+SIMPLE_DEV_PM_OPS(ide_pci_pm_ops, ide_pci_suspend, ide_pci_resume);
+EXPORT_SYMBOL_GPL(ide_pci_pm_ops);
index c4b20f350b84bbb55d9786c9e7809a4a346c0343..198847488cc6151d2e257a78c5d38b005669b985 100644 (file)
@@ -821,8 +821,7 @@ static struct pci_driver siimage_pci_driver = {
        .id_table       = siimage_pci_tbl,
        .probe          = siimage_init_one,
        .remove         = siimage_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init siimage_ide_init(void)
index 1a700bef6c565c2f5187e1b5a69a29c8b2d9882e..290daa07e2cac5076d8b3e4394d9594b19ba6f4a 100644 (file)
@@ -615,8 +615,7 @@ static struct pci_driver sis5513_pci_driver = {
        .id_table       = sis5513_pci_tbl,
        .probe          = sis5513_init_one,
        .remove         = sis5513_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init sis5513_ide_init(void)
index 5c24c420c4387ae6f529a71bfbd5ba0376751a14..4ad5c6bce2b7c32b3fa57df76fdfd290ea26eb10 100644 (file)
@@ -346,8 +346,7 @@ static struct pci_driver sl82c105_pci_driver = {
        .id_table       = sl82c105_pci_tbl,
        .probe          = sl82c105_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init sl82c105_ide_init(void)
index f521d5ebf9167b7b23bd88cecf2d511064708b82..cd47445fda1ffd3c9f8ceada7f6ce3cb329370c6 100644 (file)
@@ -160,8 +160,7 @@ static struct pci_driver slc90e66_pci_driver = {
        .id_table       = slc90e66_pci_tbl,
        .probe          = slc90e66_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init slc90e66_ide_init(void)
index 16ddd095683257d20521a8df3288c82939fc9c08..f08a71c2ab6369a074919fd80c16f04697a862cd 100644 (file)
@@ -91,6 +91,13 @@ static const struct ide_port_info triflex_device = {
 
 static int triflex_init_one(struct pci_dev *dev, const struct pci_device_id *id)
 {
+       /*
+        * We must not disable or power down the device:
+        * the APM BIOS refuses to suspend if IDE is not accessible.
+        */
+       dev->pm_cap = 0;
+       dev_info(&dev->dev, "Disabling PCI PM: the PCI core must not power down this device\n");
+
        return ide_pci_init_one(dev, &triflex_device, NULL);
 }
 
@@ -100,27 +107,12 @@ static const struct pci_device_id triflex_pci_tbl[] = {
 };
 MODULE_DEVICE_TABLE(pci, triflex_pci_tbl);
 
-#ifdef CONFIG_PM
-static int triflex_ide_pci_suspend(struct pci_dev *dev, pm_message_t state)
-{
-       /*
-        * We must not disable or powerdown the device.
-        * APM bios refuses to suspend if IDE is not accessible.
-        */
-       pci_save_state(dev);
-       return 0;
-}
-#else
-#define triflex_ide_pci_suspend NULL
-#endif
-
 static struct pci_driver triflex_pci_driver = {
        .name           = "TRIFLEX_IDE",
        .id_table       = triflex_pci_tbl,
        .probe          = triflex_init_one,
        .remove         = ide_pci_remove,
-       .suspend        = triflex_ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init triflex_ide_init(void)
index 63a3aca506fca494ecb307932b5bd98882ec0867..166feaeed6e38c8fe91e563c2458c6f0f382110a 100644 (file)
@@ -510,8 +510,7 @@ static struct pci_driver via_pci_driver = {
        .id_table       = via_pci_tbl,
        .probe          = via_init_one,
        .remove         = via_remove,
-       .suspend        = ide_pci_suspend,
-       .resume         = ide_pci_resume,
+       .driver.pm      = &ide_pci_pm_ops,
 };
 
 static int __init via_ide_init(void)
index 2b321c17054adaf76521352798cbd82f5651c741..94eab82086b2782a7f45f15aa20082d8aefc6bae 100644 (file)
@@ -446,7 +446,7 @@ config KEYBOARD_MPR121
 
 config KEYBOARD_SNVS_PWRKEY
        tristate "IMX SNVS Power Key Driver"
-       depends on ARCH_MXC || COMPILE_TEST
+       depends on ARCH_MXC || (COMPILE_TEST && HAS_IOMEM)
        depends on OF
        help
          This is the snvs powerkey driver for the Freescale i.MX application
index 37568b00873d464eb30bf0f4d49ab65ed6cf9fc2..b08610d6e575e0140090ea94c75e8eb6e75d5d9f 100644 (file)
@@ -863,6 +863,7 @@ static void da7280_parse_properties(struct device *dev,
                gpi_str3[7] = '0' + i;
                haptics->gpi_ctl[i].polarity = 0;
                error = device_property_read_string(dev, gpi_str3, &str);
+               if (!error)
                        haptics->gpi_ctl[i].polarity =
                                da7280_haptic_of_gpi_pol_str(dev, str);
        }
@@ -1299,11 +1300,13 @@ static int __maybe_unused da7280_resume(struct device *dev)
        return retval;
 }
 
+#ifdef CONFIG_OF
 static const struct of_device_id da7280_of_match[] = {
        { .compatible = "dlg,da7280", },
        { }
 };
 MODULE_DEVICE_TABLE(of, da7280_of_match);
+#endif
 
 static const struct i2c_device_id da7280_i2c_id[] = {
        { "da7280", },
index 603a948460d64cb99f490a117796cf5ef5f3b590..4d2d22a8697732d9bb54cfb7abae3714fb5ebc5d 100644 (file)
@@ -445,6 +445,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
                                    enum raydium_bl_ack state)
 {
        int error;
+       static const u8 cmd[] = { 0xFF, 0x39 };
 
        error = raydium_i2c_send(client, RM_CMD_BOOT_WRT, data, len);
        if (error) {
@@ -453,7 +454,7 @@ static int raydium_i2c_write_object(struct i2c_client *client,
                return error;
        }
 
-       error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, NULL, 0);
+       error = raydium_i2c_send(client, RM_CMD_BOOT_ACK, cmd, sizeof(cmd));
        if (error) {
                dev_err(&client->dev, "Ack obj command failed: %d\n", error);
                return error;
index 5f5eb8877c4134849d1c484a723274dfd467cb74..25c9a9c06e4100d727e8a192e69b3c31387181b8 100644 (file)
@@ -167,7 +167,7 @@ static void bcm2836_arm_irqchip_handle_ipi(struct irq_desc *desc)
        chained_irq_exit(chip, desc);
 }
 
-static void bcm2836_arm_irqchip_ipi_eoi(struct irq_data *d)
+static void bcm2836_arm_irqchip_ipi_ack(struct irq_data *d)
 {
        int cpu = smp_processor_id();
 
@@ -195,7 +195,7 @@ static struct irq_chip bcm2836_arm_irqchip_ipi = {
        .name           = "IPI",
        .irq_mask       = bcm2836_arm_irqchip_dummy_op,
        .irq_unmask     = bcm2836_arm_irqchip_dummy_op,
-       .irq_eoi        = bcm2836_arm_irqchip_ipi_eoi,
+       .irq_ack        = bcm2836_arm_irqchip_ipi_ack,
        .ipi_send_mask  = bcm2836_arm_irqchip_ipi_send_mask,
 };
 
index 1054d74b7eddec94bf107be79c7bc3581976846c..42e2918eee2fec7900891f5f38eb107d32199fb8 100644 (file)
@@ -24,7 +24,10 @@ static const char ipr_bit[] = {
 
 static void __iomem *intc_baseaddr;
 
-#define IPR (intc_baseaddr + 6)
+#define ICSR (intc_baseaddr + 2)
+#define IER  (intc_baseaddr + 3)
+#define ISR  (intc_baseaddr + 4)
+#define IPR  (intc_baseaddr + 6)
 
 static void h8300h_disable_irq(struct irq_data *data)
 {
@@ -38,6 +41,8 @@ static void h8300h_disable_irq(struct irq_data *data)
                else
                        ctrl_bclr(bit & 7, (IPR+1));
        }
+       if (irq < 6)
+               ctrl_bclr(irq, IER);
 }
 
 static void h8300h_enable_irq(struct irq_data *data)
@@ -52,12 +57,24 @@ static void h8300h_enable_irq(struct irq_data *data)
                else
                        ctrl_bset(bit & 7, (IPR+1));
        }
+       if (irq < 6)
+               ctrl_bset(irq, IER);
+}
+
+static void h8300h_ack_irq(struct irq_data *data)
+{
+       int irq = data->irq - 12;
+
+       if (irq < 6)
+               ctrl_bclr(irq, ISR);
 }
 
 struct irq_chip h8300h_irq_chip = {
        .name           = "H8/300H-INTC",
        .irq_enable     = h8300h_enable_irq,
        .irq_disable    = h8300h_disable_irq,
+       .irq_ack        = h8300h_ack_irq,
 };
 
 static int irq_map(struct irq_domain *h, unsigned int virq,
index 4e2461bae944dd60f22ea81cb9ac8249bc88537b..0b8d9d7ef2ee1878306de54bcff56ca8e7d7b64a 100644 (file)
 #include <linux/of_irq.h>
 #include <asm/io.h>
 
-static void *intc_baseaddr;
-#define IPRA (intc_baseaddr)
+static void *ipr_base;
+static void *icr_base;
+#define IPRA (ipr_base)
+#define IER  (icr_base + 2)
+#define ISR  (icr_base + 4)
 
 static const unsigned char ipr_table[] = {
        0x03, 0x02, 0x01, 0x00, 0x13, 0x12, 0x11, 0x10, /* 16 - 23 */
@@ -36,13 +39,22 @@ static void h8s_disable_irq(struct irq_data *data)
        int pos;
        void __iomem *addr;
        unsigned short pri;
-       int irq = data->irq;
+       int irq = data->irq - 16;
+       unsigned short ier;
+
+       if (irq < 0)
+               return;
 
-       addr = IPRA + ((ipr_table[irq - 16] & 0xf0) >> 3);
-       pos = (ipr_table[irq - 16] & 0x0f) * 4;
+       addr = IPRA + ((ipr_table[irq] & 0xf0) >> 3);
+       pos = (ipr_table[irq] & 0x0f) * 4;
        pri = ~(0x000f << pos);
-       pri &= readw(addr);
-       writew(pri, addr);
+       pri &= __raw_readw(addr);
+       __raw_writew(pri, addr);
+       if (irq < 16) {
+               ier = __raw_readw(IER);
+               ier &= ~(1 << irq);
+               __raw_writew(ier, IER);
+       }
 }
 
 static void h8s_enable_irq(struct irq_data *data)
@@ -50,20 +62,43 @@ static void h8s_enable_irq(struct irq_data *data)
        int pos;
        void __iomem *addr;
        unsigned short pri;
-       int irq = data->irq;
+       int irq = data->irq - 16;
+       unsigned short ier;
+
+       if (irq < 0)
+               return;
 
-       addr = IPRA + ((ipr_table[irq - 16] & 0xf0) >> 3);
-       pos = (ipr_table[irq - 16] & 0x0f) * 4;
+       addr = IPRA + ((ipr_table[irq] & 0xf0) >> 3);
+       pos = (ipr_table[irq] & 0x0f) * 4;
        pri = ~(0x000f << pos);
-       pri &= readw(addr);
+       pri &= __raw_readw(addr);
        pri |= 1 << pos;
-       writew(pri, addr);
+       __raw_writew(pri, addr);
+       if (irq < 16) {
+               ier = __raw_readw(IER);
+               ier |= 1 << irq;
+               __raw_writew(ier, IER);
+       }
+}
+
+static void h8s_ack_irq(struct irq_data *data)
+{
+       int irq = data->irq;
+       uint16_t isr;
+
+       if (irq >= 16 && irq < 32) {
+               irq -= 16;
+               isr = __raw_readw(ISR);
+               isr &= ~(1 << irq);
+               __raw_writew(isr, ISR);
+       }
 }
 
 struct irq_chip h8s_irq_chip = {
        .name           = "H8S-INTC",
        .irq_enable     = h8s_enable_irq,
        .irq_disable    = h8s_disable_irq,
+       .irq_ack        = h8s_ack_irq,
 };
 
 static __init int irq_map(struct irq_domain *h, unsigned int virq,
@@ -85,14 +120,16 @@ static int __init h8s_intc_of_init(struct device_node *intc,
        struct irq_domain *domain;
        int n;
 
-       intc_baseaddr = of_iomap(intc, 0);
-       BUG_ON(!intc_baseaddr);
+       ipr_base = of_iomap(intc, 0);
+       icr_base = of_iomap(intc, 1);
+       BUG_ON(!ipr_base || !icr_base);
 
        /* All interrupt priority is 0 (disable) */
        /* IPRA to IPRK */
        for (n = 0; n <= 'k' - 'a'; n++)
-               writew(0x0000, IPRA + (n * 2));
+               __raw_writew(0x0000, IPRA + (n * 2));
 
+       __raw_writew(0xffff, IER);
        domain = irq_domain_add_linear(intc, NR_IRQS, &irq_ops, NULL);
        BUG_ON(!domain);
        irq_set_default_host(domain);
index 0aa50d025ef6c2f877750b7d7b981bdc1d7fd8f3..fbb354413ffa139a31844559a0fb0db56c1390f1 100644 (file)
@@ -66,7 +66,7 @@ static int sl28cpld_intc_probe(struct platform_device *pdev)
        irqchip->chip.num_regs = 1;
        irqchip->chip.status_base = base + INTC_IP;
        irqchip->chip.mask_base = base + INTC_IE;
-       irqchip->chip.mask_invert = true,
+       irqchip->chip.mask_invert = true;
        irqchip->chip.ack_base = base + INTC_IP;
 
        return devm_regmap_add_irq_chip_fwnode(dev, dev_fwnode(dev),
index bb68ba23a7d448e32a8a97f93356131305559db6..49e1bddaa15e0f6a845c76468b827e91c778c4e7 100644 (file)
@@ -96,14 +96,14 @@ static int ariel_led_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        leds[0].ec_index = EC_BLUE_LED;
-       leds[0].led_cdev.name = "blue:power",
+       leds[0].led_cdev.name = "blue:power";
        leds[0].led_cdev.default_trigger = "default-on";
 
        leds[1].ec_index = EC_AMBER_LED;
-       leds[1].led_cdev.name = "amber:status",
+       leds[1].led_cdev.name = "amber:status";
 
        leds[2].ec_index = EC_GREEN_LED;
-       leds[2].led_cdev.name = "green:status",
+       leds[2].led_cdev.name = "green:status";
        leds[2].led_cdev.default_trigger = "default-on";
 
        for (i = 0; i < NLEDS; i++) {
index e11fe178824254118392644e3894a60eaff281bc..b4e1fdff4186ad3d9e98fe176d129af37eb1b53b 100644 (file)
@@ -192,13 +192,13 @@ static int store_color_common(struct device *dev, const char *buf, int color)
        return 0;
 }
 
-static ssize_t show_red(struct device *dev, struct device_attribute *attr,
+static ssize_t red_show(struct device *dev, struct device_attribute *attr,
                        char *buf)
 {
        return show_color_common(dev, buf, RED);
 }
 
-static ssize_t store_red(struct device *dev, struct device_attribute *attr,
+static ssize_t red_store(struct device *dev, struct device_attribute *attr,
                         const char *buf, size_t count)
 {
        int ret;
@@ -209,15 +209,15 @@ static ssize_t store_red(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(red, S_IRUGO | S_IWUSR, show_red, store_red);
+static DEVICE_ATTR_RW(red);
 
-static ssize_t show_green(struct device *dev, struct device_attribute *attr,
+static ssize_t green_show(struct device *dev, struct device_attribute *attr,
                          char *buf)
 {
        return show_color_common(dev, buf, GREEN);
 }
 
-static ssize_t store_green(struct device *dev, struct device_attribute *attr,
+static ssize_t green_store(struct device *dev, struct device_attribute *attr,
                           const char *buf, size_t count)
 {
 
@@ -229,15 +229,15 @@ static ssize_t store_green(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(green, S_IRUGO | S_IWUSR, show_green, store_green);
+static DEVICE_ATTR_RW(green);
 
-static ssize_t show_blue(struct device *dev, struct device_attribute *attr,
+static ssize_t blue_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
        return show_color_common(dev, buf, BLUE);
 }
 
-static ssize_t store_blue(struct device *dev, struct device_attribute *attr,
+static ssize_t blue_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
 {
        int ret;
@@ -248,16 +248,16 @@ static ssize_t store_blue(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(blue, S_IRUGO | S_IWUSR, show_blue, store_blue);
+static DEVICE_ATTR_RW(blue);
 
-static ssize_t show_test(struct device *dev, struct device_attribute *attr,
+static ssize_t test_show(struct device *dev, struct device_attribute *attr,
                         char *buf)
 {
        return scnprintf(buf, PAGE_SIZE,
                         "#Write into test to start test sequence!#\n");
 }
 
-static ssize_t store_test(struct device *dev, struct device_attribute *attr,
+static ssize_t test_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
 {
 
@@ -273,7 +273,7 @@ static ssize_t store_test(struct device *dev, struct device_attribute *attr,
        return count;
 }
 
-static DEVICE_ATTR(test, S_IRUGO | S_IWUSR, show_test, store_test);
+static DEVICE_ATTR_RW(test);
 
 /* TODO: HSB, fade, timeadj, script ... */
 
index 2f8362f6bf75c9b19bf318b523653e56e5381ace..2db455efd4b177d174878109b11c2c1c8d522464 100644 (file)
@@ -346,8 +346,8 @@ static void lm3530_brightness_set(struct led_classdev *led_cdev,
        }
 }
 
-static ssize_t lm3530_mode_get(struct device *dev,
-               struct device_attribute *attr, char *buf)
+static ssize_t mode_show(struct device *dev,
+                        struct device_attribute *attr, char *buf)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
        struct lm3530_data *drvdata;
@@ -365,8 +365,8 @@ static ssize_t lm3530_mode_get(struct device *dev,
        return len;
 }
 
-static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute
-                                  *attr, const char *buf, size_t size)
+static ssize_t mode_store(struct device *dev, struct device_attribute
+                         *attr, const char *buf, size_t size)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
        struct lm3530_data *drvdata;
@@ -397,7 +397,7 @@ static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute
 
        return sizeof(drvdata->mode);
 }
-static DEVICE_ATTR(mode, 0644, lm3530_mode_get, lm3530_mode_set);
+static DEVICE_ATTR_RW(mode);
 
 static struct attribute *lm3530_attrs[] = {
        &dev_attr_mode.attr,
index b3edee7031931eae380a762f30d866ac3af36da3..9dd205870525c4f753d656b169e94d2754fbb768 100644 (file)
@@ -679,7 +679,7 @@ static int lm3533_led_probe(struct platform_device *pdev)
        led->cdev.brightness_get = lm3533_led_get;
        led->cdev.blink_set = lm3533_led_blink_set;
        led->cdev.brightness = LED_OFF;
-       led->cdev.groups = lm3533_led_attribute_groups,
+       led->cdev.groups = lm3533_led_attribute_groups;
        led->id = pdev->id;
 
        mutex_init(&led->mutex);
index 1505521249b509c46b3c913116ad21eb487e2586..2d3e11845ba51696062236e9b4b0edb7128905ee 100644 (file)
@@ -349,9 +349,9 @@ static int lm355x_indicator_brightness_set(struct led_classdev *cdev,
 }
 
 /* indicator pattern only for lm3556*/
-static ssize_t lm3556_indicator_pattern_store(struct device *dev,
-                                             struct device_attribute *attr,
-                                             const char *buf, size_t size)
+static ssize_t pattern_store(struct device *dev,
+                            struct device_attribute *attr,
+                            const char *buf, size_t size)
 {
        ssize_t ret;
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
@@ -381,7 +381,7 @@ out:
        return ret;
 }
 
-static DEVICE_ATTR(pattern, S_IWUSR, NULL, lm3556_indicator_pattern_store);
+static DEVICE_ATTR_WO(pattern);
 
 static struct attribute *lm355x_indicator_attrs[] = {
        &dev_attr_pattern.attr,
index 62c14872caf7302701121c18376db60018d92a49..8007b82985a8b0eccf454efc7f57668f7a71fc31 100644 (file)
@@ -165,9 +165,9 @@ static int lm3642_control(struct lm3642_chip_data *chip,
 /* torch */
 
 /* torch pin config for lm3642 */
-static ssize_t lm3642_torch_pin_store(struct device *dev,
-                                     struct device_attribute *attr,
-                                     const char *buf, size_t size)
+static ssize_t torch_pin_store(struct device *dev,
+                              struct device_attribute *attr,
+                              const char *buf, size_t size)
 {
        ssize_t ret;
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
@@ -193,7 +193,7 @@ static ssize_t lm3642_torch_pin_store(struct device *dev,
        return size;
 }
 
-static DEVICE_ATTR(torch_pin, S_IWUSR, NULL, lm3642_torch_pin_store);
+static DEVICE_ATTR_WO(torch_pin);
 
 static int lm3642_torch_brightness_set(struct led_classdev *cdev,
                                        enum led_brightness brightness)
@@ -212,9 +212,9 @@ static int lm3642_torch_brightness_set(struct led_classdev *cdev,
 /* flash */
 
 /* strobe pin config for lm3642*/
-static ssize_t lm3642_strobe_pin_store(struct device *dev,
-                                      struct device_attribute *attr,
-                                      const char *buf, size_t size)
+static ssize_t strobe_pin_store(struct device *dev,
+                               struct device_attribute *attr,
+                               const char *buf, size_t size)
 {
        ssize_t ret;
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
@@ -240,7 +240,7 @@ static ssize_t lm3642_strobe_pin_store(struct device *dev,
        return size;
 }
 
-static DEVICE_ATTR(strobe_pin, S_IWUSR, NULL, lm3642_strobe_pin_store);
+static DEVICE_ATTR_WO(strobe_pin);
 
 static int lm3642_strobe_brightness_set(struct led_classdev *cdev,
                                         enum led_brightness brightness)
index 512a11d142d06a6dad479a31d9fc2009980cd5f6..c0bddb33888d7f1e2c4f725373c86b737a1e3826 100644 (file)
@@ -160,8 +160,8 @@ static void max8997_led_brightness_set(struct led_classdev *led_cdev,
        }
 }
 
-static ssize_t max8997_led_show_mode(struct device *dev,
-                               struct device_attribute *attr, char *buf)
+static ssize_t mode_show(struct device *dev,
+                        struct device_attribute *attr, char *buf)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
        struct max8997_led *led =
@@ -193,9 +193,9 @@ static ssize_t max8997_led_show_mode(struct device *dev,
        return ret;
 }
 
-static ssize_t max8997_led_store_mode(struct device *dev,
-                               struct device_attribute *attr,
-                               const char *buf, size_t size)
+static ssize_t mode_store(struct device *dev,
+                         struct device_attribute *attr,
+                         const char *buf, size_t size)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
        struct max8997_led *led =
@@ -222,7 +222,7 @@ static ssize_t max8997_led_store_mode(struct device *dev,
        return size;
 }
 
-static DEVICE_ATTR(mode, 0644, max8997_led_show_mode, max8997_led_store_mode);
+static DEVICE_ATTR_RW(mode);
 
 static struct attribute *max8997_attrs[] = {
        &dev_attr_mode.attr,
index 68fbf0b66faddfa9e9ac3214010fcbcffcbe3b09..77213b79f84d95d709709e8a24e052aa0eb750c1 100644 (file)
@@ -204,9 +204,9 @@ static void netxbig_led_set(struct led_classdev *led_cdev,
        spin_unlock_irqrestore(&led_dat->lock, flags);
 }
 
-static ssize_t netxbig_led_sata_store(struct device *dev,
-                                     struct device_attribute *attr,
-                                     const char *buff, size_t count)
+static ssize_t sata_store(struct device *dev,
+                         struct device_attribute *attr,
+                         const char *buff, size_t count)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
        struct netxbig_led_data *led_dat =
@@ -255,8 +255,8 @@ exit_unlock:
        return ret;
 }
 
-static ssize_t netxbig_led_sata_show(struct device *dev,
-                                    struct device_attribute *attr, char *buf)
+static ssize_t sata_show(struct device *dev,
+                        struct device_attribute *attr, char *buf)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
        struct netxbig_led_data *led_dat =
@@ -265,7 +265,7 @@ static ssize_t netxbig_led_sata_show(struct device *dev,
        return sprintf(buf, "%d\n", led_dat->sata);
 }
 
-static DEVICE_ATTR(sata, 0644, netxbig_led_sata_show, netxbig_led_sata_store);
+static DEVICE_ATTR_RW(sata);
 
 static struct attribute *netxbig_led_attrs[] = {
        &dev_attr_sata.attr,
index 245de443fe9c4e63ed47ea1e9254d9a6a0c7343a..fcaa34706b6caa16e8d11f941db6198fd5578b6f 100644 (file)
@@ -441,8 +441,8 @@ static void set_power_light_amber_noblink(void)
        nasgpio_led_set_brightness(&amber->led_cdev, LED_FULL);
 }
 
-static ssize_t nas_led_blink_show(struct device *dev,
-                                 struct device_attribute *attr, char *buf)
+static ssize_t blink_show(struct device *dev,
+                         struct device_attribute *attr, char *buf)
 {
        struct led_classdev *led = dev_get_drvdata(dev);
        int blinking = 0;
@@ -451,9 +451,9 @@ static ssize_t nas_led_blink_show(struct device *dev,
        return sprintf(buf, "%u\n", blinking);
 }
 
-static ssize_t nas_led_blink_store(struct device *dev,
-                                  struct device_attribute *attr,
-                                  const char *buf, size_t size)
+static ssize_t blink_store(struct device *dev,
+                          struct device_attribute *attr,
+                          const char *buf, size_t size)
 {
        int ret;
        struct led_classdev *led = dev_get_drvdata(dev);
@@ -468,7 +468,7 @@ static ssize_t nas_led_blink_store(struct device *dev,
        return size;
 }
 
-static DEVICE_ATTR(blink, 0644, nas_led_blink_show, nas_led_blink_store);
+static DEVICE_ATTR_RW(blink);
 
 static struct attribute *nasgpio_led_attrs[] = {
        &dev_attr_blink.attr,
@@ -478,7 +478,6 @@ ATTRIBUTE_GROUPS(nasgpio_led);
 
 static int register_nasgpio_led(int led_nr)
 {
-       int ret;
        struct nasgpio_led *nas_led = &nasgpio_leds[led_nr];
        struct led_classdev *led = get_classdev_for_led_nr(led_nr);
 
@@ -489,11 +488,8 @@ static int register_nasgpio_led(int led_nr)
        led->brightness_set = nasgpio_led_set_brightness;
        led->blink_set = nasgpio_led_set_blink;
        led->groups = nasgpio_led_groups;
-       ret = led_classdev_register(&nas_gpio_pci_dev->dev, led);
-       if (ret)
-               return ret;
 
-       return 0;
+       return led_classdev_register(&nas_gpio_pci_dev->dev, led);
 }
 
 static void unregister_nasgpio_led(int led_nr)
index 67f4235cb28a8b57603dd86170adaa1f7e792ca7..c48b80574f02435247e0f7c8701785c0ad874bae 100644 (file)
@@ -155,8 +155,8 @@ static const char * const led_src_texts[] = {
        "soft",
 };
 
-static ssize_t wm831x_status_src_show(struct device *dev,
-                                     struct device_attribute *attr, char *buf)
+static ssize_t src_show(struct device *dev,
+                       struct device_attribute *attr, char *buf)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
        struct wm831x_status *led = to_wm831x_status(led_cdev);
@@ -178,9 +178,9 @@ static ssize_t wm831x_status_src_show(struct device *dev,
        return ret;
 }
 
-static ssize_t wm831x_status_src_store(struct device *dev,
-                                      struct device_attribute *attr,
-                                      const char *buf, size_t size)
+static ssize_t src_store(struct device *dev,
+                        struct device_attribute *attr,
+                        const char *buf, size_t size)
 {
        struct led_classdev *led_cdev = dev_get_drvdata(dev);
        struct wm831x_status *led = to_wm831x_status(led_cdev);
@@ -197,7 +197,7 @@ static ssize_t wm831x_status_src_store(struct device *dev,
        return size;
 }
 
-static DEVICE_ATTR(src, 0644, wm831x_status_src_show, wm831x_status_src_store);
+static DEVICE_ATTR_RW(src);
 
 static struct attribute *wm831x_status_attrs[] = {
        &dev_attr_src.attr,
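
The LED hunks above all feed the same macro contract: DEVICE_ATTR_RW(name) expands to dev_attr_<name> with mode 0644 bound to <name>_show()/<name>_store(), which is why each handler is renamed before the macro swap (DEVICE_ATTR_WO similarly expects only <name>_store()). A hedged sketch of the convention, using a hypothetical "example" attribute not taken from these drivers:

        #include <linux/device.h>
        #include <linux/sysfs.h>

        /* DEVICE_ATTR_RW(example) requires exactly these handler names
         * and implies a 0644 mode; "example" is illustrative only.
         */
        static ssize_t example_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
        {
                return sprintf(buf, "%d\n", 0);
        }

        static ssize_t example_store(struct device *dev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
        {
                return count;   /* parse and apply would go here */
        }

        static DEVICE_ATTR_RW(example);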
index 53791138d78bf4b5296f3aa400f517b60aabe9f4..5f9f9b3a226d7ccad1bedc57c1fede7f1a9c888e 100644 (file)
@@ -3166,11 +3166,12 @@ static int crypt_ctr(struct dm_target *ti, unsigned int argc, char **argv)
        }
 
        if (test_bit(DM_CRYPT_SAME_CPU, &cc->flags))
-               cc->crypt_queue = alloc_workqueue("kcryptd/%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
+               cc->crypt_queue = alloc_workqueue("kcryptd-%s", WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM,
                                                  1, devname);
        else
-               cc->crypt_queue = alloc_workqueue("kcryptd/%s",
-                                                 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM | WQ_UNBOUND,
+               cc->crypt_queue = alloc_workqueue("kcryptd-%s",
+                                                 WQ_CPU_INTENSIVE | WQ_MEM_RECLAIM |
+                                                 WQ_UNBOUND | WQ_SYSFS,
                                                  num_online_cpus(), devname);
        if (!cc->crypt_queue) {
                ti->error = "Couldn't create kcryptd queue";
index 0fdd19d99d99fdf6c8c45e86c1dd089c28524ea6..b1ae9eb8f24793a56089926fe78fedbbfa3399db 100644 (file)
@@ -2577,6 +2577,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
                         NETIF_F_HW_VLAN_CTAG_TX;
        dev->hw_features |= dev->features;
        dev->vlan_features |= dev->features;
+       dev->max_mtu = UMAC_MAX_MTU_SIZE;
 
        /* Request the WOL interrupt and advertise suspend if available */
        priv->wol_irq_disabled = 1;
index ba8869c3d891c0e05e29cd4fb1bcb5b17ce775ba..6d853f018d53133d47fa5a6236174a64a1f5f370 100644 (file)
@@ -3889,6 +3889,7 @@ static int ucc_geth_probe(struct platform_device* ofdev)
        INIT_WORK(&ugeth->timeout_work, ucc_geth_timeout_work);
        netif_napi_add(dev, &ugeth->napi, ucc_geth_poll, 64);
        dev->mtu = 1500;
+       dev->max_mtu = 1518;
 
        ugeth->msg_enable = netif_msg_init(debug.msg_enable, UGETH_MSG_DEFAULT);
        ugeth->phy_interface = phy_interface;
@@ -3934,12 +3935,12 @@ static int ucc_geth_remove(struct platform_device* ofdev)
        struct device_node *np = ofdev->dev.of_node;
 
        unregister_netdev(dev);
-       free_netdev(dev);
        ucc_geth_memclean(ugeth);
        if (of_phy_is_fixed_link(np))
                of_phy_deregister_fixed_link(np);
        of_node_put(ugeth->ug_info->tbi_node);
        of_node_put(ugeth->ug_info->phy_node);
+       free_netdev(dev);
 
        return 0;
 }
index 1a9bdf66a7d81214f5e15fbed436a204597b59cc..11d4bf5dc21f73e2a6d4b34cef723474eb24899f 100644 (file)
@@ -575,7 +575,14 @@ struct ucc_geth_tx_global_pram {
        u32 vtagtable[0x8];     /* 8 4-byte VLAN tags */
        u32 tqptr;              /* a base pointer to the Tx Queues Memory
                                   Region */
-       u8 res2[0x80 - 0x74];
+       u8 res2[0x78 - 0x74];
+       u64 snums_en;
+       u32 l2l3baseptr;        /* top byte consists of a few other bit fields */
+
+       u16 mtu[8];
+       u8 res3[0xa8 - 0x94];
+       u32 wrrtablebase;       /* top byte is reserved */
+       u8 res4[0xc0 - 0xac];
 } __packed;
 
 /* structure representing Extended Filtering Global Parameters in PRAM */
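
The ucc_geth_tx_global_pram rework above relies on the reserved-gap idiom: each u8 resN[end - start] array exists only to pin the following field at a fixed PRAM offset. A sketch making the implied offsets explicit, derived purely from the hunk's arithmetic; the checks are not code from the series and assume the driver-local header is in scope:

        #include <linux/build_bug.h>
        #include <linux/stddef.h>
        #include "ucc_geth.h"   /* assumed: driver-local header */

        /* res2 ends at 0x78, so snums_en (u64) occupies 0x78..0x80,
         * l2l3baseptr sits at 0x80, mtu[8] spans 0x84..0x94, res3 runs
         * to 0xa8 where wrrtablebase lives, and res4 pads to 0xc0.
         */
        static_assert(offsetof(struct ucc_geth_tx_global_pram, snums_en) == 0x78);
        static_assert(offsetof(struct ucc_geth_tx_global_pram, l2l3baseptr) == 0x80);
        static_assert(offsetof(struct ucc_geth_tx_global_pram, mtu) == 0x84);
        static_assert(offsetof(struct ucc_geth_tx_global_pram, wrrtablebase) == 0xa8);
        static_assert(sizeof(struct ucc_geth_tx_global_pram) == 0xc0);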
index d231a2cdd98ff244acb0c796dea11c84767e1404..118473dfdcbd248b0472da7f32ae31dffcc5e1d5 100644 (file)
@@ -120,6 +120,7 @@ enum i40e_state_t {
        __I40E_RESET_INTR_RECEIVED,
        __I40E_REINIT_REQUESTED,
        __I40E_PF_RESET_REQUESTED,
+       __I40E_PF_RESET_AND_REBUILD_REQUESTED,
        __I40E_CORE_RESET_REQUESTED,
        __I40E_GLOBAL_RESET_REQUESTED,
        __I40E_EMP_RESET_INTR_RECEIVED,
@@ -146,6 +147,8 @@ enum i40e_state_t {
 };
 
 #define I40E_PF_RESET_FLAG     BIT_ULL(__I40E_PF_RESET_REQUESTED)
+#define I40E_PF_RESET_AND_REBUILD_FLAG \
+       BIT_ULL(__I40E_PF_RESET_AND_REBUILD_REQUESTED)
 
 /* VSI state flags */
 enum i40e_vsi_state_t {
index 1337686bd0998003e1a92a56f03ed7d8d8a99a3e..1db482d310c2d5ad1ca978ec7d47ee8b4f77221b 100644 (file)
@@ -36,6 +36,8 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf);
 static void i40e_determine_queue_usage(struct i40e_pf *pf);
 static int i40e_setup_pf_filter_control(struct i40e_pf *pf);
 static void i40e_prep_for_reset(struct i40e_pf *pf, bool lock_acquired);
+static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit,
+                                  bool lock_acquired);
 static int i40e_reset(struct i40e_pf *pf);
 static void i40e_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired);
 static int i40e_setup_misc_vector_for_recovery_mode(struct i40e_pf *pf);
@@ -8536,6 +8538,14 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags, bool lock_acquired)
                         "FW LLDP is disabled\n" :
                         "FW LLDP is enabled\n");
 
+       } else if (reset_flags & I40E_PF_RESET_AND_REBUILD_FLAG) {
+               /* Request a PF Reset
+                *
+                * Resets PF and reinitializes PFs VSI.
+                */
+               i40e_prep_for_reset(pf, lock_acquired);
+               i40e_reset_and_rebuild(pf, true, lock_acquired);
+
        } else if (reset_flags & BIT_ULL(__I40E_REINIT_REQUESTED)) {
                int v;
 
index 729c4f0d5ac5299a10b9862d7143d724b6eb88c0..21ee56420c3aee60f1428ebf2566611addd34c04 100644 (file)
@@ -1772,7 +1772,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
        if (num_vfs) {
                if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
                        pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
-                       i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+                       i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
                }
                ret = i40e_pci_sriov_enable(pdev, num_vfs);
                goto sriov_configure_out;
@@ -1781,7 +1781,7 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
        if (!pci_vfs_assigned(pf->pdev)) {
                i40e_free_vfs(pf);
                pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
-               i40e_do_reset_safe(pf, I40E_PF_RESET_FLAG);
+               i40e_do_reset_safe(pf, I40E_PF_RESET_AND_REBUILD_FLAG);
        } else {
                dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
                ret = -EINVAL;
index 95543dfd4fe77c73023fa9b96b62de476fd7c909..0a867d64d46753ae4f065ce479b4f4a139f38067 100644 (file)
@@ -1834,11 +1834,9 @@ static int iavf_init_get_resources(struct iavf_adapter *adapter)
        netif_tx_stop_all_queues(netdev);
        if (CLIENT_ALLOWED(adapter)) {
                err = iavf_lan_add_device(adapter);
-               if (err) {
-                       rtnl_unlock();
+               if (err)
                        dev_info(&pdev->dev, "Failed to add VF to client API service list: %d\n",
                                 err);
-               }
        }
        dev_info(&pdev->dev, "MAC address: %pM\n", adapter->hw.mac.addr);
        if (netdev->features & NETIF_F_GRO)
index afdd22827223bb7bb7f787ca2d5a17ffcdeffaaf..f20b31327027006d9450bb1ad6fec84b5eae39ad 100644 (file)
@@ -1231,7 +1231,7 @@ static void mvpp22_gop_init_rgmii(struct mvpp2_port *port)
 
        regmap_read(priv->sysctrl_base, GENCONF_CTRL0, &val);
        if (port->gop_id == 2)
-               val |= GENCONF_CTRL0_PORT0_RGMII | GENCONF_CTRL0_PORT1_RGMII;
+               val |= GENCONF_CTRL0_PORT0_RGMII;
        else if (port->gop_id == 3)
                val |= GENCONF_CTRL0_PORT1_RGMII_MII;
        regmap_write(priv->sysctrl_base, GENCONF_CTRL0, val);
@@ -5487,7 +5487,7 @@ static int mvpp2_port_init(struct mvpp2_port *port)
        struct mvpp2 *priv = port->priv;
        struct mvpp2_txq_pcpu *txq_pcpu;
        unsigned int thread;
-       int queue, err;
+       int queue, err, val;
 
        /* Checks for hardware constraints */
        if (port->first_rxq + port->nrxqs >
@@ -5501,6 +5501,18 @@ static int mvpp2_port_init(struct mvpp2_port *port)
        mvpp2_egress_disable(port);
        mvpp2_port_disable(port);
 
+       if (mvpp2_is_xlg(port->phy_interface)) {
+               val = readl(port->base + MVPP22_XLG_CTRL0_REG);
+               val &= ~MVPP22_XLG_CTRL0_FORCE_LINK_PASS;
+               val |= MVPP22_XLG_CTRL0_FORCE_LINK_DOWN;
+               writel(val, port->base + MVPP22_XLG_CTRL0_REG);
+       } else {
+               val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+               val &= ~MVPP2_GMAC_FORCE_LINK_PASS;
+               val |= MVPP2_GMAC_FORCE_LINK_DOWN;
+               writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+       }
+
        port->tx_time_coal = MVPP2_TXDONE_COAL_USEC;
 
        port->txqs = devm_kcalloc(dev, port->ntxqs, sizeof(*port->txqs),
index 5692c6087bbb0781ef473ea5dfe8f6148c4ae4f7..a30eb90ba3d28a04e57a6a822745bfe232144f48 100644 (file)
@@ -405,6 +405,38 @@ static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
        return -EINVAL;
 }
 
+/* Drop flow control pause frames */
+static void mvpp2_prs_drop_fc(struct mvpp2 *priv)
+{
+       unsigned char da[ETH_ALEN] = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x01 };
+       struct mvpp2_prs_entry pe;
+       unsigned int len;
+
+       memset(&pe, 0, sizeof(pe));
+
+       /* For all ports - drop flow control frames */
+       pe.index = MVPP2_PE_FC_DROP;
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+       /* Set match on DA */
+       len = ETH_ALEN;
+       while (len--)
+               mvpp2_prs_tcam_data_byte_set(&pe, len, da[len], 0xff);
+
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+                                MVPP2_PRS_RI_DROP_MASK);
+
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+       /* Mask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
 /* Enable/disable dropping all mac da's */
 static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
 {
@@ -1162,6 +1194,7 @@ static void mvpp2_prs_mac_init(struct mvpp2 *priv)
        mvpp2_prs_hw_write(priv, &pe);
 
        /* Create dummy entries for drop all and promiscuous modes */
+       mvpp2_prs_drop_fc(priv);
        mvpp2_prs_mac_drop_all_set(priv, 0, false);
        mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_UNI_CAST, false);
        mvpp2_prs_mac_promisc_set(priv, 0, MVPP2_PRS_L2_MULTI_CAST, false);
@@ -1647,8 +1680,9 @@ static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
        mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
        mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
                                 MVPP2_PRS_RI_L3_PROTO_MASK);
-       /* Skip eth_type + 4 bytes of IPv6 header */
-       mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+       /* Jump to DIP of IPV6 header */
+       mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+                                MVPP2_MAX_L3_ADDR_SIZE,
                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
        /* Set L3 offset */
        mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
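
For context, the new shift lands the parser on the IPv6 destination address
(DIP). A sketch of the arithmetic, assuming MVPP2_ETH_TYPE_LEN is 2 and
MVPP2_MAX_L3_ADDR_SIZE is 16 as defined in the mvpp2 headers:

        /* Offset from the current parse position (start of EtherType):
         *   MVPP2_ETH_TYPE_LEN         2 bytes  EtherType field itself
         *   + 8                        8 bytes  IPv6 version/TC/flow label,
         *                                       payload length, next header,
         *                                       hop limit
         *   + MVPP2_MAX_L3_ADDR_SIZE  16 bytes  IPv6 source address
         *   = 26 bytes, i.e. the first byte of the destination address.
         */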
index e22f6c85d380346312147daf531bf6c3626e9589..4b68dd37473388617efc76295488b753687a1a46 100644 (file)
 #define MVPP2_PE_VID_EDSA_FLTR_DEFAULT (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
 #define MVPP2_PE_VLAN_DBL              (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
 #define MVPP2_PE_VLAN_NONE             (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
-/* reserved */
+#define MVPP2_PE_FC_DROP               (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
 #define MVPP2_PE_MAC_MC_PROMISCUOUS    (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
 #define MVPP2_PE_MAC_UC_PROMISCUOUS    (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
 #define MVPP2_PE_MAC_NON_PROMISCUOUS   (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
index 9156c9825a16fae8467cd153fb592b367204f902..ac4cd5d82e696b91320a8c90756cce871c5e13e2 100644 (file)
@@ -337,7 +337,7 @@ void ionic_rx_fill(struct ionic_queue *q)
        unsigned int i, j;
        unsigned int len;
 
-       len = netdev->mtu + ETH_HLEN;
+       len = netdev->mtu + ETH_HLEN + VLAN_HLEN;
        nfrags = round_up(len, PAGE_SIZE) / PAGE_SIZE;
 
        for (i = ionic_q_space_avail(q); i; i--) {
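
With the VLAN tag now counted, the common cases still size as expected; a
quick check of the arithmetic, using ETH_HLEN = 14 and VLAN_HLEN = 4:

        /* MTU 1500: len = 1500 + 14 + 4 = 1518 -> nfrags = 1 (4 KiB pages)
         * MTU 9000: len = 9000 + 14 + 4 = 9018 -> nfrags = 3
         */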
index c52a38df0e0d8eac628bbc97460ef86ac4f7b972..6571a6a90c1a58928a342c07620a551b96ab6513 100644 (file)
@@ -37,7 +37,6 @@ config SMC91X
        tristate "SMC 91C9x/91C1xxx support"
        select CRC32
        select MII
-       depends on !OF || GPIOLIB
        depends on ARM || ARM64 || ATARI_ETHERNAT || COLDFIRE || \
                   MIPS || NIOS2 || SUPERH || XTENSA || H8300 || COMPILE_TEST
        help
index 742a1f7a838c95ed579a150742a08a5c061a118e..56c36798cb111b4ce6895d0d245ba2cef3a39358 100644 (file)
@@ -2190,6 +2190,7 @@ static const struct of_device_id smc91x_match[] = {
 };
 MODULE_DEVICE_TABLE(of, smc91x_match);
 
+#if defined(CONFIG_GPIOLIB)
 /**
  * try_toggle_control_gpio - configure a gpio if it exists
  * @dev: device
@@ -2220,6 +2221,15 @@ static int try_toggle_control_gpio(struct device *dev,
 
        return 0;
 }
+#else
+static int try_toggle_control_gpio(struct device *dev,
+                                  struct gpio_desc **desc,
+                                  const char *name, int index,
+                                  int value, unsigned int nsdelay)
+{
+       return 0;
+}
+#endif
 #endif
 
 /*
index b97c38b9a270135c5e4f25d84ea781885bc918c1..350b7913622cb76517c42fde3306acc89ba4799c 100644 (file)
@@ -185,7 +185,7 @@ int ath11k_core_suspend(struct ath11k_base *ab)
        ath11k_hif_ce_irq_disable(ab);
 
        ret = ath11k_hif_suspend(ab);
-       if (!ret) {
+       if (ret) {
                ath11k_warn(ab, "failed to suspend hif: %d\n", ret);
                return ret;
        }
index 205c0f1a40e91d33f84b85f0d68cf4fbf1e3bfb9..920e5026a635fceabfa6f3e388f034dbe886d932 100644 (file)
@@ -2294,6 +2294,7 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
 {
        u8 channel_num;
        u32 center_freq;
+       struct ieee80211_channel *channel;
 
        rx_status->freq = 0;
        rx_status->rate_idx = 0;
@@ -2314,9 +2315,12 @@ static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
                rx_status->band = NL80211_BAND_5GHZ;
        } else {
                spin_lock_bh(&ar->data_lock);
-               rx_status->band = ar->rx_channel->band;
-               channel_num =
-                       ieee80211_frequency_to_channel(ar->rx_channel->center_freq);
+               channel = ar->rx_channel;
+               if (channel) {
+                       rx_status->band = channel->band;
+                       channel_num =
+                               ieee80211_frequency_to_channel(channel->center_freq);
+               }
                spin_unlock_bh(&ar->data_lock);
                ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
                                rx_desc, sizeof(struct hal_rx_desc));
index 5c175e3e09b2850e9d03959c60d1369c80c7b6a0..c1608f64ea95d6b692701e8cf78c1aa9cd8d3872 100644 (file)
@@ -3021,6 +3021,7 @@ static int ath11k_mac_station_add(struct ath11k *ar,
        }
 
        if (ab->hw_params.vdev_start_delay &&
+           !arvif->is_started &&
            arvif->vdev_type != WMI_VDEV_TYPE_AP) {
                ret = ath11k_start_vdev_delay(ar->hw, vif);
                if (ret) {
@@ -5284,7 +5285,8 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
        /* for QCA6390 bss peer must be created before vdev_start */
        if (ab->hw_params.vdev_start_delay &&
            arvif->vdev_type != WMI_VDEV_TYPE_AP &&
-           arvif->vdev_type != WMI_VDEV_TYPE_MONITOR) {
+           arvif->vdev_type != WMI_VDEV_TYPE_MONITOR &&
+           !ath11k_peer_find_by_vdev_id(ab, arvif->vdev_id)) {
                memcpy(&arvif->chanctx, ctx, sizeof(*ctx));
                ret = 0;
                goto out;
@@ -5295,7 +5297,9 @@ ath11k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
                goto out;
        }
 
-       if (ab->hw_params.vdev_start_delay) {
+       if (ab->hw_params.vdev_start_delay &&
+           (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
+           arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)) {
                param.vdev_id = arvif->vdev_id;
                param.peer_type = WMI_PEER_TYPE_DEFAULT;
                param.peer_addr = ar->mac_addr;
index 857647aa57c8a7a66c41eaafa430667d062bc88e..20b415cd96c4aadaa233aed4cbabbae6996c4201 100644 (file)
@@ -274,7 +274,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
                                      PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG,
                                      PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
                                      PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
-       if (!ret) {
+       if (ret) {
                ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
                return ret;
        }
@@ -283,7 +283,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
-       if (!ret) {
+       if (ret) {
                ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
                return ret;
        }
@@ -292,7 +292,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
-       if (!ret) {
+       if (ret) {
                ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
                return ret;
        }
@@ -301,7 +301,7 @@ static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL,
                                      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
-       if (!ret) {
+       if (ret) {
                ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
                return ret;
        }
@@ -886,6 +886,32 @@ static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
                pci_disable_device(pci_dev);
 }
 
+static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
+{
+       struct ath11k_base *ab = ab_pci->ab;
+
+       pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+                                 &ab_pci->link_ctl);
+
+       ath11k_dbg(ab, ATH11K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
+                  ab_pci->link_ctl,
+                  u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
+                  u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));
+
+       /* disable L0s and L1 */
+       pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+                                  ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);
+
+       set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
+}
+
+static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
+{
+       if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
+               pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
+                                          ab_pci->link_ctl);
+}
+
 static int ath11k_pci_power_up(struct ath11k_base *ab)
 {
        struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
@@ -895,6 +921,11 @@ static int ath11k_pci_power_up(struct ath11k_base *ab)
        clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
        ath11k_pci_sw_reset(ab_pci->ab, true);
 
+       /* Disable ASPM during firmware download due to problems switching
+        * to AMSS state.
+        */
+       ath11k_pci_aspm_disable(ab_pci);
+
        ret = ath11k_mhi_start(ab_pci);
        if (ret) {
                ath11k_err(ab, "failed to start mhi: %d\n", ret);
@@ -908,6 +939,9 @@ static void ath11k_pci_power_down(struct ath11k_base *ab)
 {
        struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
 
+       /* restore aspm in case firmware bootup fails */
+       ath11k_pci_aspm_restore(ab_pci);
+
        ath11k_pci_force_wake(ab_pci->ab);
        ath11k_mhi_stop(ab_pci);
        clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
@@ -965,6 +999,8 @@ static int ath11k_pci_start(struct ath11k_base *ab)
 
        set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
 
+       ath11k_pci_aspm_restore(ab_pci);
+
        ath11k_pci_ce_irqs_enable(ab);
        ath11k_ce_rx_post_buf(ab);
 
index 0432a702416b42b6dd6bd3046c5c9db61a44b530..fe44d0dfce1956912474eff47f2a86c6ebd0cbb3 100644 (file)
@@ -63,6 +63,7 @@ struct ath11k_msi_config {
 enum ath11k_pci_flags {
        ATH11K_PCI_FLAG_INIT_DONE,
        ATH11K_PCI_FLAG_IS_MSI_64,
+       ATH11K_PCI_ASPM_RESTORE,
 };
 
 struct ath11k_pci {
@@ -80,6 +81,7 @@ struct ath11k_pci {
 
        /* enum ath11k_pci_flags */
        unsigned long flags;
+       u16 link_ctl;
 };
 
 static inline struct ath11k_pci *ath11k_pci_priv(struct ath11k_base *ab)
index 1866d82678fa9fc981cfea09946f0825111c3738..b69e7ebfa930327fdf8b10260c312d5888cf43bc 100644 (file)
@@ -76,6 +76,23 @@ struct ath11k_peer *ath11k_peer_find_by_id(struct ath11k_base *ab,
        return NULL;
 }
 
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+                                               int vdev_id)
+{
+       struct ath11k_peer *peer;
+
+       spin_lock_bh(&ab->base_lock);
+
+       list_for_each_entry(peer, &ab->peers, list) {
+               if (vdev_id == peer->vdev_id) {
+                       spin_unlock_bh(&ab->base_lock);
+                       return peer;
+               }
+       }
+       spin_unlock_bh(&ab->base_lock);
+       return NULL;
+}
+
 void ath11k_peer_unmap_event(struct ath11k_base *ab, u16 peer_id)
 {
        struct ath11k_peer *peer;
index bba2e00b6944aeb00c31f4b80341512528bb52ab..8553ed061aeaaf12b5598a08385cfcc2018fae74 100644 (file)
@@ -43,5 +43,7 @@ int ath11k_peer_create(struct ath11k *ar, struct ath11k_vif *arvif,
                       struct ieee80211_sta *sta, struct peer_create_params *param);
 int ath11k_wait_for_peer_delete_done(struct ath11k *ar, u32 vdev_id,
                                     const u8 *addr);
+struct ath11k_peer *ath11k_peer_find_by_vdev_id(struct ath11k_base *ab,
+                                               int vdev_id);
 
 #endif /* _PEER_H_ */
index f0b5c50974f3e6b6ca04801964c49dfae6c067ae..0db623ff4bb9b33991df8620cb18fb4689e32906 100644 (file)
@@ -1660,6 +1660,7 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
        struct qmi_wlanfw_respond_mem_resp_msg_v01 resp;
        struct qmi_txn txn = {};
        int ret = 0, i;
+       bool delayed;
 
        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (!req)
@@ -1672,11 +1673,13 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
         * failure to FW and FW will then request multiple blocks of small
         * chunk size memory.
         */
-       if (!ab->bus_params.fixed_mem_region && ab->qmi.mem_seg_count <= 2) {
+       if (!ab->bus_params.fixed_mem_region && ab->qmi.target_mem_delayed) {
+               delayed = true;
                ath11k_dbg(ab, ATH11K_DBG_QMI, "qmi delays mem_request %d\n",
                           ab->qmi.mem_seg_count);
                memset(req, 0, sizeof(*req));
        } else {
+               delayed = false;
                req->mem_seg_len = ab->qmi.mem_seg_count;
 
                for (i = 0; i < req->mem_seg_len ; i++) {
@@ -1708,6 +1711,12 @@ static int ath11k_qmi_respond_fw_mem_request(struct ath11k_base *ab)
        }
 
        if (resp.resp.result != QMI_RESULT_SUCCESS_V01) {
+               /* the error response is expected when
+                * target_mem_delayed is true.
+                */
+               if (delayed && resp.resp.error == 0)
+                       goto out;
+
                ath11k_warn(ab, "Respond mem req failed, result: %d, err: %d\n",
                            resp.resp.result, resp.resp.error);
                ret = -EINVAL;
@@ -1742,6 +1751,8 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
        int i;
        struct target_mem_chunk *chunk;
 
+       ab->qmi.target_mem_delayed = false;
+
        for (i = 0; i < ab->qmi.mem_seg_count; i++) {
                chunk = &ab->qmi.target_mem[i];
                chunk->vaddr = dma_alloc_coherent(ab->dev,
@@ -1749,6 +1760,15 @@ static int ath11k_qmi_alloc_target_mem_chunk(struct ath11k_base *ab)
                                                  &chunk->paddr,
                                                  GFP_KERNEL);
                if (!chunk->vaddr) {
+                       if (ab->qmi.mem_seg_count <= 2) {
+                               ath11k_dbg(ab, ATH11K_DBG_QMI,
+                                          "qmi dma allocation failed (%d B type %u), will try later with small size\n",
+                                           chunk->size,
+                                           chunk->type);
+                               ath11k_qmi_free_target_mem_chunk(ab);
+                               ab->qmi.target_mem_delayed = true;
+                               return 0;
+                       }
                        ath11k_err(ab, "failed to alloc memory, size: 0x%x, type: %u\n",
                                   chunk->size,
                                   chunk->type);
@@ -2517,7 +2537,7 @@ static void ath11k_qmi_msg_mem_request_cb(struct qmi_handle *qmi_hdl,
                                    ret);
                        return;
                }
-       } else if (msg->mem_seg_len > 2) {
+       } else {
                ret = ath11k_qmi_alloc_target_mem_chunk(ab);
                if (ret) {
                        ath11k_warn(ab, "qmi failed to alloc target memory: %d\n",
index 92925c9eac67499656f47b7925c17e4bd9f7e634..7bad374cc23a63c12152d7b12ca1772aca1bee29 100644 (file)
@@ -125,6 +125,7 @@ struct ath11k_qmi {
        struct target_mem_chunk target_mem[ATH11K_QMI_WLANFW_MAX_NUM_MEM_SEG_V01];
        u32 mem_seg_count;
        u32 target_mem_mode;
+       bool target_mem_delayed;
        u8 cal_done;
        struct target_info target;
        struct m3_mem_region m3_mem;
index da4b546b62cb5d583d2a28038fba9f4a96f46012..73869d445c5b3a151179539a34cd254c79cb68aa 100644 (file)
@@ -3460,6 +3460,9 @@ int ath11k_wmi_set_hw_mode(struct ath11k_base *ab,
        len = sizeof(*cmd);
 
        skb = ath11k_wmi_alloc_skb(wmi_ab, len);
+       if (!skb)
+               return -ENOMEM;
+
        cmd = (struct wmi_pdev_set_hw_mode_cmd_param *)skb->data;
 
        cmd->tlv_header = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_PDEV_SET_HW_MODE_CMD) |
index ed4635bd151a2deb6fb072a86388c56c97044915..102a8f14c22d4f20ce0126e70311fe10aa9ce419 100644 (file)
@@ -40,9 +40,9 @@ static const struct ieee80211_iface_limit if_limits[] = {
                .types = BIT(NL80211_IFTYPE_ADHOC)
        }, {
                .max = 16,
-               .types = BIT(NL80211_IFTYPE_AP) |
+               .types = BIT(NL80211_IFTYPE_AP)
 #ifdef CONFIG_MAC80211_MESH
-                        BIT(NL80211_IFTYPE_MESH_POINT)
+                        | BIT(NL80211_IFTYPE_MESH_POINT)
 #endif
        }, {
                .max = MT7915_MAX_INTERFACES,
index 62b5b912818fa205cbf2c69bf820cceab0b847f7..0b6facb17ff722772981476943c1a0d3a6f5c2d8 100644 (file)
@@ -157,10 +157,14 @@ static void mt76s_net_worker(struct mt76_worker *w)
 
 static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
 {
-       bool wake, mcu = q == dev->q_mcu[MT_MCUQ_WM];
        struct mt76_queue_entry entry;
        int nframes = 0;
+       bool mcu;
 
+       if (!q)
+               return 0;
+
+       mcu = q == dev->q_mcu[MT_MCUQ_WM];
        while (q->queued > 0) {
                if (!q->entry[q->tail].done)
                        break;
@@ -177,21 +181,12 @@ static int mt76s_process_tx_queue(struct mt76_dev *dev, struct mt76_queue *q)
                nframes++;
        }
 
-       wake = q->stopped && q->queued < q->ndesc - 8;
-       if (wake)
-               q->stopped = false;
-
        if (!q->queued)
                wake_up(&dev->tx_wait);
 
-       if (mcu)
-               goto out;
-
-       mt76_txq_schedule(&dev->phy, q->qid);
+       if (!mcu)
+               mt76_txq_schedule(&dev->phy, q->qid);
 
-       if (wake)
-               ieee80211_wake_queue(dev->hw, q->qid);
-out:
        return nframes;
 }
 
index dc850109de22d66a9755a75db5a9164d88dcf557..b95d093728b9b7d0fc9b6a8cd52b87e31ea0bacf 100644 (file)
@@ -811,11 +811,12 @@ static void mt76u_status_worker(struct mt76_worker *w)
        struct mt76_dev *dev = container_of(usb, struct mt76_dev, usb);
        struct mt76_queue_entry entry;
        struct mt76_queue *q;
-       bool wake;
        int i;
 
        for (i = 0; i < IEEE80211_NUM_ACS; i++) {
                q = dev->phy.q_tx[i];
+               if (!q)
+                       continue;
 
                while (q->queued > 0) {
                        if (!q->entry[q->tail].done)
@@ -827,10 +828,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
                        mt76_queue_tx_complete(dev, q, &entry);
                }
 
-               wake = q->stopped && q->queued < q->ndesc - 8;
-               if (wake)
-                       q->stopped = false;
-
                if (!q->queued)
                        wake_up(&dev->tx_wait);
 
@@ -839,8 +836,6 @@ static void mt76u_status_worker(struct mt76_worker *w)
                if (dev->drv->tx_status_data &&
                    !test_and_set_bit(MT76_READING_STATS, &dev->phy.state))
                        queue_work(dev->wq, &dev->usb.stat_work);
-               if (wake)
-                       ieee80211_wake_queue(dev->hw, i);
        }
 }
 
index a7259dbc953da7a0158b6f8398ef8eff4df02e28..965bd95890459313c975dffbc17156f8e660d109 100644 (file)
@@ -78,7 +78,6 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
 
        rtl_dbg(rtlpriv, COMP_ERR, DBG_LOUD,
                "Firmware callback routine entered!\n");
-       complete(&rtlpriv->firmware_loading_complete);
        if (!firmware) {
                if (rtlpriv->cfg->alt_fw_name) {
                        err = request_firmware(&firmware,
@@ -91,13 +90,13 @@ static void rtl_fw_do_work(const struct firmware *firmware, void *context,
                }
                pr_err("Selected firmware is not available\n");
                rtlpriv->max_fw_size = 0;
-               return;
+               goto exit;
        }
 found_alt:
        if (firmware->size > rtlpriv->max_fw_size) {
                pr_err("Firmware is too big!\n");
                release_firmware(firmware);
-               return;
+               goto exit;
        }
        if (!is_wow) {
                memcpy(rtlpriv->rtlhal.pfirmware, firmware->data,
@@ -109,6 +108,9 @@ found_alt:
                rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
        }
        release_firmware(firmware);
+
+exit:
+       complete(&rtlpriv->firmware_loading_complete);
 }
 
 void rtl_fw_cb(const struct firmware *firmware, void *context)
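
The point of the rework above is that the completion now fires exactly once,
on every path, and only after the image has been copied (or cleanly
rejected); a sketch of the ordering change, derived from the hunk:

        /* before: complete() ran on entry, so a waiter could proceed while
         *         rtlhal.pfirmware was still being filled in.
         * after:  every path falls through to the 'exit' label, so the
         *         waiter only resumes once loading has finished or failed.
         */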
index e6d5d98c3ceaba048653ce26ed94f49db74845c6..9309765d0450ec8fe9f1eca04f5b638fd8db2e86 100644 (file)
 #include <linux/regulator/of_regulator.h>
 #include <linux/slab.h>
 
+/* Typical regulator startup times in microseconds, per the data sheet */
+#define BD71847_BUCK1_STARTUP_TIME 144
+#define BD71847_BUCK2_STARTUP_TIME 162
+#define BD71847_BUCK3_STARTUP_TIME 162
+#define BD71847_BUCK4_STARTUP_TIME 240
+#define BD71847_BUCK5_STARTUP_TIME 270
+#define BD71847_BUCK6_STARTUP_TIME 200
+#define BD71847_LDO1_STARTUP_TIME  440
+#define BD71847_LDO2_STARTUP_TIME  370
+#define BD71847_LDO3_STARTUP_TIME  310
+#define BD71847_LDO4_STARTUP_TIME  400
+#define BD71847_LDO5_STARTUP_TIME  530
+#define BD71847_LDO6_STARTUP_TIME  400
+
+#define BD71837_BUCK1_STARTUP_TIME 160
+#define BD71837_BUCK2_STARTUP_TIME 180
+#define BD71837_BUCK3_STARTUP_TIME 180
+#define BD71837_BUCK4_STARTUP_TIME 180
+#define BD71837_BUCK5_STARTUP_TIME 160
+#define BD71837_BUCK6_STARTUP_TIME 240
+#define BD71837_BUCK7_STARTUP_TIME 220
+#define BD71837_BUCK8_STARTUP_TIME 200
+#define BD71837_LDO1_STARTUP_TIME  440
+#define BD71837_LDO2_STARTUP_TIME  370
+#define BD71837_LDO3_STARTUP_TIME  310
+#define BD71837_LDO4_STARTUP_TIME  400
+#define BD71837_LDO5_STARTUP_TIME  310
+#define BD71837_LDO6_STARTUP_TIME  400
+#define BD71837_LDO7_STARTUP_TIME  530
+
 /*
  * BD718(37/47/50) have two "enable control modes". ON/OFF can either be
  * controlled by software - or by PMIC internal HW state machine. Whether
@@ -613,6 +643,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD718XX_REG_BUCK1_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK1_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -646,6 +677,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD718XX_REG_BUCK2_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK2_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -680,6 +712,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .linear_range_selectors = bd71847_buck3_volt_range_sel,
                        .enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK3_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -706,6 +739,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_range_mask = BD71847_BUCK4_RANGE_MASK,
                        .linear_range_selectors = bd71847_buck4_volt_range_sel,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK4_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -727,6 +761,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
                        .enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK5_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -750,6 +785,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
                        .enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71847_BUCK6_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -775,6 +811,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .linear_range_selectors = bd718xx_ldo1_volt_range_sel,
                        .enable_reg = BD718XX_REG_LDO1_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO1_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -796,6 +833,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .n_voltages = ARRAY_SIZE(ldo_2_volts),
                        .enable_reg = BD718XX_REG_LDO2_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO2_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -818,6 +856,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = BD718XX_LDO3_MASK,
                        .enable_reg = BD718XX_REG_LDO3_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO3_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -840,6 +879,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = BD718XX_LDO4_MASK,
                        .enable_reg = BD718XX_REG_LDO4_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO4_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -865,6 +905,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .linear_range_selectors = bd71847_ldo5_volt_range_sel,
                        .enable_reg = BD718XX_REG_LDO5_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO5_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -889,6 +930,7 @@ static struct bd718xx_regulator_data bd71847_regulators[] = {
                        .vsel_mask = BD718XX_LDO6_MASK,
                        .enable_reg = BD718XX_REG_LDO6_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71847_LDO6_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -942,6 +984,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD718XX_REG_BUCK1_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK1_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -975,6 +1018,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD718XX_REG_BUCK2_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK2_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -1005,6 +1049,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD71837_REG_BUCK3_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK3_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -1033,6 +1078,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = DVS_BUCK_RUN_MASK,
                        .enable_reg = BD71837_REG_BUCK4_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK4_STARTUP_TIME,
                        .owner = THIS_MODULE,
                        .of_parse_cb = buck_set_hw_dvs_levels,
                },
@@ -1065,6 +1111,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .linear_range_selectors = bd71837_buck5_volt_range_sel,
                        .enable_reg = BD718XX_REG_1ST_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK5_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1088,6 +1135,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD71837_BUCK6_MASK,
                        .enable_reg = BD718XX_REG_2ND_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK6_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1109,6 +1157,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD718XX_3RD_NODVS_BUCK_MASK,
                        .enable_reg = BD718XX_REG_3RD_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK7_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1132,6 +1181,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD718XX_4TH_NODVS_BUCK_MASK,
                        .enable_reg = BD718XX_REG_4TH_NODVS_BUCK_CTRL,
                        .enable_mask = BD718XX_BUCK_EN,
+                       .enable_time = BD71837_BUCK8_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1157,6 +1207,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .linear_range_selectors = bd718xx_ldo1_volt_range_sel,
                        .enable_reg = BD718XX_REG_LDO1_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO1_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1178,6 +1229,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .n_voltages = ARRAY_SIZE(ldo_2_volts),
                        .enable_reg = BD718XX_REG_LDO2_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO2_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1200,6 +1252,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD718XX_LDO3_MASK,
                        .enable_reg = BD718XX_REG_LDO3_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO3_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1222,6 +1275,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD718XX_LDO4_MASK,
                        .enable_reg = BD718XX_REG_LDO4_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO4_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1246,6 +1300,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD71837_LDO5_MASK,
                        .enable_reg = BD718XX_REG_LDO5_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO5_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1272,6 +1327,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD718XX_LDO6_MASK,
                        .enable_reg = BD718XX_REG_LDO6_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO6_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
@@ -1296,6 +1352,7 @@ static struct bd718xx_regulator_data bd71837_regulators[] = {
                        .vsel_mask = BD71837_LDO7_MASK,
                        .enable_reg = BD71837_REG_LDO7_VOLT,
                        .enable_mask = BD718XX_LDO_EN,
+                       .enable_time = BD71837_LDO7_STARTUP_TIME,
                        .owner = THIS_MODULE,
                },
                .init = {
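
Wiring the data-sheet startup times into .enable_time means the regulator
core, not each consumer, waits out the ramp. A minimal consumer-side sketch,
assuming a hypothetical "buck1" supply name:

        /* The ramp delay is handled inside the core; no explicit sleep is
         * needed after regulator_enable().
         */
        static int enable_buck1(struct device *dev)
        {
                struct regulator *buck = devm_regulator_get(dev, "buck1");

                if (IS_ERR(buck))
                        return PTR_ERR(buck);
                /* blocks ~BD71847_BUCK1_STARTUP_TIME us per .enable_time */
                return regulator_enable(buck);
        }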
index 308c27fa6ea80b85e82aba082f898cd910ee2e1b..af9918cd27aa435ef52a2c424811a830eec59795 100644 (file)
@@ -469,13 +469,17 @@ static int pf8x00_i2c_probe(struct i2c_client *client)
 }
 
 static const struct of_device_id pf8x00_dt_ids[] = {
-       { .compatible = "nxp,pf8x00",},
+       { .compatible = "nxp,pf8100",},
+       { .compatible = "nxp,pf8121a",},
+       { .compatible = "nxp,pf8200",},
        { }
 };
 MODULE_DEVICE_TABLE(of, pf8x00_dt_ids);
 
 static const struct i2c_device_id pf8x00_i2c_id[] = {
-       { "pf8x00", 0 },
+       { "pf8100", 0 },
+       { "pf8121a", 0 },
+       { "pf8200", 0 },
        {},
 };
 MODULE_DEVICE_TABLE(i2c, pf8x00_i2c_id);
index 71ab75a464917a5c5146c89c44f94861f9892210..752bef7e564f9d902407405834e691e4f88bdc3c 100644 (file)
@@ -173,7 +173,7 @@ config RESET_SCMI
 
 config RESET_SIMPLE
        bool "Simple Reset Controller Driver" if COMPILE_TEST
-       default ARCH_AGILEX || ARCH_ASPEED || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARC
+       default ARCH_AGILEX || ARCH_ASPEED || ARCH_BCM4908 || ARCH_BITMAIN || ARCH_REALTEK || ARCH_STM32 || ARCH_STRATIX10 || ARCH_SUNXI || ARCH_ZX || ARC
        help
          This enables a simple reset controller driver for reset lines that
          can be asserted and deasserted by toggling bits in a contiguous,
index a7d4445924e558c10cf3db3b2269d6bcf322acb0..965f5ceba7d8f4da82b41753f786c4d9df206917 100644 (file)
@@ -83,9 +83,14 @@ static int hi3660_reset_probe(struct platform_device *pdev)
        if (!rc)
                return -ENOMEM;
 
-       rc->map = syscon_regmap_lookup_by_phandle(np, "hisi,rst-syscon");
+       rc->map = syscon_regmap_lookup_by_phandle(np, "hisilicon,rst-syscon");
+       if (rc->map == ERR_PTR(-ENODEV)) {
+               /* fall back to the deprecated compatible */
+               rc->map = syscon_regmap_lookup_by_phandle(np,
+                                                         "hisi,rst-syscon");
+       }
        if (IS_ERR(rc->map)) {
-               dev_err(dev, "failed to get hi3660,rst-syscon\n");
+               dev_err(dev, "failed to get hisilicon,rst-syscon\n");
                return PTR_ERR(rc->map);
        }
 
index e066614818a35a9b30e5f37815b7c60808e1cb72..4dda0daf2c6f51ae94da02a0b500718d1c9c2a19 100644 (file)
@@ -146,6 +146,8 @@ static const struct of_device_id reset_simple_dt_ids[] = {
        { .compatible = "aspeed,ast2500-lpc-reset" },
        { .compatible = "bitmain,bm1880-reset",
                .data = &reset_simple_active_low },
+       { .compatible = "brcm,bcm4908-misc-pcie-reset",
+               .data = &reset_simple_active_low },
        { .compatible = "snps,dw-high-reset" },
        { .compatible = "snps,dw-low-reset",
                .data = &reset_simple_active_low },
index b206e266b4e7263203c215969fb9b9346c0d16ef..8b0deece9758b8d6bdd0d02dd0b56cb3278bebc9 100644 (file)
@@ -4,6 +4,7 @@ config SCSI_CXGB4_ISCSI
        depends on PCI && INET && (IPV6 || IPV6=n)
        depends on THERMAL || !THERMAL
        depends on ETHERNET
+       depends on TLS || TLS=n
        select NET_VENDOR_CHELSIO
        select CHELSIO_T4
        select CHELSIO_LIB
index 969baf4cd3f5e9e5fab717afc787e3f4e2bad1f5..6e23dc3209feb9ecf58bf45666f33f21f407d025 100644 (file)
@@ -5034,7 +5034,7 @@ _base_check_for_trigger_pages_support(struct MPT3SAS_ADAPTER *ioc)
 static void
 _base_get_diag_triggers(struct MPT3SAS_ADAPTER *ioc)
 {
-       u16 trigger_flags;
+       int trigger_flags;
 
        /*
         * Default setting of master trigger.
index 4848ae3c7b561d0c3b9e9d68c2b0d95bf44b232a..b3f14f05340ad63a9fe5d4377cfb118d57d4a937 100644 (file)
@@ -249,7 +249,8 @@ int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
 
        req = blk_get_request(sdev->request_queue,
                        data_direction == DMA_TO_DEVICE ?
-                       REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN, BLK_MQ_REQ_PREEMPT);
+                       REQ_OP_SCSI_OUT : REQ_OP_SCSI_IN,
+                       rq_flags & RQF_PM ? BLK_MQ_REQ_PM : 0);
        if (IS_ERR(req))
                return ret;
        rq = scsi_req(req);
@@ -1206,6 +1207,8 @@ static blk_status_t
 scsi_device_state_check(struct scsi_device *sdev, struct request *req)
 {
        switch (sdev->sdev_state) {
+       case SDEV_CREATED:
+               return BLK_STS_OK;
        case SDEV_OFFLINE:
        case SDEV_TRANSPORT_OFFLINE:
                /*
@@ -1232,18 +1235,18 @@ scsi_device_state_check(struct scsi_device *sdev, struct request *req)
                return BLK_STS_RESOURCE;
        case SDEV_QUIESCE:
                /*
-                * If the devices is blocked we defer normal commands.
+                * If the device is quiesced we only accept power management
+                * commands.
                 */
-               if (req && !(req->rq_flags & RQF_PREEMPT))
+               if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM)))
                        return BLK_STS_RESOURCE;
                return BLK_STS_OK;
        default:
                /*
                 * For any other not fully online state we only allow
-                * special commands.  In particular any user initiated
-                * command is not allowed.
+                * power management commands.
                 */
-               if (req && !(req->rq_flags & RQF_PREEMPT))
+               if (req && !(req->rq_flags & RQF_PM))
                        return BLK_STS_IOERR;
                return BLK_STS_OK;
        }
@@ -2516,15 +2519,13 @@ void sdev_evt_send_simple(struct scsi_device *sdev,
 EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
 
 /**
- *     scsi_device_quiesce - Block user issued commands.
+ *     scsi_device_quiesce - Block all commands except power management.
  *     @sdev:  scsi device to quiesce.
  *
  *     This works by trying to transition to the SDEV_QUIESCE state
  *     (which must be a legal transition).  When the device is in this
- *     state, only special requests will be accepted, all others will
- *     be deferred.  Since special requests may also be requeued requests,
- *     a successful return doesn't guarantee the device will be
- *     totally quiescent.
+ *     state, only power management requests will be accepted, all others will
+ *     be deferred.
  *
  *     Must be called with user context, may sleep.
  *
@@ -2586,12 +2587,12 @@ void scsi_device_resume(struct scsi_device *sdev)
         * device deleted during suspend)
         */
        mutex_lock(&sdev->state_mutex);
+       if (sdev->sdev_state == SDEV_QUIESCE)
+               scsi_device_set_state(sdev, SDEV_RUNNING);
        if (sdev->quiesced_by) {
                sdev->quiesced_by = NULL;
                blk_clear_pm_only(sdev->request_queue);
        }
-       if (sdev->sdev_state == SDEV_QUIESCE)
-               scsi_device_set_state(sdev, SDEV_RUNNING);
        mutex_unlock(&sdev->state_mutex);
 }
 EXPORT_SYMBOL(scsi_device_resume);
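
The RQF_PM conversion above means any command that must run against a
quiesced device has to carry that flag; the spi transport hunk below is one
such caller. A minimal submission sketch using the same scsi_execute()
signature (hypothetical cmd/timeout values):

        /* RQF_PM lets the command through scsi_device_state_check() in
         * SDEV_QUIESCE, and __scsi_execute() then allocates the request
         * with BLK_MQ_REQ_PM so a pm_only queue accepts it.
         */
        result = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, sense, &sshdr,
                              timeout, retries, 0, RQF_PM, NULL);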
index f3d5b1bbd5aa7eff36c5d91cd9d2df6e41a77f8d..c37dd15d16d24f20b6065e6d7141da6c955fd58c 100644 (file)
@@ -117,12 +117,16 @@ static int spi_execute(struct scsi_device *sdev, const void *cmd,
                sshdr = &sshdr_tmp;
 
        for(i = 0; i < DV_RETRIES; i++) {
+               /*
+                * The purpose of the RQF_PM flag below is to bypass the
+                * SDEV_QUIESCE state.
+                */
                result = scsi_execute(sdev, cmd, dir, buffer, bufflen, sense,
                                      sshdr, DV_TIMEOUT, /* retries */ 1,
                                      REQ_FAILFAST_DEV |
                                      REQ_FAILFAST_TRANSPORT |
                                      REQ_FAILFAST_DRIVER,
-                                     0, NULL);
+                                     RQF_PM, NULL);
                if (driver_byte(result) != DRIVER_SENSE ||
                    sshdr->sense_key != UNIT_ATTENTION)
                        break;
@@ -1005,23 +1009,26 @@ spi_dv_device(struct scsi_device *sdev)
         */
        lock_system_sleep();
 
+       if (scsi_autopm_get_device(sdev))
+               goto unlock_system_sleep;
+
        if (unlikely(spi_dv_in_progress(starget)))
-               goto unlock;
+               goto put_autopm;
 
        if (unlikely(scsi_device_get(sdev)))
-               goto unlock;
+               goto put_autopm;
 
        spi_dv_in_progress(starget) = 1;
 
        buffer = kzalloc(len, GFP_KERNEL);
 
        if (unlikely(!buffer))
-               goto out_put;
+               goto put_sdev;
 
        /* We need to verify that the actual device will quiesce; the
         * later target quiesce is just a nice to have */
        if (unlikely(scsi_device_quiesce(sdev)))
-               goto out_free;
+               goto free_buffer;
 
        scsi_target_quiesce(starget);
 
@@ -1041,12 +1048,16 @@ spi_dv_device(struct scsi_device *sdev)
 
        spi_initial_dv(starget) = 1;
 
- out_free:
+free_buffer:
        kfree(buffer);
- out_put:
+
+put_sdev:
        spi_dv_in_progress(starget) = 0;
        scsi_device_put(sdev);
-unlock:
+put_autopm:
+       scsi_autopm_put_device(sdev);
+
+unlock_system_sleep:
        unlock_system_sleep();
 }
 EXPORT_SYMBOL(spi_dv_device);
index c2afba2a5414df7115ed16a289b33fde9002487a..9624298b9c89df8e9ed137e6fd8db5678bb9ef71 100644 (file)
@@ -690,6 +690,11 @@ static int ses_intf_add(struct device *cdev,
                    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE)
                        components += type_ptr[1];
        }
+       if (components == 0) {
+               sdev_printk(KERN_ERR, sdev, "enclosure has no enumerated components\n");
+               goto err_free;
+       }
+
        ses_dev->page1 = buf;
        ses_dev->page1_len = len;
        buf = NULL;
index fd6f84c1b4e2256454b52c0b9373c6c09a9fe9e8..895e82ea6ece551d0a5c5fa5c6ba8cf3ee3452e0 100644 (file)
@@ -31,6 +31,6 @@ TRACE_EVENT(ufs_mtk_event,
 
 #undef TRACE_INCLUDE_PATH
 #undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_PATH ../../drivers/scsi/ufs/
 #define TRACE_INCLUDE_FILE ufs-mediatek-trace
 #include <trace/define_trace.h>
index 3522458db3bbd0ce479d8b57c6b345ad70413dfc..80618af7c87203b256ce26087579fd582038847e 100644 (file)
@@ -70,6 +70,13 @@ static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba)
        return !!(host->caps & UFS_MTK_CAP_VA09_PWR_CTRL);
 }
 
+static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba)
+{
+       struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+
+       return !!(host->caps & UFS_MTK_CAP_BROKEN_VCC);
+}
+
 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable)
 {
        u32 tmp;
@@ -514,6 +521,9 @@ static void ufs_mtk_init_host_caps(struct ufs_hba *hba)
        if (of_property_read_bool(np, "mediatek,ufs-disable-ah8"))
                host->caps |= UFS_MTK_CAP_DISABLE_AH8;
 
+       if (of_property_read_bool(np, "mediatek,ufs-broken-vcc"))
+               host->caps |= UFS_MTK_CAP_BROKEN_VCC;
+
        dev_info(hba->dev, "caps: 0x%x", host->caps);
 }
 
@@ -1003,6 +1013,17 @@ static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba)
 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
 {
        ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups);
+
+       if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc &&
+           (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) {
+               hba->vreg_info.vcc->always_on = true;
+               /*
+                * VCC will be kept always-on, so we don't
+                * need any delay during regulator operations.
+                */
+               hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM |
+                       UFS_DEVICE_QUIRK_DELAY_AFTER_LPM);
+       }
 }
 
 static void ufs_mtk_event_notify(struct ufs_hba *hba,
index 93d35097dfb0aeb03c7eaad9e1405527edd51ec2..3f0d3bb769e89b4a4a2543e952580f59e30cf215 100644 (file)
@@ -81,6 +81,7 @@ enum ufs_mtk_host_caps {
        UFS_MTK_CAP_BOOST_CRYPT_ENGINE         = 1 << 0,
        UFS_MTK_CAP_VA09_PWR_CTRL              = 1 << 1,
        UFS_MTK_CAP_DISABLE_AH8                = 1 << 2,
+       UFS_MTK_CAP_BROKEN_VCC                 = 1 << 3,
 };
 
 struct ufs_mtk_crypt_cfg {
index d593edb487677189e45bc2a51b7436ba333df083..14dfda735adf5ac2c2a06d779724f706adbfee62 100644 (file)
@@ -330,7 +330,6 @@ enum {
        UFS_DEV_WRITE_BOOSTER_SUP       = BIT(8),
 };
 
-#define POWER_DESC_MAX_SIZE                    0x62
 #define POWER_DESC_MAX_ACTV_ICC_LVLS           16
 
 /* Attribute  bActiveICCLevel parameter bit masks definitions */
@@ -513,6 +512,7 @@ struct ufs_query_res {
 struct ufs_vreg {
        struct regulator *reg;
        const char *name;
+       bool always_on;
        bool enabled;
        int min_uV;
        int max_uV;
index df3a564c3e334875ed9a2eab331344f3468031b1..fadd566025b86ab4ffe18aefd366823ea6f1ae98 100644 (file)
@@ -148,6 +148,8 @@ static int ufs_intel_common_init(struct ufs_hba *hba)
 {
        struct intel_host *host;
 
+       hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;
+
        host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;
@@ -163,6 +165,41 @@ static void ufs_intel_common_exit(struct ufs_hba *hba)
        intel_ltr_hide(hba->dev);
 }
 
+static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
+{
+       /*
+        * To support S4 (suspend-to-disk) with spm_lvl other than 5, the base
+        * address registers must be restored because the restore kernel can
+        * have used different addresses.
+        */
+       ufshcd_writel(hba, lower_32_bits(hba->utrdl_dma_addr),
+                     REG_UTP_TRANSFER_REQ_LIST_BASE_L);
+       ufshcd_writel(hba, upper_32_bits(hba->utrdl_dma_addr),
+                     REG_UTP_TRANSFER_REQ_LIST_BASE_H);
+       ufshcd_writel(hba, lower_32_bits(hba->utmrdl_dma_addr),
+                     REG_UTP_TASK_REQ_LIST_BASE_L);
+       ufshcd_writel(hba, upper_32_bits(hba->utmrdl_dma_addr),
+                     REG_UTP_TASK_REQ_LIST_BASE_H);
+
+       if (ufshcd_is_link_hibern8(hba)) {
+               int ret = ufshcd_uic_hibern8_exit(hba);
+
+               if (!ret) {
+                       ufshcd_set_link_active(hba);
+               } else {
+                       dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+                               __func__, ret);
+                       /*
+                        * Force reset and restore. Any other actions can lead
+                        * to an unrecoverable state.
+                        */
+                       ufshcd_set_link_off(hba);
+               }
+       }
+
+       return 0;
+}
+
 static int ufs_intel_ehl_init(struct ufs_hba *hba)
 {
        hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
@@ -174,6 +211,7 @@ static struct ufs_hba_variant_ops ufs_intel_cnl_hba_vops = {
        .init                   = ufs_intel_common_init,
        .exit                   = ufs_intel_common_exit,
        .link_startup_notify    = ufs_intel_link_startup_notify,
+       .resume                 = ufs_intel_resume,
 };
 
 static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
@@ -181,6 +219,7 @@ static struct ufs_hba_variant_ops ufs_intel_ehl_hba_vops = {
        .init                   = ufs_intel_ehl_init,
        .exit                   = ufs_intel_common_exit,
        .link_startup_notify    = ufs_intel_link_startup_notify,
+       .resume                 = ufs_intel_resume,
 };
 
 #ifdef CONFIG_PM_SLEEP
@@ -207,6 +246,30 @@ static int ufshcd_pci_resume(struct device *dev)
 {
        return ufshcd_system_resume(dev_get_drvdata(dev));
 }
+
+/**
+ * ufshcd_pci_poweroff - suspend-to-disk poweroff function
+ * @dev: pointer to PCI device handle
+ *
+ * Returns 0 if successful
+ * Returns non-zero otherwise
+ */
+static int ufshcd_pci_poweroff(struct device *dev)
+{
+       struct ufs_hba *hba = dev_get_drvdata(dev);
+       int spm_lvl = hba->spm_lvl;
+       int ret;
+
+       /*
+        * For poweroff we need to set the UFS device to PowerDown mode.
+        * Force spm_lvl to ensure that.
+        */
+       hba->spm_lvl = 5;
+       ret = ufshcd_system_suspend(hba);
+       hba->spm_lvl = spm_lvl;
+       return ret;
+}
+
 #endif /* !CONFIG_PM_SLEEP */
 
 #ifdef CONFIG_PM
@@ -302,8 +365,14 @@ ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 }
 
 static const struct dev_pm_ops ufshcd_pci_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(ufshcd_pci_suspend,
-                               ufshcd_pci_resume)
+#ifdef CONFIG_PM_SLEEP
+       .suspend        = ufshcd_pci_suspend,
+       .resume         = ufshcd_pci_resume,
+       .freeze         = ufshcd_pci_suspend,
+       .thaw           = ufshcd_pci_resume,
+       .poweroff       = ufshcd_pci_poweroff,
+       .restore        = ufshcd_pci_resume,
+#endif
        SET_RUNTIME_PM_OPS(ufshcd_pci_runtime_suspend,
                           ufshcd_pci_runtime_resume,
                           ufshcd_pci_runtime_idle)
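
The open-coded dev_pm_ops above matters because hibernation drives four
distinct phases; how the callbacks map, per the kernel's standard
hibernation sequence:

        /* .freeze   before the hibernation image is created (suspend-like,
         *           device state kept)
         * .thaw     after the image has been written, back in the running
         *           system
         * .poweroff just before the platform powers off; forces spm_lvl 5
         *           so the UFS device enters PowerDown mode with the link
         *           off
         * .restore  in the freshly booted restore kernel, after the image
         *           has been loaded (resume-like)
         */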
index 9902b7e3aa4aaf49bcd266bfa4b12e09d84e7763..82ad31781bc9e127d9fc6834f3b704e69f1d179c 100644 (file)
@@ -225,6 +225,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 static int ufshcd_eh_host_reset_handler(struct scsi_cmnd *cmd);
 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
 static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_clear_ua_wluns(struct ufs_hba *hba);
 static int ufshcd_probe_hba(struct ufs_hba *hba, bool async);
 static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
 static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
@@ -580,6 +581,23 @@ static void ufshcd_print_pwr_info(struct ufs_hba *hba)
                 hba->pwr_info.hs_rate);
 }
 
+static void ufshcd_device_reset(struct ufs_hba *hba)
+{
+       int err;
+
+       err = ufshcd_vops_device_reset(hba);
+
+       if (!err) {
+               ufshcd_set_ufs_dev_active(hba);
+               if (ufshcd_is_wb_allowed(hba)) {
+                       hba->wb_enabled = false;
+                       hba->wb_buf_flush_enabled = false;
+               }
+       }
+       if (err != -EOPNOTSUPP)
+               ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
+}
+
 void ufshcd_delay_us(unsigned long us, unsigned long tolerance)
 {
        if (!us)
@@ -3665,7 +3683,7 @@ static int ufshcd_dme_enable(struct ufs_hba *hba)
        ret = ufshcd_send_uic_cmd(hba, &uic_cmd);
        if (ret)
                dev_err(hba->dev,
-                       "dme-reset: error code %d\n", ret);
+                       "dme-enable: error code %d\n", ret);
 
        return ret;
 }
@@ -3964,7 +3982,7 @@ int ufshcd_link_recovery(struct ufs_hba *hba)
        spin_unlock_irqrestore(hba->host->host_lock, flags);
 
        /* Reset the attached device */
-       ufshcd_vops_device_reset(hba);
+       ufshcd_device_reset(hba);
 
        ret = ufshcd_host_reset_and_restore(hba);
 
@@ -6930,7 +6948,8 @@ static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 
        /* Establish the link again and restore the device */
        err = ufshcd_probe_hba(hba, false);
-
+       if (!err)
+               ufshcd_clear_ua_wluns(hba);
 out:
        if (err)
                dev_err(hba->dev, "%s: Host init failed %d\n", __func__, err);
@@ -6968,7 +6987,7 @@ static int ufshcd_reset_and_restore(struct ufs_hba *hba)
 
        do {
                /* Reset the attached device */
-               ufshcd_vops_device_reset(hba);
+               ufshcd_device_reset(hba);
 
                err = ufshcd_host_reset_and_restore(hba);
        } while (err && --retries);
@@ -8045,7 +8064,7 @@ static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
 {
        int ret = 0;
 
-       if (!vreg || !vreg->enabled)
+       if (!vreg || !vreg->enabled || vreg->always_on)
                goto out;
 
        ret = regulator_disable(vreg->reg);
@@ -8414,13 +8433,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
         * handling context.
         */
        hba->host->eh_noresume = 1;
-       if (hba->wlun_dev_clr_ua) {
-               ret = ufshcd_send_request_sense(hba, sdp);
-               if (ret)
-                       goto out;
-               /* Unit attention condition is cleared now */
-               hba->wlun_dev_clr_ua = false;
-       }
+       ufshcd_clear_ua_wluns(hba);
 
        cmd[4] = pwr_mode << 4;
 
@@ -8441,7 +8454,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
 
        if (!ret)
                hba->curr_dev_pwr_mode = pwr_mode;
-out:
+
        scsi_device_put(sdp);
        hba->host->eh_noresume = 0;
        return ret;
@@ -8747,7 +8760,7 @@ set_link_active:
         * further below.
         */
        if (ufshcd_is_ufs_dev_deepsleep(hba)) {
-               ufshcd_vops_device_reset(hba);
+               ufshcd_device_reset(hba);
                WARN_ON(!ufshcd_is_link_off(hba));
        }
        if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
@@ -8757,7 +8770,7 @@ set_link_active:
 set_dev_active:
        /* Can also get here needing to exit DeepSleep */
        if (ufshcd_is_ufs_dev_deepsleep(hba)) {
-               ufshcd_vops_device_reset(hba);
+               ufshcd_device_reset(hba);
                ufshcd_host_reset_and_restore(hba);
        }
        if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
@@ -9353,7 +9366,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
        }
 
        /* Reset the attached device */
-       ufshcd_vops_device_reset(hba);
+       ufshcd_device_reset(hba);
 
        ufshcd_init_crypto(hba);
 
index f8c2467dc0142b47f47d81123fc73fc951ff2d2a..aa9ea355232395f4915830119a0e37698444e7cb 100644 (file)
@@ -1218,16 +1218,12 @@ static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
                hba->vops->dbg_register_dump(hba);
 }
 
-static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
+static inline int ufshcd_vops_device_reset(struct ufs_hba *hba)
 {
-       if (hba->vops && hba->vops->device_reset) {
-               int err = hba->vops->device_reset(hba);
-
-               if (!err)
-                       ufshcd_set_ufs_dev_active(hba);
-               if (err != -EOPNOTSUPP)
-                       ufshcd_update_evt_hist(hba, UFS_EVT_DEV_RESET, err);
-       }
+       if (hba->vops && hba->vops->device_reset)
+               return hba->vops->device_reset(hba);
+
+       return -EOPNOTSUPP;
 }
 
 static inline void ufshcd_vops_config_scaling_param(struct ufs_hba *hba,
index 512e925d5ea48b8609f770626e1df95b92cbf98f..881f645661cc6137fd20cc7376e90b57e265a9f1 100644 (file)
@@ -83,6 +83,7 @@ struct spi_geni_master {
        spinlock_t lock;
        int irq;
        bool cs_flag;
+       bool abort_failed;
 };
 
 static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -141,8 +142,49 @@ static void handle_fifo_timeout(struct spi_master *spi,
        spin_unlock_irq(&mas->lock);
 
        time_left = wait_for_completion_timeout(&mas->abort_done, HZ);
-       if (!time_left)
+       if (!time_left) {
                dev_err(mas->dev, "Failed to cancel/abort m_cmd\n");
+
+               /*
+                * No need for a lock since SPI core has a lock and we never
+                * access this from an interrupt.
+                */
+               mas->abort_failed = true;
+       }
+}
+
+static bool spi_geni_is_abort_still_pending(struct spi_geni_master *mas)
+{
+       struct geni_se *se = &mas->se;
+       u32 m_irq, m_irq_en;
+
+       if (!mas->abort_failed)
+               return false;
+
+       /*
+        * The only known case where a transfer times out and then a cancel
+        * times out then an abort times out is if something is blocking our
+        * interrupt handler from running.  Avoid starting any new transfers
+        * until that sorts itself out.
+        */
+       spin_lock_irq(&mas->lock);
+       m_irq = readl(se->base + SE_GENI_M_IRQ_STATUS);
+       m_irq_en = readl(se->base + SE_GENI_M_IRQ_EN);
+       spin_unlock_irq(&mas->lock);
+
+       if (m_irq & m_irq_en) {
+               dev_err(mas->dev, "Interrupts pending after abort: %#010x\n",
+                       m_irq & m_irq_en);
+               return true;
+       }
+
+       /*
+        * If we're here, the problem resolved itself, so there is no need
+        * to check again on future transfers.
+        */
+       mas->abort_failed = false;
+
+       return false;
 }
 
 static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
@@ -158,10 +200,21 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
        if (set_flag == mas->cs_flag)
                return;
 
-       mas->cs_flag = set_flag;
-
        pm_runtime_get_sync(mas->dev);
+
+       if (spi_geni_is_abort_still_pending(mas)) {
+               dev_err(mas->dev, "Can't set chip select\n");
+               goto exit;
+       }
+
        spin_lock_irq(&mas->lock);
+       if (mas->cur_xfer) {
+               dev_err(mas->dev, "Can't set CS when prev xfer running\n");
+               spin_unlock_irq(&mas->lock);
+               goto exit;
+       }
+
+       mas->cs_flag = set_flag;
        reinit_completion(&mas->cs_done);
        if (set_flag)
                geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
@@ -170,9 +223,12 @@ static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
        spin_unlock_irq(&mas->lock);
 
        time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
-       if (!time_left)
+       if (!time_left) {
+               dev_warn(mas->dev, "Timeout setting chip select\n");
                handle_fifo_timeout(spi, NULL);
+       }
 
+exit:
        pm_runtime_put(mas->dev);
 }
 
@@ -280,6 +336,9 @@ static int spi_geni_prepare_message(struct spi_master *spi,
        int ret;
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
 
+       if (spi_geni_is_abort_still_pending(mas))
+               return -EBUSY;
+
        ret = setup_fifo_params(spi_msg->spi, spi);
        if (ret)
                dev_err(mas->dev, "Couldn't select mode %d\n", ret);
@@ -354,6 +413,12 @@ static bool geni_spi_handle_tx(struct spi_geni_master *mas)
        unsigned int bytes_per_fifo_word = geni_byte_per_fifo_word(mas);
        unsigned int i = 0;
 
+       /* Stop the watermark IRQ if nothing to send */
+       if (!mas->cur_xfer) {
+               writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+               return false;
+       }
+
        max_bytes = (mas->tx_fifo_depth - mas->tx_wm) * bytes_per_fifo_word;
        if (mas->tx_rem_bytes < max_bytes)
                max_bytes = mas->tx_rem_bytes;
@@ -396,6 +461,14 @@ static void geni_spi_handle_rx(struct spi_geni_master *mas)
                if (rx_last_byte_valid && rx_last_byte_valid < 4)
                        rx_bytes -= bytes_per_fifo_word - rx_last_byte_valid;
        }
+
+       /* Clear out the FIFO and bail if nowhere to put it */
+       if (!mas->cur_xfer) {
+               for (i = 0; i < DIV_ROUND_UP(rx_bytes, bytes_per_fifo_word); i++)
+                       readl(se->base + SE_GENI_RX_FIFOn);
+               return;
+       }
+
        if (mas->rx_rem_bytes < rx_bytes)
                rx_bytes = mas->rx_rem_bytes;
 
@@ -495,6 +568,9 @@ static int spi_geni_transfer_one(struct spi_master *spi,
 {
        struct spi_geni_master *mas = spi_master_get_devdata(spi);
 
+       if (spi_geni_is_abort_still_pending(mas))
+               return -EBUSY;
+
        /* Terminate and return success for 0 byte length transfer */
        if (!xfer->len)
                return 0;
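The stuck-abort detection boils down to reading two registers and checking for enabled-but-unserviced interrupts; a condensed sketch of the check spi_geni_is_abort_still_pending() performs above:

    /* Sketch: true if an enabled M-channel IRQ is still asserted,
     * i.e. something is blocking our handler from running. */
    static bool geni_m_irq_stuck(struct geni_se *se)
    {
            u32 status = readl(se->base + SE_GENI_M_IRQ_STATUS);
            u32 enabled = readl(se->base + SE_GENI_M_IRQ_EN);

            return !!(status & enabled);
    }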
index 471dedf3d3392e7b682e7746110ee2488012459d..6017209c6d2f7b42eba482256448172933ac8be5 100644 (file)
@@ -493,9 +493,9 @@ static u32 stm32h7_spi_prepare_fthlv(struct stm32_spi *spi, u32 xfer_len)
 
        /* align packet size with data registers access */
        if (spi->cur_bpw > 8)
-               fthlv -= (fthlv % 2); /* multiple of 2 */
+               fthlv += (fthlv % 2) ? 1 : 0;
        else
-               fthlv -= (fthlv % 4); /* multiple of 4 */
+               fthlv += (fthlv % 4) ? (4 - (fthlv % 4)) : 0;
 
        if (!fthlv)
                fthlv = 1;
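The old code rounded the threshold down to the required multiple, which could undershoot the needed value; the new code rounds up. With the kernel's roundup() macro the same computation reads:

    /* Sketch: equivalent to the open-coded alignment above. */
    fthlv = roundup(fthlv, (spi->cur_bpw > 8) ? 2 : 4);

For example, fthlv = 5 used to become 4 in both branches; it now becomes 6 (multiple of 2) or 8 (multiple of 4).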
index e1179e74a2b89d892f4c8e0edceb3378c6755e5a..ae8463aa8e0fb3c8677238616eaef5b444d1b718 100644 (file)
@@ -2972,10 +2972,7 @@ static int sci_init_single(struct platform_device *dev,
        port->fifosize          = sci_port->params->fifosize;
 
        if (port->type == PORT_SCI) {
-               if (sci_port->reg_size >= 0x20)
-                       port->regshift = 2;
-               else
-                       port->regshift = 1;
+               port->regshift = sci_port->reg_size >> 4;
        }
 
        /*
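The shift-based computation reproduces the old branches for the two register-block sizes a PORT_SCI instance is expected to have:

    /* reg_size 0x10 -> regshift 1 (old "else" branch)
     * reg_size 0x20 -> regshift 2 (old ">= 0x20" branch) */

One assumption worth noting: a reg_size of 0x30 or larger would now yield a regshift above 2, which the old code clamped; presumably no PORT_SCI port has such a size.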
index ba5706ccc18887d5fe90a9bcc46e0dd6b565e9d4..3e2cc95b7b0b74dcd90e0cb450c4b20b0f4f57f2 100644 (file)
@@ -13,7 +13,9 @@ obj-$(CONFIG_USB_DWC3)                += dwc3/
 obj-$(CONFIG_USB_DWC2)         += dwc2/
 obj-$(CONFIG_USB_ISP1760)      += isp1760/
 
+obj-$(CONFIG_USB_CDNS_SUPPORT) += cdns3/
 obj-$(CONFIG_USB_CDNS3)                += cdns3/
+obj-$(CONFIG_USB_CDNSP_PCI)    += cdns3/
 
 obj-$(CONFIG_USB_MON)          += mon/
 obj-$(CONFIG_USB_MTU3)         += mtu3/
index 84716d216ae5fc50311bf914ad0f3baa49cfcea7..b98ca0a1352a2a84fc9067f8ca8af74656393002 100644 (file)
@@ -1,14 +1,28 @@
-config USB_CDNS3
-       tristate "Cadence USB3 Dual-Role Controller"
+config USB_CDNS_SUPPORT
+       tristate "Cadence USB Support"
        depends on USB_SUPPORT && (USB || USB_GADGET) && HAS_DMA
        select USB_XHCI_PLATFORM if USB_XHCI_HCD
        select USB_ROLE_SWITCH
+       help
+         Say Y here if your system has a Cadence USBSS or USBSSP
+         dual-role controller.
+         It supports: dual-role switch, Host-only, and Peripheral-only.
+
+config USB_CDNS_HOST
+       bool
+
+if USB_CDNS_SUPPORT
+
+config USB_CDNS3
+       tristate "Cadence USB3 Dual-Role Controller"
+       depends on USB_CDNS_SUPPORT
        help
          Say Y here if your system has a Cadence USB3 dual-role controller.
          It supports: dual-role switch, Host-only, and Peripheral-only.
 
          If you choose to build this driver as a dynamically linked
          module, the module will be called cdns3.ko.
+endif
 
 if USB_CDNS3
 
@@ -25,6 +39,7 @@ config USB_CDNS3_GADGET
 config USB_CDNS3_HOST
        bool "Cadence USB3 host controller"
        depends on USB=y || USB=USB_CDNS3
+       select USB_CDNS_HOST
        help
          Say Y here to enable host controller functionality of the
          Cadence driver.
@@ -64,3 +79,44 @@ config USB_CDNS3_IMX
          For example, imx8qm and imx8qxp.
 
 endif
+
+if USB_CDNS_SUPPORT
+
+config USB_CDNSP_PCI
+       tristate "Cadence CDNSP Dual-Role Controller"
+       depends on USB_CDNS_SUPPORT && USB_PCI && ACPI
+       help
+         Say Y here if your system has a Cadence CDNSP dual-role controller.
+         It supports: dual-role switch, Host-only, and Peripheral-only.
+
+         If you choose to build this driver as a dynamically linked
+         module, the module will be called cdnsp.ko.
+endif
+
+if USB_CDNSP_PCI
+
+config USB_CDNSP_GADGET
+       bool "Cadence CDNSP device controller"
+       depends on USB_GADGET=y || USB_GADGET=USB_CDNSP_PCI
+       help
+         Say Y here to enable device controller functionality of the
+         Cadence CDNSP-DEV driver.
+
+         The Cadence CDNSP device controller in device mode is
+         very similar to the XHCI controller, so some of its
+         algorithms have been taken from the host driver.
+         This controller supports FS, HS, SS and SSP modes.
+         It doesn't support LS.
+
+config USB_CDNSP_HOST
+       bool "Cadence CDNSP host controller"
+       depends on USB=y || USB=USB_CDNSP_PCI
+       select USB_CDNS_HOST
+       help
+         Say Y here to enable host controller functionality of the
+         Cadence driver.
+
+         The host controller is compliant with XHCI, so it uses
+         the standard XHCI driver.
+
+endif
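For illustration, a hypothetical .config slice enabling both IPs as modules with host support; USB_CDNS_HOST is not user-visible and gets selected by the two *_HOST options:

    CONFIG_USB_CDNS_SUPPORT=m
    CONFIG_USB_CDNS3=m
    CONFIG_USB_CDNS3_HOST=y
    CONFIG_USB_CDNSP_PCI=m
    CONFIG_USB_CDNSP_HOST=y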
index d47e341a6f399a3fb519ce991ddddc7954dcd0a4..3f9b7fa8a59435c0ab5d637b5345fb255f982c8c 100644 (file)
@@ -1,18 +1,35 @@
 # SPDX-License-Identifier: GPL-2.0
 # define_trace.h needs to know how to find our header
-CFLAGS_trace.o                         := -I$(src)
+CFLAGS_cdns3-trace.o                           := -I$(src)
+CFLAGS_cdnsp-trace.o                           := -I$(src)
 
-cdns3-y                                        := core.o drd.o
+cdns-usb-common-y                              := core.o drd.o
+cdns3-y                                                := cdns3-plat.o
 
-obj-$(CONFIG_USB_CDNS3)                        += cdns3.o
-cdns3-$(CONFIG_USB_CDNS3_GADGET)       += gadget.o ep0.o
+ifeq ($(CONFIG_USB),m)
+obj-m                                          += cdns-usb-common.o
+obj-m                                          += cdns3.o
+else
+obj-$(CONFIG_USB_CDNS_SUPPORT)                 += cdns-usb-common.o
+obj-$(CONFIG_USB_CDNS3)                                += cdns3.o
+endif
+
+cdns-usb-common-$(CONFIG_USB_CDNS_HOST)        += host.o
+cdns3-$(CONFIG_USB_CDNS3_GADGET)               += cdns3-gadget.o cdns3-ep0.o
 
 ifneq ($(CONFIG_USB_CDNS3_GADGET),)
-cdns3-$(CONFIG_TRACING)                        += trace.o
+cdns3-$(CONFIG_TRACING)                                += cdns3-trace.o
 endif
 
-cdns3-$(CONFIG_USB_CDNS3_HOST)         += host.o
+obj-$(CONFIG_USB_CDNS3_PCI_WRAP)               += cdns3-pci-wrap.o
+obj-$(CONFIG_USB_CDNS3_TI)                     += cdns3-ti.o
+obj-$(CONFIG_USB_CDNS3_IMX)                    += cdns3-imx.o
 
-obj-$(CONFIG_USB_CDNS3_PCI_WRAP)       += cdns3-pci-wrap.o
-obj-$(CONFIG_USB_CDNS3_TI)             += cdns3-ti.o
-obj-$(CONFIG_USB_CDNS3_IMX)            += cdns3-imx.o
+cdnsp-udc-pci-y                                        := cdnsp-pci.o
+obj-$(CONFIG_USB_CDNSP_PCI)                    += cdnsp-udc-pci.o
+cdnsp-udc-pci-$(CONFIG_USB_CDNSP_GADGET)       += cdnsp-ring.o cdnsp-gadget.o \
+                                                  cdnsp-mem.o cdnsp-ep0.o
+
+ifneq ($(CONFIG_USB_CDNSP_GADGET),)
+cdnsp-udc-pci-$(CONFIG_TRACING)                        += cdnsp-trace.o
+endif
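With this layout a fully modular build should produce three modules: cdns-usb-common.ko (core.o + drd.o, plus host.o when USB_CDNS_HOST is set), cdns3.ko (the USBSS platform glue) and cdnsp-udc-pci.ko (the USBSSP PCI driver). The ifeq on CONFIG_USB=m forces the first two to be modular whenever the USB core itself is a module, so the shared objects are never built in while a symbol they need is modular.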
similarity index 99%
rename from drivers/usb/cdns3/ep0.c
rename to drivers/usb/cdns3/cdns3-ep0.c
index d3121a32cc68cfc25bec49668285ef03d3458b7d..9a17802275d51bcdeae97dcdcd4d2bcbc418702f 100644 (file)
@@ -13,8 +13,8 @@
 #include <linux/usb/composite.h>
 #include <linux/iopoll.h>
 
-#include "gadget.h"
-#include "trace.h"
+#include "cdns3-gadget.h"
+#include "cdns3-trace.h"
 
 static struct usb_endpoint_descriptor cdns3_gadget_ep0_desc = {
        .bLength = USB_DT_ENDPOINT_SIZE,
@@ -364,7 +364,7 @@ static int cdns3_ep0_feature_handle_endpoint(struct cdns3_device *priv_dev,
        if (le16_to_cpu(ctrl->wValue) != USB_ENDPOINT_HALT)
                return -EINVAL;
 
-       if (!(ctrl->wIndex & ~USB_DIR_IN))
+       if (!(le16_to_cpu(ctrl->wIndex) & ~USB_DIR_IN))
                return 0;
 
        index = cdns3_ep_addr_to_index(le16_to_cpu(ctrl->wIndex));
@@ -789,7 +789,7 @@ int cdns3_gadget_ep_set_wedge(struct usb_ep *ep)
        return 0;
 }
 
-const struct usb_ep_ops cdns3_gadget_ep0_ops = {
+static const struct usb_ep_ops cdns3_gadget_ep0_ops = {
        .enable = cdns3_gadget_ep0_enable,
        .disable = cdns3_gadget_ep0_disable,
        .alloc_request = cdns3_gadget_ep_alloc_request,
similarity index 99%
rename from drivers/usb/cdns3/gadget.c
rename to drivers/usb/cdns3/cdns3-gadget.c
index 08a4e693c4706682cf9b066cdb123ac859ccf435..582bfeceedb47f523342359fc28985129f8ff6ff 100644 (file)
@@ -63,8 +63,8 @@
 
 #include "core.h"
 #include "gadget-export.h"
-#include "gadget.h"
-#include "trace.h"
+#include "cdns3-gadget.h"
+#include "cdns3-trace.h"
 #include "drd.h"
 
 static int __cdns3_gadget_ep_queue(struct usb_ep *ep,
@@ -1200,7 +1200,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
                td_size = DIV_ROUND_UP(request->length,
                                       priv_ep->endpoint.maxpacket);
                if (priv_dev->gadget.speed == USB_SPEED_SUPER)
-                       trb->length = TRB_TDL_SS_SIZE(td_size);
+                       trb->length = cpu_to_le32(TRB_TDL_SS_SIZE(td_size));
                else
                        control |= TRB_TDL_HS_SIZE(td_size);
        }
@@ -1247,10 +1247,10 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep,
                        priv_req->trb->control = cpu_to_le32(control);
 
                if (sg_supported) {
-                       trb->control |= TRB_ISP;
+                       trb->control |= cpu_to_le32(TRB_ISP);
                        /* Don't set chain bit for last TRB */
                        if (sg_iter < num_trb - 1)
-                               trb->control |= TRB_CHAIN;
+                               trb->control |= cpu_to_le32(TRB_CHAIN);
 
                        s = sg_next(s);
                }
@@ -1844,7 +1844,7 @@ __must_hold(&priv_dev->lock)
 static irqreturn_t cdns3_device_irq_handler(int irq, void *data)
 {
        struct cdns3_device *priv_dev = data;
-       struct cdns3 *cdns = dev_get_drvdata(priv_dev->dev);
+       struct cdns *cdns = dev_get_drvdata(priv_dev->dev);
        irqreturn_t ret = IRQ_NONE;
        u32 reg;
 
@@ -3084,7 +3084,7 @@ static void cdns3_gadget_release(struct device *dev)
        kfree(priv_dev);
 }
 
-static void cdns3_gadget_exit(struct cdns3 *cdns)
+static void cdns3_gadget_exit(struct cdns *cdns)
 {
        struct cdns3_device *priv_dev;
 
@@ -3117,10 +3117,10 @@ static void cdns3_gadget_exit(struct cdns3 *cdns)
        kfree(priv_dev->zlp_buf);
        usb_put_gadget(&priv_dev->gadget);
        cdns->gadget_dev = NULL;
-       cdns3_drd_gadget_off(cdns);
+       cdns_drd_gadget_off(cdns);
 }
 
-static int cdns3_gadget_start(struct cdns3 *cdns)
+static int cdns3_gadget_start(struct cdns *cdns)
 {
        struct cdns3_device *priv_dev;
        u32 max_speed;
@@ -3240,7 +3240,7 @@ err1:
        return ret;
 }
 
-static int __cdns3_gadget_init(struct cdns3 *cdns)
+static int __cdns3_gadget_init(struct cdns *cdns)
 {
        int ret = 0;
 
@@ -3251,7 +3251,7 @@ static int __cdns3_gadget_init(struct cdns3 *cdns)
                return ret;
        }
 
-       cdns3_drd_gadget_on(cdns);
+       cdns_drd_gadget_on(cdns);
        pm_runtime_get_sync(cdns->dev);
 
        ret = cdns3_gadget_start(cdns);
@@ -3277,7 +3277,7 @@ err0:
        return ret;
 }
 
-static int cdns3_gadget_suspend(struct cdns3 *cdns, bool do_wakeup)
+static int cdns3_gadget_suspend(struct cdns *cdns, bool do_wakeup)
 __must_hold(&cdns->lock)
 {
        struct cdns3_device *priv_dev = cdns->gadget_dev;
@@ -3296,7 +3296,7 @@ __must_hold(&cdns->lock)
        return 0;
 }
 
-static int cdns3_gadget_resume(struct cdns3 *cdns, bool hibernated)
+static int cdns3_gadget_resume(struct cdns *cdns, bool hibernated)
 {
        struct cdns3_device *priv_dev = cdns->gadget_dev;
 
@@ -3311,13 +3311,13 @@ static int cdns3_gadget_resume(struct cdns3 *cdns, bool hibernated)
 /**
  * cdns3_gadget_init - initialize device structure
  *
- * @cdns: cdns3 instance
+ * @cdns: cdns instance
  *
  * This function initializes the gadget.
  */
-int cdns3_gadget_init(struct cdns3 *cdns)
+int cdns3_gadget_init(struct cdns *cdns)
 {
-       struct cdns3_role_driver *rdrv;
+       struct cdns_role_driver *rdrv;
 
        rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
        if (!rdrv)
@@ -3327,7 +3327,7 @@ int cdns3_gadget_init(struct cdns3 *cdns)
        rdrv->stop      = cdns3_gadget_exit;
        rdrv->suspend   = cdns3_gadget_suspend;
        rdrv->resume    = cdns3_gadget_resume;
-       rdrv->state     = CDNS3_ROLE_STATE_INACTIVE;
+       rdrv->state     = CDNS_ROLE_STATE_INACTIVE;
        rdrv->name      = "gadget";
        cdns->roles[USB_ROLE_DEVICE] = rdrv;
 
index 22a56c4dce678d507a80a27378412b1f91a30a8d..d9fb68766a15d0e0c88654f076becda9eb849add 100644 (file)
@@ -250,7 +250,7 @@ static void cdns3_set_wakeup(struct cdns_imx *data, bool enable)
 static int cdns_imx_platform_suspend(struct device *dev,
                bool suspend, bool wakeup)
 {
-       struct cdns3 *cdns = dev_get_drvdata(dev);
+       struct cdns *cdns = dev_get_drvdata(dev);
        struct device *parent = dev->parent;
        struct cdns_imx *data = dev_get_drvdata(parent);
        void __iomem *otg_regs = (void __iomem *)(cdns->otg_regs);
diff --git a/drivers/usb/cdns3/cdns3-plat.c b/drivers/usb/cdns3/cdns3-plat.c
new file mode 100644 (file)
index 0000000..4b18e1c
--- /dev/null
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence USBSS DRD Driver.
+ *
+ * Copyright (C) 2018-2020 Cadence.
+ * Copyright (C) 2017-2018 NXP
+ * Copyright (C) 2019 Texas Instruments
+ *
+ * Author: Peter Chen <peter.chen@nxp.com>
+ *         Pawel Laszczak <pawell@cadence.com>
+ *         Roger Quadros <rogerq@ti.com>
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "core.h"
+#include "gadget-export.h"
+
+static int set_phy_power_on(struct cdns *cdns)
+{
+       int ret;
+
+       ret = phy_power_on(cdns->usb2_phy);
+       if (ret)
+               return ret;
+
+       ret = phy_power_on(cdns->usb3_phy);
+       if (ret)
+               phy_power_off(cdns->usb2_phy);
+
+       return ret;
+}
+
+static void set_phy_power_off(struct cdns *cdns)
+{
+       phy_power_off(cdns->usb3_phy);
+       phy_power_off(cdns->usb2_phy);
+}
+
+/**
+ * cdns3_plat_probe - probe for cdns3 core device
+ * @pdev: Pointer to cdns3 core platform device
+ *
+ * Returns 0 on success, otherwise a negative errno
+ */
+static int cdns3_plat_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct resource *res;
+       struct cdns *cdns;
+       void __iomem *regs;
+       int ret;
+
+       cdns = devm_kzalloc(dev, sizeof(*cdns), GFP_KERNEL);
+       if (!cdns)
+               return -ENOMEM;
+
+       cdns->dev = dev;
+       cdns->pdata = dev_get_platdata(dev);
+
+       platform_set_drvdata(pdev, cdns);
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "host");
+       if (!res) {
+               dev_err(dev, "missing host IRQ\n");
+               return -ENODEV;
+       }
+
+       cdns->xhci_res[0] = *res;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci");
+       if (!res) {
+               dev_err(dev, "couldn't get xhci resource\n");
+               return -ENXIO;
+       }
+
+       cdns->xhci_res[1] = *res;
+
+       cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral");
+
+       if (cdns->dev_irq < 0)
+               return cdns->dev_irq;
+
+       regs = devm_platform_ioremap_resource_byname(pdev, "dev");
+       if (IS_ERR(regs))
+               return PTR_ERR(regs);
+       cdns->dev_regs  = regs;
+
+       cdns->otg_irq = platform_get_irq_byname(pdev, "otg");
+       if (cdns->otg_irq < 0)
+               return cdns->otg_irq;
+
+       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg");
+       if (!res) {
+               dev_err(dev, "couldn't get otg resource\n");
+               return -ENXIO;
+       }
+
+       cdns->phyrst_a_enable = device_property_read_bool(dev, "cdns,phyrst-a-enable");
+
+       cdns->otg_res = *res;
+
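+       /*
+        * The wakeup IRQ is optional: probe deferral is propagated, an
+        * explicit 0 is rejected as invalid, and any other error simply
+        * means the glue layer provides no dedicated wakeup line.
+        */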
+       cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
+       if (cdns->wakeup_irq == -EPROBE_DEFER)
+               return cdns->wakeup_irq;
+       else if (cdns->wakeup_irq == 0)
+               return -EINVAL;
+
+       if (cdns->wakeup_irq < 0) {
+               dev_dbg(dev, "couldn't get wakeup irq\n");
+               cdns->wakeup_irq = 0x0;
+       }
+
+       cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy");
+       if (IS_ERR(cdns->usb2_phy))
+               return PTR_ERR(cdns->usb2_phy);
+
+       ret = phy_init(cdns->usb2_phy);
+       if (ret)
+               return ret;
+
+       cdns->usb3_phy = devm_phy_optional_get(dev, "cdns3,usb3-phy");
+       if (IS_ERR(cdns->usb3_phy))
+               return PTR_ERR(cdns->usb3_phy);
+
+       ret = phy_init(cdns->usb3_phy);
+       if (ret)
+               goto err_phy3_init;
+
+       ret = set_phy_power_on(cdns);
+       if (ret)
+               goto err_phy_power_on;
+
+       cdns->gadget_init = cdns3_gadget_init;
+
+       ret = cdns_init(cdns);
+       if (ret)
+               goto err_cdns_init;
+
+       device_set_wakeup_capable(dev, true);
+       pm_runtime_set_active(dev);
+       pm_runtime_enable(dev);
+       if (!(cdns->pdata && (cdns->pdata->quirks & CDNS3_DEFAULT_PM_RUNTIME_ALLOW)))
+               pm_runtime_forbid(dev);
+
+       /*
+        * The controller needs less time between bus and controller suspend,
+        * and we also need a small delay to avoid frequently entering low
+        * power mode.
+        */
+       pm_runtime_set_autosuspend_delay(dev, 20);
+       pm_runtime_mark_last_busy(dev);
+       pm_runtime_use_autosuspend(dev);
+
+       return 0;
+
+err_cdns_init:
+       set_phy_power_off(cdns);
+err_phy_power_on:
+       phy_exit(cdns->usb3_phy);
+err_phy3_init:
+       phy_exit(cdns->usb2_phy);
+
+       return ret;
+}
+
+/**
+ * cdns3_plat_remove - unbind drd driver and clean up
+ * @pdev: Pointer to Linux platform device
+ *
+ * Returns 0 on success, otherwise a negative errno
+ */
+static int cdns3_plat_remove(struct platform_device *pdev)
+{
+       struct cdns *cdns = platform_get_drvdata(pdev);
+       struct device *dev = cdns->dev;
+
+       pm_runtime_get_sync(dev);
+       pm_runtime_disable(dev);
+       pm_runtime_put_noidle(dev);
+       cdns_remove(cdns);
+       set_phy_power_off(cdns);
+       phy_exit(cdns->usb2_phy);
+       phy_exit(cdns->usb3_phy);
+       return 0;
+}
+
+#ifdef CONFIG_PM
+
+static int cdns3_set_platform_suspend(struct device *dev,
+                                     bool suspend, bool wakeup)
+{
+       struct cdns *cdns = dev_get_drvdata(dev);
+       int ret = 0;
+
+       if (cdns->pdata && cdns->pdata->platform_suspend)
+               ret = cdns->pdata->platform_suspend(dev, suspend, wakeup);
+
+       return ret;
+}
+
+static int cdns3_controller_suspend(struct device *dev, pm_message_t msg)
+{
+       struct cdns *cdns = dev_get_drvdata(dev);
+       bool wakeup;
+       unsigned long flags;
+
+       if (cdns->in_lpm)
+               return 0;
+
+       if (PMSG_IS_AUTO(msg))
+               wakeup = true;
+       else
+               wakeup = device_may_wakeup(dev);
+
+       cdns3_set_platform_suspend(cdns->dev, true, wakeup);
+       set_phy_power_off(cdns);
+       spin_lock_irqsave(&cdns->lock, flags);
+       cdns->in_lpm = true;
+       spin_unlock_irqrestore(&cdns->lock, flags);
+       dev_dbg(cdns->dev, "%s ends\n", __func__);
+
+       return 0;
+}
+
+static int cdns3_controller_resume(struct device *dev, pm_message_t msg)
+{
+       struct cdns *cdns = dev_get_drvdata(dev);
+       int ret;
+       unsigned long flags;
+
+       if (!cdns->in_lpm)
+               return 0;
+
+       ret = set_phy_power_on(cdns);
+       if (ret)
+               return ret;
+
+       cdns3_set_platform_suspend(cdns->dev, false, false);
+
+       spin_lock_irqsave(&cdns->lock, flags);
+       cdns_resume(cdns, !PMSG_IS_AUTO(msg));
+       cdns->in_lpm = false;
+       spin_unlock_irqrestore(&cdns->lock, flags);
+       if (cdns->wakeup_pending) {
+               cdns->wakeup_pending = false;
+               enable_irq(cdns->wakeup_irq);
+       }
+       dev_dbg(cdns->dev, "%s ends\n", __func__);
+
+       return ret;
+}
+
+static int cdns3_plat_runtime_suspend(struct device *dev)
+{
+       return cdns3_controller_suspend(dev, PMSG_AUTO_SUSPEND);
+}
+
+static int cdns3_plat_runtime_resume(struct device *dev)
+{
+       return cdns3_controller_resume(dev, PMSG_AUTO_RESUME);
+}
+
+#ifdef CONFIG_PM_SLEEP
+
+static int cdns3_plat_suspend(struct device *dev)
+{
+       struct cdns *cdns = dev_get_drvdata(dev);
+
+       cdns_suspend(cdns);
+
+       return cdns3_controller_suspend(dev, PMSG_SUSPEND);
+}
+
+static int cdns3_plat_resume(struct device *dev)
+{
+       return cdns3_controller_resume(dev, PMSG_RESUME);
+}
+#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops cdns3_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(cdns3_plat_suspend, cdns3_plat_resume)
+       SET_RUNTIME_PM_OPS(cdns3_plat_runtime_suspend,
+                          cdns3_plat_runtime_resume, NULL)
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_cdns3_match[] = {
+       { .compatible = "cdns,usb3" },
+       { },
+};
+MODULE_DEVICE_TABLE(of, of_cdns3_match);
+#endif
+
+static struct platform_driver cdns3_driver = {
+       .probe          = cdns3_plat_probe,
+       .remove         = cdns3_plat_remove,
+       .driver         = {
+               .name   = "cdns-usb3",
+               .of_match_table = of_match_ptr(of_cdns3_match),
+               .pm     = &cdns3_pm_ops,
+       },
+};
+
+module_platform_driver(cdns3_driver);
+
+MODULE_ALIAS("platform:cdns3");
+MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence USB3 DRD Controller Driver");
similarity index 89%
rename from drivers/usb/cdns3/trace.c
rename to drivers/usb/cdns3/cdns3-trace.c
index 459fa72d9c74d981fac8ddfd50c7a36dd105ff87..b9858acaef025f1b5a1d492121157158844909e0 100644 (file)
@@ -8,4 +8,4 @@
  */
 
 #define CREATE_TRACE_POINTS
-#include "trace.h"
+#include "cdns3-trace.h"
similarity index 99%
rename from drivers/usb/cdns3/trace.h
rename to drivers/usb/cdns3/cdns3-trace.h
index 0a2a3269bfac61fcbbd5712b5824ddb0694dacd0..8648c7a7a9dd792070ce813a0a595bc16cb4e313 100644 (file)
@@ -19,8 +19,8 @@
 #include <asm/byteorder.h>
 #include <linux/usb/ch9.h>
 #include "core.h"
-#include "gadget.h"
-#include "debug.h"
+#include "cdns3-gadget.h"
+#include "cdns3-debug.h"
 
 #define CDNS3_MSG_MAX  500
 
@@ -565,6 +565,6 @@ DEFINE_EVENT(cdns3_log_request_handled, cdns3_request_handled,
 #define TRACE_INCLUDE_PATH .
 
 #undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE trace
+#define TRACE_INCLUDE_FILE cdns3-trace
 
 #include <trace/define_trace.h>
diff --git a/drivers/usb/cdns3/cdnsp-debug.h b/drivers/usb/cdns3/cdnsp-debug.h
new file mode 100644 (file)
index 0000000..a8776df
--- /dev/null
@@ -0,0 +1,583 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+#ifndef __LINUX_CDNSP_DEBUG
+#define __LINUX_CDNSP_DEBUG
+
+static inline const char *cdnsp_trb_comp_code_string(u8 status)
+{
+       switch (status) {
+       case COMP_INVALID:
+               return "Invalid";
+       case COMP_SUCCESS:
+               return "Success";
+       case COMP_DATA_BUFFER_ERROR:
+               return "Data Buffer Error";
+       case COMP_BABBLE_DETECTED_ERROR:
+               return "Babble Detected";
+       case COMP_TRB_ERROR:
+               return "TRB Error";
+       case COMP_RESOURCE_ERROR:
+               return "Resource Error";
+       case COMP_NO_SLOTS_AVAILABLE_ERROR:
+               return "No Slots Available Error";
+       case COMP_INVALID_STREAM_TYPE_ERROR:
+               return "Invalid Stream Type Error";
+       case COMP_SLOT_NOT_ENABLED_ERROR:
+               return "Slot Not Enabled Error";
+       case COMP_ENDPOINT_NOT_ENABLED_ERROR:
+               return "Endpoint Not Enabled Error";
+       case COMP_SHORT_PACKET:
+               return "Short Packet";
+       case COMP_RING_UNDERRUN:
+               return "Ring Underrun";
+       case COMP_RING_OVERRUN:
+               return "Ring Overrun";
+       case COMP_VF_EVENT_RING_FULL_ERROR:
+               return "VF Event Ring Full Error";
+       case COMP_PARAMETER_ERROR:
+               return "Parameter Error";
+       case COMP_CONTEXT_STATE_ERROR:
+               return "Context State Error";
+       case COMP_EVENT_RING_FULL_ERROR:
+               return "Event Ring Full Error";
+       case COMP_INCOMPATIBLE_DEVICE_ERROR:
+               return "Incompatible Device Error";
+       case COMP_MISSED_SERVICE_ERROR:
+               return "Missed Service Error";
+       case COMP_COMMAND_RING_STOPPED:
+               return "Command Ring Stopped";
+       case COMP_COMMAND_ABORTED:
+               return "Command Aborted";
+       case COMP_STOPPED:
+               return "Stopped";
+       case COMP_STOPPED_LENGTH_INVALID:
+               return "Stopped - Length Invalid";
+       case COMP_STOPPED_SHORT_PACKET:
+               return "Stopped - Short Packet";
+       case COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR:
+               return "Max Exit Latency Too Large Error";
+       case COMP_ISOCH_BUFFER_OVERRUN:
+               return "Isoch Buffer Overrun";
+       case COMP_EVENT_LOST_ERROR:
+               return "Event Lost Error";
+       case COMP_UNDEFINED_ERROR:
+               return "Undefined Error";
+       case COMP_INVALID_STREAM_ID_ERROR:
+               return "Invalid Stream ID Error";
+       default:
+               return "Unknown!!";
+       }
+}
+
+static inline const char *cdnsp_trb_type_string(u8 type)
+{
+       switch (type) {
+       case TRB_NORMAL:
+               return "Normal";
+       case TRB_SETUP:
+               return "Setup Stage";
+       case TRB_DATA:
+               return "Data Stage";
+       case TRB_STATUS:
+               return "Status Stage";
+       case TRB_ISOC:
+               return "Isoch";
+       case TRB_LINK:
+               return "Link";
+       case TRB_EVENT_DATA:
+               return "Event Data";
+       case TRB_TR_NOOP:
+               return "No-Op";
+       case TRB_ENABLE_SLOT:
+               return "Enable Slot Command";
+       case TRB_DISABLE_SLOT:
+               return "Disable Slot Command";
+       case TRB_ADDR_DEV:
+               return "Address Device Command";
+       case TRB_CONFIG_EP:
+               return "Configure Endpoint Command";
+       case TRB_EVAL_CONTEXT:
+               return "Evaluate Context Command";
+       case TRB_RESET_EP:
+               return "Reset Endpoint Command";
+       case TRB_STOP_RING:
+               return "Stop Ring Command";
+       case TRB_SET_DEQ:
+               return "Set TR Dequeue Pointer Command";
+       case TRB_RESET_DEV:
+               return "Reset Device Command";
+       case TRB_FORCE_HEADER:
+               return "Force Header Command";
+       case TRB_CMD_NOOP:
+               return "No-Op Command";
+       case TRB_TRANSFER:
+               return "Transfer Event";
+       case TRB_COMPLETION:
+               return "Command Completion Event";
+       case TRB_PORT_STATUS:
+               return "Port Status Change Event";
+       case TRB_HC_EVENT:
+               return "Device Controller Event";
+       case TRB_MFINDEX_WRAP:
+               return "MFINDEX Wrap Event";
+       case TRB_ENDPOINT_NRDY:
+               return "Endpoint Not ready";
+       case TRB_HALT_ENDPOINT:
+               return "Halt Endpoint";
+       case TRB_FLUSH_ENDPOINT:
+               return "FLush Endpoint";
+       default:
+               return "UNKNOWN";
+       }
+}
+
+static inline const char *cdnsp_ring_type_string(enum cdnsp_ring_type type)
+{
+       switch (type) {
+       case TYPE_CTRL:
+               return "CTRL";
+       case TYPE_ISOC:
+               return "ISOC";
+       case TYPE_BULK:
+               return "BULK";
+       case TYPE_INTR:
+               return "INTR";
+       case TYPE_STREAM:
+               return "STREAM";
+       case TYPE_COMMAND:
+               return "CMD";
+       case TYPE_EVENT:
+               return "EVENT";
+       }
+
+       return "UNKNOWN";
+}
+
+static inline char *cdnsp_slot_state_string(u32 state)
+{
+       switch (state) {
+       case SLOT_STATE_ENABLED:
+               return "enabled/disabled";
+       case SLOT_STATE_DEFAULT:
+               return "default";
+       case SLOT_STATE_ADDRESSED:
+               return "addressed";
+       case SLOT_STATE_CONFIGURED:
+               return "configured";
+       default:
+               return "reserved";
+       }
+}
+
+static inline const char *cdnsp_decode_trb(char *str, size_t size, u32 field0,
+                                          u32 field1, u32 field2, u32 field3)
+{
+       int ep_id = TRB_TO_EP_INDEX(field3) - 1;
+       int type = TRB_FIELD_TO_TYPE(field3);
+       unsigned int ep_num;
+       int ret = 0;
+       u32 temp;
+
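+       /* DCI-style mapping: ep_id 1/2 -> ep1 out/in, 3/4 -> ep2 out/in, ... */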
+       ep_num = DIV_ROUND_UP(ep_id, 2);
+
+       switch (type) {
+       case TRB_LINK:
+               ret += snprintf(str, size,
+                               "LINK %08x%08x intr %ld type '%s' flags %c:%c:%c:%c",
+                               field1, field0, GET_INTR_TARGET(field2),
+                               cdnsp_trb_type_string(type),
+                               field3 & TRB_IOC ? 'I' : 'i',
+                               field3 & TRB_CHAIN ? 'C' : 'c',
+                               field3 & TRB_TC ? 'T' : 't',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       case TRB_TRANSFER:
+       case TRB_COMPLETION:
+       case TRB_PORT_STATUS:
+       case TRB_HC_EVENT:
+               ret += snprintf(str, size,
+                               "ep%d%s(%d) type '%s' TRB %08x%08x status '%s'"
+                               " len %ld slot %ld flags %c:%c",
+                               ep_num, ep_id % 2 ? "out" : "in",
+                               TRB_TO_EP_INDEX(field3),
+                               cdnsp_trb_type_string(type), field1, field0,
+                               cdnsp_trb_comp_code_string(GET_COMP_CODE(field2)),
+                               EVENT_TRB_LEN(field2), TRB_TO_SLOT_ID(field3),
+                               field3 & EVENT_DATA ? 'E' : 'e',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       case TRB_MFINDEX_WRAP:
+               ret += snprintf(str, size, "%s: flags %c",
+                               cdnsp_trb_type_string(type),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       case TRB_SETUP:
+               ret += snprintf(str, size,
+                               "type '%s' bRequestType %02x bRequest %02x "
+                               "wValue %02x%02x wIndex %02x%02x wLength %d "
+                               "length %ld TD size %ld intr %ld Setup ID %ld "
+                               "flags %c:%c:%c",
+                               cdnsp_trb_type_string(type),
+                               field0 & 0xff,
+                               (field0 & 0xff00) >> 8,
+                               (field0 & 0xff000000) >> 24,
+                               (field0 & 0xff0000) >> 16,
+                               (field1 & 0xff00) >> 8,
+                               field1 & 0xff,
+                               (field1 & 0xff000000) >> 16 |
+                               (field1 & 0xff0000) >> 16,
+                               TRB_LEN(field2), GET_TD_SIZE(field2),
+                               GET_INTR_TARGET(field2),
+                               TRB_SETUPID_TO_TYPE(field3),
+                               field3 & TRB_IDT ? 'D' : 'd',
+                               field3 & TRB_IOC ? 'I' : 'i',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       case TRB_DATA:
+               ret += snprintf(str, size,
+                               "type '%s' Buffer %08x%08x length %ld TD size %ld "
+                               "intr %ld flags %c:%c:%c:%c:%c:%c:%c",
+                               cdnsp_trb_type_string(type),
+                               field1, field0, TRB_LEN(field2),
+                               GET_TD_SIZE(field2),
+                               GET_INTR_TARGET(field2),
+                               field3 & TRB_IDT ? 'D' : 'i',
+                               field3 & TRB_IOC ? 'I' : 'i',
+                               field3 & TRB_CHAIN ? 'C' : 'c',
+                               field3 & TRB_NO_SNOOP ? 'S' : 's',
+                               field3 & TRB_ISP ? 'I' : 'i',
+                               field3 & TRB_ENT ? 'E' : 'e',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       case TRB_STATUS:
+               ret += snprintf(str, size,
+                               "Buffer %08x%08x length %ld TD size %ld intr"
+                               "%ld type '%s' flags %c:%c:%c:%c",
+                               field1, field0, TRB_LEN(field2),
+                               GET_TD_SIZE(field2),
+                               GET_INTR_TARGET(field2),
+                               cdnsp_trb_type_string(type),
+                               field3 & TRB_IOC ? 'I' : 'i',
+                               field3 & TRB_CHAIN ? 'C' : 'c',
+                               field3 & TRB_ENT ? 'E' : 'e',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       case TRB_NORMAL:
+       case TRB_ISOC:
+       case TRB_EVENT_DATA:
+       case TRB_TR_NOOP:
+               ret += snprintf(str, size,
+                               "type '%s' Buffer %08x%08x length %ld "
+                               "TD size %ld intr %ld "
+                               "flags %c:%c:%c:%c:%c:%c:%c:%c:%c",
+                               cdnsp_trb_type_string(type),
+                               field1, field0, TRB_LEN(field2),
+                               GET_TD_SIZE(field2),
+                               GET_INTR_TARGET(field2),
+                               field3 & TRB_BEI ? 'B' : 'b',
+                               field3 & TRB_IDT ? 'T' : 't',
+                               field3 & TRB_IOC ? 'I' : 'i',
+                               field3 & TRB_CHAIN ? 'C' : 'c',
+                               field3 & TRB_NO_SNOOP ? 'S' : 's',
+                               field3 & TRB_ISP ? 'I' : 'i',
+                               field3 & TRB_ENT ? 'E' : 'e',
+                               field3 & TRB_CYCLE ? 'C' : 'c',
+                               !(field3 & TRB_EVENT_INVALIDATE) ? 'V' : 'v');
+               break;
+       case TRB_CMD_NOOP:
+       case TRB_ENABLE_SLOT:
+               ret += snprintf(str, size, "%s: flags %c",
+                               cdnsp_trb_type_string(type),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       case TRB_DISABLE_SLOT:
+               ret += snprintf(str, size, "%s: slot %ld flags %c",
+                               cdnsp_trb_type_string(type),
+                               TRB_TO_SLOT_ID(field3),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       case TRB_ADDR_DEV:
+               ret += snprintf(str, size,
+                               "%s: ctx %08x%08x slot %ld flags %c:%c",
+                               cdnsp_trb_type_string(type), field1, field0,
+                               TRB_TO_SLOT_ID(field3),
+                               field3 & TRB_BSR ? 'B' : 'b',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       case TRB_CONFIG_EP:
+               ret += snprintf(str, size,
+                               "%s: ctx %08x%08x slot %ld flags %c:%c",
+                               cdnsp_trb_type_string(type), field1, field0,
+                               TRB_TO_SLOT_ID(field3),
+                               field3 & TRB_DC ? 'D' : 'd',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       case TRB_EVAL_CONTEXT:
+               ret += snprintf(str, size,
+                               "%s: ctx %08x%08x slot %ld flags %c",
+                               cdnsp_trb_type_string(type), field1, field0,
+                               TRB_TO_SLOT_ID(field3),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       case TRB_RESET_EP:
+       case TRB_HALT_ENDPOINT:
+       case TRB_FLUSH_ENDPOINT:
+               ret += snprintf(str, size,
+                               "%s: ep%d%s(%d) ctx %08x%08x slot %ld flags %c",
+                               cdnsp_trb_type_string(type),
+                               ep_num, ep_id % 2 ? "out" : "in",
+                               TRB_TO_EP_INDEX(field3), field1, field0,
+                               TRB_TO_SLOT_ID(field3),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       case TRB_STOP_RING:
+               ret += snprintf(str, size,
+                               "%s: ep%d%s(%d) slot %ld sp %d flags %c",
+                               cdnsp_trb_type_string(type),
+                               ep_num, ep_id % 2 ? "out" : "in",
+                               TRB_TO_EP_INDEX(field3),
+                               TRB_TO_SLOT_ID(field3),
+                               TRB_TO_SUSPEND_PORT(field3),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       case TRB_SET_DEQ:
+               ret += snprintf(str, size,
+                               "%s: ep%d%s(%d) deq %08x%08x stream %ld slot %ld  flags %c",
+                               cdnsp_trb_type_string(type),
+                               ep_num, ep_id % 2 ? "out" : "in",
+                               TRB_TO_EP_INDEX(field3), field1, field0,
+                               TRB_TO_STREAM_ID(field2),
+                               TRB_TO_SLOT_ID(field3),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       case TRB_RESET_DEV:
+               ret += snprintf(str, size, "%s: slot %ld flags %c",
+                               cdnsp_trb_type_string(type),
+                               TRB_TO_SLOT_ID(field3),
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       case TRB_ENDPOINT_NRDY:
+               temp  = TRB_TO_HOST_STREAM(field2);
+
+               ret += snprintf(str, size,
+                               "%s: ep%d%s(%d) H_SID %x%s%s D_SID %lx flags %c:%c",
+                               cdnsp_trb_type_string(type),
+                               ep_num, ep_id % 2 ? "out" : "in",
+                               TRB_TO_EP_INDEX(field3), temp,
+                               temp == STREAM_PRIME_ACK ? "(PRIME)" : "",
+                               temp == STREAM_REJECTED ? "(REJECTED)" : "",
+                               TRB_TO_DEV_STREAM(field0),
+                               field3 & TRB_STAT ? 'S' : 's',
+                               field3 & TRB_CYCLE ? 'C' : 'c');
+               break;
+       default:
+               ret += snprintf(str, size,
+                               "type '%s' -> raw %08x %08x %08x %08x",
+                               cdnsp_trb_type_string(type),
+                               field0, field1, field2, field3);
+       }
+
+       return str;
+}
+
+static inline const char *cdnsp_decode_slot_context(u32 info, u32 info2,
+                                                   u32 int_target, u32 state)
+{
+       static char str[1024];
+       int ret = 0;
+       u32 speed;
+       char *s;
+
+       speed = info & DEV_SPEED;
+
+       switch (speed) {
+       case SLOT_SPEED_FS:
+               s = "full-speed";
+               break;
+       case SLOT_SPEED_HS:
+               s = "high-speed";
+               break;
+       case SLOT_SPEED_SS:
+               s = "super-speed";
+               break;
+       case SLOT_SPEED_SSP:
+               s = "super-speed plus";
+               break;
+       default:
+               s = "UNKNOWN speed";
+       }
+
+       ret = sprintf(str, "%s Ctx Entries %d",
+                     s, (info & LAST_CTX_MASK) >> 27);
+
+       ret += sprintf(str + ret, " [Intr %ld] Addr %ld State %s",
+                      GET_INTR_TARGET(int_target), state & DEV_ADDR_MASK,
+                      cdnsp_slot_state_string(GET_SLOT_STATE(state)));
+
+       return str;
+}
+
+static inline const char *cdnsp_portsc_link_state_string(u32 portsc)
+{
+       switch (portsc & PORT_PLS_MASK) {
+       case XDEV_U0:
+               return "U0";
+       case XDEV_U1:
+               return "U1";
+       case XDEV_U2:
+               return "U2";
+       case XDEV_U3:
+               return "U3";
+       case XDEV_DISABLED:
+               return "Disabled";
+       case XDEV_RXDETECT:
+               return "RxDetect";
+       case XDEV_INACTIVE:
+               return "Inactive";
+       case XDEV_POLLING:
+               return "Polling";
+       case XDEV_RECOVERY:
+               return "Recovery";
+       case XDEV_HOT_RESET:
+               return "Hot Reset";
+       case XDEV_COMP_MODE:
+               return "Compliance mode";
+       case XDEV_TEST_MODE:
+               return "Test mode";
+       case XDEV_RESUME:
+               return "Resume";
+       default:
+               break;
+       }
+
+       return "Unknown";
+}
+
+static inline const char *cdnsp_decode_portsc(char *str, size_t size,
+                                             u32 portsc)
+{
+       int ret;
+
+       ret = snprintf(str, size, "%s %s %s Link:%s PortSpeed:%d ",
+                      portsc & PORT_POWER ? "Powered" : "Powered-off",
+                      portsc & PORT_CONNECT ? "Connected" : "Not-connected",
+                      portsc & PORT_PED ? "Enabled" : "Disabled",
+                      cdnsp_portsc_link_state_string(portsc),
+                      DEV_PORT_SPEED(portsc));
+
+       if (portsc & PORT_RESET)
+               ret += snprintf(str + ret, size - ret, "In-Reset ");
+
+       ret += snprintf(str + ret, size - ret, "Change: ");
+       if (portsc & PORT_CSC)
+               ret += snprintf(str + ret, size - ret, "CSC ");
+       if (portsc & PORT_WRC)
+               ret += snprintf(str + ret, size - ret, "WRC ");
+       if (portsc & PORT_RC)
+               ret += snprintf(str + ret, size - ret, "PRC ");
+       if (portsc & PORT_PLC)
+               ret += snprintf(str + ret, size - ret, "PLC ");
+       if (portsc & PORT_CEC)
+               ret += snprintf(str + ret, size - ret, "CEC ");
+       ret += snprintf(str + ret, size - ret, "Wake: ");
+       if (portsc & PORT_WKCONN_E)
+               ret += snprintf(str + ret, size - ret, "WCE ");
+       if (portsc & PORT_WKDISC_E)
+               ret += snprintf(str + ret, size - ret, "WDE ");
+
+       return str;
+}
+
+static inline const char *cdnsp_ep_state_string(u8 state)
+{
+       switch (state) {
+       case EP_STATE_DISABLED:
+               return "disabled";
+       case EP_STATE_RUNNING:
+               return "running";
+       case EP_STATE_HALTED:
+               return "halted";
+       case EP_STATE_STOPPED:
+               return "stopped";
+       case EP_STATE_ERROR:
+               return "error";
+       default:
+               return "INVALID";
+       }
+}
+
+static inline const char *cdnsp_ep_type_string(u8 type)
+{
+       switch (type) {
+       case ISOC_OUT_EP:
+               return "Isoc OUT";
+       case BULK_OUT_EP:
+               return "Bulk OUT";
+       case INT_OUT_EP:
+               return "Int OUT";
+       case CTRL_EP:
+               return "Ctrl";
+       case ISOC_IN_EP:
+               return "Isoc IN";
+       case BULK_IN_EP:
+               return "Bulk IN";
+       case INT_IN_EP:
+               return "Int IN";
+       default:
+               return "INVALID";
+       }
+}
+
+static inline const char *cdnsp_decode_ep_context(char *str, size_t size,
+                                                 u32 info, u32 info2,
+                                                 u64 deq, u32 tx_info)
+{
+       u8 max_pstr, ep_state, interval, ep_type, burst, cerr, mult;
+       bool lsa, hid;
+       u16 maxp, avg;
+       u32 esit;
+       int ret;
+
+       esit = CTX_TO_MAX_ESIT_PAYLOAD_HI(info) << 16 |
+              CTX_TO_MAX_ESIT_PAYLOAD_LO(tx_info);
+
+       ep_state = info & EP_STATE_MASK;
+       max_pstr = CTX_TO_EP_MAXPSTREAMS(info);
+       interval = CTX_TO_EP_INTERVAL(info);
+       mult = CTX_TO_EP_MULT(info) + 1;
+       lsa = !!(info & EP_HAS_LSA);
+
+       cerr = (info2 & (3 << 1)) >> 1;
+       ep_type = CTX_TO_EP_TYPE(info2);
+       hid = !!(info2 & (1 << 7));
+       burst = CTX_TO_MAX_BURST(info2);
+       maxp = MAX_PACKET_DECODED(info2);
+
+       avg = EP_AVG_TRB_LENGTH(tx_info);
+
+       ret = snprintf(str, size, "State %s mult %d max P. Streams %d %s",
+                      cdnsp_ep_state_string(ep_state), mult,
+                      max_pstr, lsa ? "LSA " : "");
+
+       ret += snprintf(str + ret, size - ret,
+                       "interval %d us max ESIT payload %d CErr %d ",
+                       (1 << interval) * 125, esit, cerr);
+
+       ret += snprintf(str + ret, size - ret,
+                       "Type %s %sburst %d maxp %d deq %016llx ",
+                       cdnsp_ep_type_string(ep_type), hid ? "HID" : "",
+                       burst, maxp, deq);
+
+       ret += snprintf(str + ret, size - ret, "avg trb len %d", avg);
+
+       return str;
+}
+
+#endif /*__LINUX_CDNSP_DEBUG*/
diff --git a/drivers/usb/cdns3/cdnsp-ep0.c b/drivers/usb/cdns3/cdnsp-ep0.c
new file mode 100644 (file)
index 0000000..e2b1bcb
--- /dev/null
@@ -0,0 +1,494 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#include <linux/usb/composite.h>
+#include <linux/usb/gadget.h>
+#include <linux/list.h>
+
+#include "cdnsp-gadget.h"
+#include "cdnsp-trace.h"
+
+static void cdnsp_ep0_stall(struct cdnsp_device *pdev)
+{
+       struct cdnsp_request *preq;
+       struct cdnsp_ep *pep;
+
+       pep = &pdev->eps[0];
+       preq = next_request(&pep->pending_list);
+
+       if (pdev->three_stage_setup) {
+               trace_cdnsp_ep0_data_stage("send stall");
+               cdnsp_halt_endpoint(pdev, pep, true);
+
+               if (preq)
+                       cdnsp_gadget_giveback(pep, preq, -ECONNRESET);
+       } else {
+               trace_cdnsp_ep0_status_stage("send stall");
+               pep->ep_state |= EP0_HALTED_STATUS;
+
+               if (preq)
+                       list_del(&preq->list);
+
+               cdnsp_status_stage(pdev);
+       }
+}
+
+static int cdnsp_ep0_delegate_req(struct cdnsp_device *pdev,
+                                 struct usb_ctrlrequest *ctrl)
+{
+       int ret;
+
+       trace_cdnsp_ep0_request("delagete");
+
+       spin_unlock(&pdev->lock);
+       ret = pdev->gadget_driver->setup(&pdev->gadget, ctrl);
+       spin_lock(&pdev->lock);
+
+       return ret;
+}
+
+static int cdnsp_ep0_set_config(struct cdnsp_device *pdev,
+                               struct usb_ctrlrequest *ctrl)
+{
+       enum usb_device_state state = pdev->gadget.state;
+       u32 cfg;
+       int ret;
+
+       cfg = le16_to_cpu(ctrl->wValue);
+
+       switch (state) {
+       case USB_STATE_ADDRESS:
+               trace_cdnsp_ep0_set_config("from Address state");
+               break;
+       case USB_STATE_CONFIGURED:
+               trace_cdnsp_ep0_set_config("from Configured state");
+               break;
+       default:
+               dev_err(pdev->dev, "Set Configuration - bad device state\n");
+               return -EINVAL;
+       }
+
+       ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+       if (ret)
+               return ret;
+
+       if (!cfg)
+               usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS);
+
+       return 0;
+}
+
+static int cdnsp_ep0_set_address(struct cdnsp_device *pdev,
+                                struct usb_ctrlrequest *ctrl)
+{
+       enum usb_device_state state = pdev->gadget.state;
+       struct cdnsp_slot_ctx *slot_ctx;
+       unsigned int slot_state;
+       int ret;
+       u32 addr;
+
+       addr = le16_to_cpu(ctrl->wValue);
+
+       if (addr > 127) {
+               dev_err(pdev->dev, "Invalid device address %d\n", addr);
+               return -EINVAL;
+       }
+
+       if (state == USB_STATE_CONFIGURED) {
+               dev_err(pdev->dev, "Can't Set Address from Configured State\n");
+               return -EINVAL;
+       }
+
+       pdev->device_address = le16_to_cpu(ctrl->wValue);
+
+       slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+       slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+       if (slot_state == SLOT_STATE_ADDRESSED)
+               cdnsp_reset_device(pdev);
+
+       /* Set device address. */
+       ret = cdnsp_setup_device(pdev, SETUP_CONTEXT_ADDRESS);
+       if (ret)
+               return ret;
+
+       if (addr)
+               usb_gadget_set_state(&pdev->gadget, USB_STATE_ADDRESS);
+       else
+               usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
+
+       return 0;
+}
+
+int cdnsp_status_stage(struct cdnsp_device *pdev)
+{
+       trace_cdnsp_ep0_status_stage("preparing");
+       pdev->ep0_stage = CDNSP_STATUS_STAGE;
+       pdev->ep0_preq.request.length = 0;
+
+       return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
+}
+
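+/*
+ * Map a wIndex endpoint selector onto the flat endpoint array:
+ * ep0 -> 0, ep1 OUT -> 1, ep1 IN -> 2, ep2 OUT -> 3, ep2 IN -> 4, ...
+ */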
+static int cdnsp_w_index_to_ep_index(u16 wIndex)
+{
+       if (!(wIndex & USB_ENDPOINT_NUMBER_MASK))
+               return 0;
+
+       return ((wIndex & USB_ENDPOINT_NUMBER_MASK) * 2) +
+               (wIndex & USB_ENDPOINT_DIR_MASK ? 1 : 0) - 1;
+}
+
+static int cdnsp_ep0_handle_status(struct cdnsp_device *pdev,
+                                  struct usb_ctrlrequest *ctrl)
+{
+       struct cdnsp_ep *pep;
+       __le16 *response;
+       int ep_sts = 0;
+       u16 status = 0;
+       u32 recipient;
+
+       recipient = ctrl->bRequestType & USB_RECIP_MASK;
+
+       switch (recipient) {
+       case USB_RECIP_DEVICE:
+               status = pdev->gadget.is_selfpowered;
+               status |= pdev->may_wakeup << USB_DEVICE_REMOTE_WAKEUP;
+
+               if (pdev->gadget.speed >= USB_SPEED_SUPER) {
+                       status |= pdev->u1_allowed << USB_DEV_STAT_U1_ENABLED;
+                       status |= pdev->u2_allowed << USB_DEV_STAT_U2_ENABLED;
+               }
+               break;
+       case USB_RECIP_INTERFACE:
+               /*
+                * Function Remote Wake Capable D0
+                * Function Remote Wakeup       D1
+                */
+               return cdnsp_ep0_delegate_req(pdev, ctrl);
+       case USB_RECIP_ENDPOINT:
+               ep_sts = cdnsp_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex));
+               pep = &pdev->eps[ep_sts];
+               ep_sts = GET_EP_CTX_STATE(pep->out_ctx);
+
+               /* check if endpoint is stalled */
+               if (ep_sts == EP_STATE_HALTED)
+                       status = BIT(USB_ENDPOINT_HALT);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       response = (__le16 *)pdev->setup_buf;
+       *response = cpu_to_le16(status);
+
+       pdev->ep0_preq.request.length = sizeof(*response);
+       pdev->ep0_preq.request.buf = pdev->setup_buf;
+
+       return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
+}
+
+static void cdnsp_enter_test_mode(struct cdnsp_device *pdev)
+{
+       u32 temp;
+
+       temp = readl(&pdev->active_port->regs->portpmsc) & ~GENMASK(31, 28);
+       temp |= PORT_TEST_MODE(pdev->test_mode);
+       writel(temp, &pdev->active_port->regs->portpmsc);
+}
+
+static int cdnsp_ep0_handle_feature_device(struct cdnsp_device *pdev,
+                                          struct usb_ctrlrequest *ctrl,
+                                          int set)
+{
+       enum usb_device_state state;
+       enum usb_device_speed speed;
+       u16 tmode;
+
+       state = pdev->gadget.state;
+       speed = pdev->gadget.speed;
+
+       switch (le16_to_cpu(ctrl->wValue)) {
+       case USB_DEVICE_REMOTE_WAKEUP:
+               pdev->may_wakeup = !!set;
+               trace_cdnsp_may_wakeup(set);
+               break;
+       case USB_DEVICE_U1_ENABLE:
+               if (state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER)
+                       return -EINVAL;
+
+               pdev->u1_allowed = !!set;
+               trace_cdnsp_u1(set);
+               break;
+       case USB_DEVICE_U2_ENABLE:
+               if (state != USB_STATE_CONFIGURED || speed < USB_SPEED_SUPER)
+                       return -EINVAL;
+
+               pdev->u2_allowed = !!set;
+               trace_cdnsp_u2(set);
+               break;
+       case USB_DEVICE_LTM_ENABLE:
+               return -EINVAL;
+       case USB_DEVICE_TEST_MODE:
+               if (state != USB_STATE_CONFIGURED || speed > USB_SPEED_HIGH)
+                       return -EINVAL;
+
+               tmode = le16_to_cpu(ctrl->wIndex);
+
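+               /*
+                * The test selector is carried in the high byte of wIndex;
+                * the low byte must be zero.
+                */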
+               if (!set || (tmode & 0xff) != 0)
+                       return -EINVAL;
+
+               tmode = tmode >> 8;
+
+               if (tmode > USB_TEST_FORCE_ENABLE || tmode < USB_TEST_J)
+                       return -EINVAL;
+
+               pdev->test_mode = tmode;
+
+               /*
+                * Test mode must be set before Status Stage but controller
+                * will start testing sequence after Status Stage.
+                */
+               cdnsp_enter_test_mode(pdev);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int cdnsp_ep0_handle_feature_intf(struct cdnsp_device *pdev,
+                                        struct usb_ctrlrequest *ctrl,
+                                        int set)
+{
+       u16 wValue, wIndex;
+       int ret;
+
+       wValue = le16_to_cpu(ctrl->wValue);
+       wIndex = le16_to_cpu(ctrl->wIndex);
+
+       switch (wValue) {
+       case USB_INTRF_FUNC_SUSPEND:
+               ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+               if (ret)
+                       return ret;
+
+               /*
+                * Remote wakeup is enabled when any function within a device
+                * is enabled for function remote wakeup.
+                */
+               if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
+                       pdev->may_wakeup++;
+               else if (pdev->may_wakeup > 0)
+                       pdev->may_wakeup--;
+
+               return 0;
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int cdnsp_ep0_handle_feature_endpoint(struct cdnsp_device *pdev,
+                                            struct usb_ctrlrequest *ctrl,
+                                            int set)
+{
+       struct cdnsp_ep *pep;
+       u16 wValue;
+
+       wValue = le16_to_cpu(ctrl->wValue);
+       pep = &pdev->eps[cdnsp_w_index_to_ep_index(le16_to_cpu(ctrl->wIndex))];
+
+       switch (wValue) {
+       case USB_ENDPOINT_HALT:
+               if (!set && (pep->ep_state & EP_WEDGE)) {
+                       /*
+                        * Clearing and re-setting the halt resets the
+                        * sequence number while keeping the endpoint wedged.
+                        */
+                       cdnsp_halt_endpoint(pdev, pep, 0);
+                       cdnsp_halt_endpoint(pdev, pep, 1);
+                       break;
+               }
+
+               return cdnsp_halt_endpoint(pdev, pep, set);
+       default:
+               dev_warn(pdev->dev, "WARN Incorrect wValue %04x\n", wValue);
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+static int cdnsp_ep0_handle_feature(struct cdnsp_device *pdev,
+                                   struct usb_ctrlrequest *ctrl,
+                                   int set)
+{
+       switch (ctrl->bRequestType & USB_RECIP_MASK) {
+       case USB_RECIP_DEVICE:
+               return cdnsp_ep0_handle_feature_device(pdev, ctrl, set);
+       case USB_RECIP_INTERFACE:
+               return cdnsp_ep0_handle_feature_intf(pdev, ctrl, set);
+       case USB_RECIP_ENDPOINT:
+               return cdnsp_ep0_handle_feature_endpoint(pdev, ctrl, set);
+       default:
+               return -EINVAL;
+       }
+}
+
+static int cdnsp_ep0_set_sel(struct cdnsp_device *pdev,
+                            struct usb_ctrlrequest *ctrl)
+{
+       enum usb_device_state state = pdev->gadget.state;
+       u16 wLength;
+
+       if (state == USB_STATE_DEFAULT)
+               return -EINVAL;
+
+       wLength = le16_to_cpu(ctrl->wLength);
+
+       if (wLength != 6) {
+               dev_err(pdev->dev, "Set SEL should be 6 bytes, got %d\n",
+                       wLength);
+               return -EINVAL;
+       }
+
+       /*
+        * To handle Set SEL we need to receive 6 bytes from Host. So let's
+        * queue a usb_request for 6 bytes.
+        */
+       pdev->ep0_preq.request.length = 6;
+       pdev->ep0_preq.request.buf = pdev->setup_buf;
+
+       return cdnsp_ep_enqueue(pdev->ep0_preq.pep, &pdev->ep0_preq);
+}
+
+static int cdnsp_ep0_set_isoch_delay(struct cdnsp_device *pdev,
+                                    struct usb_ctrlrequest *ctrl)
+{
+       if (le16_to_cpu(ctrl->wIndex) || le16_to_cpu(ctrl->wLength))
+               return -EINVAL;
+
+       pdev->gadget.isoch_delay = le16_to_cpu(ctrl->wValue);
+
+       return 0;
+}
+
+static int cdnsp_ep0_std_request(struct cdnsp_device *pdev,
+                                struct usb_ctrlrequest *ctrl)
+{
+       int ret;
+
+       switch (ctrl->bRequest) {
+       case USB_REQ_GET_STATUS:
+               ret = cdnsp_ep0_handle_status(pdev, ctrl);
+               break;
+       case USB_REQ_CLEAR_FEATURE:
+               ret = cdnsp_ep0_handle_feature(pdev, ctrl, 0);
+               break;
+       case USB_REQ_SET_FEATURE:
+               ret = cdnsp_ep0_handle_feature(pdev, ctrl, 1);
+               break;
+       case USB_REQ_SET_ADDRESS:
+               ret = cdnsp_ep0_set_address(pdev, ctrl);
+               break;
+       case USB_REQ_SET_CONFIGURATION:
+               ret = cdnsp_ep0_set_config(pdev, ctrl);
+               break;
+       case USB_REQ_SET_SEL:
+               ret = cdnsp_ep0_set_sel(pdev, ctrl);
+               break;
+       case USB_REQ_SET_ISOCH_DELAY:
+               ret = cdnsp_ep0_set_isoch_delay(pdev, ctrl);
+               break;
+       case USB_REQ_SET_INTERFACE:
+               /*
+                * Add the request to the pending list to prevent libcomposite
+                * from sending the status stage.
+                */
+               list_add_tail(&pdev->ep0_preq.list,
+                             &pdev->ep0_preq.pep->pending_list);
+
+               ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+               if (ret == -EBUSY)
+                       ret = 0;
+
+               list_del(&pdev->ep0_preq.list);
+               break;
+       default:
+               ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+               break;
+       }
+
+       return ret;
+}
+
+void cdnsp_setup_analyze(struct cdnsp_device *pdev)
+{
+       struct usb_ctrlrequest *ctrl = &pdev->setup;
+       int ret = 0;
+       u16 len;
+
+       trace_cdnsp_ctrl_req(ctrl);
+
+       if (!pdev->gadget_driver)
+               goto out;
+
+       if (pdev->gadget.state == USB_STATE_NOTATTACHED) {
+               dev_err(pdev->dev, "ERR: Setup detected in unattached state\n");
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* If ep0 was halted, restore it to the Stopped/Running state. */
+       if (pdev->eps[0].ep_state & EP_HALTED) {
+               trace_cdnsp_ep0_halted("Restore to normal state");
+               cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
+       }
+
+       /*
+        * Finish the previous SETUP transfer by removing its request from the
+        * list and informing the upper layer.
+        */
+       if (!list_empty(&pdev->eps[0].pending_list)) {
+               struct cdnsp_request    *req;
+
+               trace_cdnsp_ep0_request("Remove previous");
+               req = next_request(&pdev->eps[0].pending_list);
+               cdnsp_ep_dequeue(&pdev->eps[0], req);
+       }
+
+       len = le16_to_cpu(ctrl->wLength);
+       if (!len) {
+               pdev->three_stage_setup = false;
+               pdev->ep0_expect_in = false;
+       } else {
+               pdev->three_stage_setup = true;
+               pdev->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
+       }
+
+       if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
+               ret = cdnsp_ep0_std_request(pdev, ctrl);
+       else
+               ret = cdnsp_ep0_delegate_req(pdev, ctrl);
+
+       if (!len)
+               pdev->ep0_stage = CDNSP_STATUS_STAGE;
+
+       if (ret == USB_GADGET_DELAYED_STATUS) {
+               trace_cdnsp_ep0_status_stage("delayed");
+               return;
+       }
+out:
+       if (ret < 0)
+               cdnsp_ep0_stall(pdev);
+       else if (pdev->ep0_stage == CDNSP_STATUS_STAGE)
+               cdnsp_status_stage(pdev);
+}
diff --git a/drivers/usb/cdns3/cdnsp-gadget.c b/drivers/usb/cdns3/cdnsp-gadget.c
new file mode 100644 (file)
index 0000000..f28f150
--- /dev/null
@@ -0,0 +1,2011 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/dma-mapping.h>
+#include <linux/module.h>
+#include <linux/iopoll.h>
+#include <linux/delay.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/irq.h>
+#include <linux/dmi.h>
+
+#include "core.h"
+#include "gadget-export.h"
+#include "drd.h"
+#include "cdnsp-gadget.h"
+#include "cdnsp-trace.h"
+
+unsigned int cdnsp_port_speed(unsigned int port_status)
+{
+       /* Detect gadget speed based on the PORTSC register. */
+       if (DEV_SUPERSPEEDPLUS(port_status))
+               return USB_SPEED_SUPER_PLUS;
+       else if (DEV_SUPERSPEED(port_status))
+               return USB_SPEED_SUPER;
+       else if (DEV_HIGHSPEED(port_status))
+               return USB_SPEED_HIGH;
+       else if (DEV_FULLSPEED(port_status))
+               return USB_SPEED_FULL;
+
+       /* If the device is detached, the speed will be USB_SPEED_UNKNOWN. */
+       return USB_SPEED_UNKNOWN;
+}
+
+/*
+ * Given a port state, this function returns a value that would result in the
+ * port being in the same state, if the value was written to the port status
+ * control register.
+ * Save Read Only (RO) bits and save read/write bits where
+ * writing a 0 clears the bit and writing a 1 sets the bit (RWS).
+ * For all other types (RW1S, RW1CS, RW, and RZ), writing a '0' has no effect.
+ */
+u32 cdnsp_port_state_to_neutral(u32 state)
+{
+       /* Save read-only status and port state. */
+       return (state & CDNSP_PORT_RO) | (state & CDNSP_PORT_RWS);
+}
+
+/**
+ * cdnsp_find_next_ext_cap - Find the offset of the extended capability
+ *                           with the given capability ID.
+ * @base: PCI MMIO registers base address.
+ * @start: Address at which to start looking (0 or HCC_PARAMS to start at the
+ *         beginning of the list).
+ * @id: Extended capability ID to search for.
+ *
+ * Returns the offset of the next matching extended capability structure.
+ * Some capabilities can occur several times,
+ * e.g., the EXT_CAPS_PROTOCOL, and this provides a way to find them all.
+ */
+int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id)
+{
+       u32 offset = start;
+       u32 next;
+       u32 val;
+
+       if (!start || start == HCC_PARAMS_OFFSET) {
+               val = readl(base + HCC_PARAMS_OFFSET);
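+               /* A read of all ones means the controller is not accessible. */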
+               if (val == ~0)
+                       return 0;
+
+               offset = HCC_EXT_CAPS(val) << 2;
+               if (!offset)
+                       return 0;
+       }
+
+       do {
+               val = readl(base + offset);
+               if (val == ~0)
+                       return 0;
+
+               if (EXT_CAPS_ID(val) == id && offset != start)
+                       return offset;
+
+               next = EXT_CAPS_NEXT(val);
+               offset += next << 2;
+       } while (next);
+
+       return 0;
+}
+
+void cdnsp_set_link_state(struct cdnsp_device *pdev,
+                         __le32 __iomem *port_regs,
+                         u32 link_state)
+{
+       int port_num = 0xFF;
+       u32 temp;
+
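+       /* Enable wake on connect/disconnect before changing the link state. */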
+       temp = readl(port_regs);
+       temp = cdnsp_port_state_to_neutral(temp);
+       temp |= PORT_WKCONN_E | PORT_WKDISC_E;
+       writel(temp, port_regs);
+
+       temp &= ~PORT_PLS_MASK;
+       temp |= PORT_LINK_STROBE | link_state;
+
+       if (pdev->active_port)
+               port_num = pdev->active_port->port_num;
+
+       trace_cdnsp_handle_port_status(port_num, readl(port_regs));
+       writel(temp, port_regs);
+       trace_cdnsp_link_state_changed(port_num, readl(port_regs));
+}
+
+static void cdnsp_disable_port(struct cdnsp_device *pdev,
+                              __le32 __iomem *port_regs)
+{
+       u32 temp = cdnsp_port_state_to_neutral(readl(port_regs));
+
+       writel(temp | PORT_PED, port_regs);
+}
+
+static void cdnsp_clear_port_change_bit(struct cdnsp_device *pdev,
+                                       __le32 __iomem *port_regs)
+{
+       u32 portsc = readl(port_regs);
+
+       writel(cdnsp_port_state_to_neutral(portsc) |
+              (portsc & PORT_CHANGE_BITS), port_regs);
+}
+
+static void cdnsp_set_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
+{
+       __le32 __iomem *reg;
+       void __iomem *base;
+       u32 offset = 0;
+
+       base = &pdev->cap_regs->hc_capbase;
+       offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
+       reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
+
+       bit = readl(reg) | bit;
+       writel(bit, reg);
+}
+
+static void cdnsp_clear_chicken_bits_2(struct cdnsp_device *pdev, u32 bit)
+{
+       __le32 __iomem *reg;
+       void __iomem *base;
+       u32 offset = 0;
+
+       base = &pdev->cap_regs->hc_capbase;
+       offset = cdnsp_find_next_ext_cap(base, offset, D_XEC_PRE_REGS_CAP);
+       reg = base + offset + REG_CHICKEN_BITS_2_OFFSET;
+
+       bit = readl(reg) & ~bit;
+       writel(bit, reg);
+}
+
+/*
+ * Disable interrupts and begin the controller halting process.
+ */
+static void cdnsp_quiesce(struct cdnsp_device *pdev)
+{
+       u32 halted;
+       u32 mask;
+       u32 cmd;
+
+       mask = ~(u32)(CDNSP_IRQS);
+
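+       /*
+        * Interrupt enables are always cleared. If the controller has not
+        * halted yet, also clear the run/stop and device enable bits.
+        */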
+       halted = readl(&pdev->op_regs->status) & STS_HALT;
+       if (!halted)
+               mask &= ~(CMD_R_S | CMD_DEVEN);
+
+       cmd = readl(&pdev->op_regs->command);
+       cmd &= mask;
+       writel(cmd, &pdev->op_regs->command);
+}
+
+/*
+ * Force controller into halt state.
+ *
+ * Disable any IRQs and clear the run/stop bit.
+ * Controller will complete any current and actively pipelined transactions, and
+ * should halt within 16 ms of the run/stop bit being cleared.
+ * Read controller Halted bit in the status register to see when the
+ * controller is finished.
+ */
+int cdnsp_halt(struct cdnsp_device *pdev)
+{
+       int ret;
+       u32 val;
+
+       cdnsp_quiesce(pdev);
+
+       ret = readl_poll_timeout_atomic(&pdev->op_regs->status, val,
+                                       val & STS_HALT, 1,
+                                       CDNSP_MAX_HALT_USEC);
+       if (ret) {
+               dev_err(pdev->dev, "ERROR: Device halt failed\n");
+               return ret;
+       }
+
+       pdev->cdnsp_state |= CDNSP_STATE_HALTED;
+
+       return 0;
+}
+
+/*
+ * device controller died, register read returns 0xffffffff, or command never
+ * ends.
+ */
+void cdnsp_died(struct cdnsp_device *pdev)
+{
+       dev_err(pdev->dev, "ERROR: CDNSP controller not responding\n");
+       pdev->cdnsp_state |= CDNSP_STATE_DYING;
+       cdnsp_halt(pdev);
+}
+
+/*
+ * Set the run bit and wait for the device to be running.
+ */
+static int cdnsp_start(struct cdnsp_device *pdev)
+{
+       u32 temp;
+       int ret;
+
+       temp = readl(&pdev->op_regs->command);
+       temp |= (CMD_R_S | CMD_DEVEN);
+       writel(temp, &pdev->op_regs->command);
+
+       trace_cdnsp_init("Turn on controller");
+
+       pdev->cdnsp_state = 0;
+
+       /*
+        * Wait for the STS_HALT Status bit to be 0 to indicate the device is
+        * running.
+        */
+       ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
+                                       !(temp & STS_HALT), 1,
+                                       CDNSP_MAX_HALT_USEC);
+       if (ret) {
+               pdev->cdnsp_state = CDNSP_STATE_DYING;
+               dev_err(pdev->dev, "ERROR: Controller run failed\n");
+       }
+
+       return ret;
+}
+
+/*
+ * Reset a halted controller.
+ *
+ * This resets pipelines, timers, counters, state machines, etc.
+ * Transactions will be terminated immediately, and operational registers
+ * will be set to their defaults.
+ */
+int cdnsp_reset(struct cdnsp_device *pdev)
+{
+       u32 command;
+       u32 temp;
+       int ret;
+
+       temp = readl(&pdev->op_regs->status);
+
+       if (temp == ~(u32)0) {
+               dev_err(pdev->dev, "Device not accessible, reset failed.\n");
+               return -ENODEV;
+       }
+
+       if ((temp & STS_HALT) == 0) {
+               dev_err(pdev->dev, "Controller not halted, aborting reset.\n");
+               return -EINVAL;
+       }
+
+       command = readl(&pdev->op_regs->command);
+       command |= CMD_RESET;
+       writel(command, &pdev->op_regs->command);
+
+       ret = readl_poll_timeout_atomic(&pdev->op_regs->command, temp,
+                                       !(temp & CMD_RESET), 1,
+                                       10 * 1000);
+       if (ret) {
+               dev_err(pdev->dev, "ERROR: Controller reset failed\n");
+               return ret;
+       }
+
+       /*
+        * CDNSP cannot write any doorbells or operational registers other
+        * than status until the "Controller Not Ready" flag is cleared.
+        */
+       ret = readl_poll_timeout_atomic(&pdev->op_regs->status, temp,
+                                       !(temp & STS_CNR), 1,
+                                       10 * 1000);
+
+       if (ret) {
+               dev_err(pdev->dev, "ERROR: Controller not ready to work\n");
+               return ret;
+       }
+
+       dev_dbg(pdev->dev, "Controller ready to work\n");
+
+       return ret;
+}
+
+/*
+ * cdnsp_get_endpoint_index - Find the index for an endpoint given its
+ * descriptor. Use the return value to right shift 1 for the bitmask.
+ *
+ * Index = (epnum * 2) + direction - 1,
+ * where direction = 0 for OUT, 1 for IN.
+ * For control endpoints, the IN index is used (OUT index is unused), so
+ * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
+ */
+static unsigned int
+       cdnsp_get_endpoint_index(const struct usb_endpoint_descriptor *desc)
+{
+       unsigned int index = (unsigned int)usb_endpoint_num(desc);
+
+       if (usb_endpoint_xfer_control(desc))
+               return index * 2;
+
+       return (index * 2) + (usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
+}
+
+/*
+ * Find the flag for this endpoint (for use in the control context). Use the
+ * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
+ * bit 1, etc.
+ */
+static unsigned int
+       cdnsp_get_endpoint_flag(const struct usb_endpoint_descriptor *desc)
+{
+       return 1 << (cdnsp_get_endpoint_index(desc) + 1);
+}
+
+int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
+{
+       struct cdnsp_device *pdev = pep->pdev;
+       struct usb_request *request;
+       int ret;
+
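+       /* Only one request at a time may be pending on the default endpoint. */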
+       if (preq->epnum == 0 && !list_empty(&pep->pending_list)) {
+               trace_cdnsp_request_enqueue_busy(preq);
+               return -EBUSY;
+       }
+
+       request = &preq->request;
+       request->actual = 0;
+       request->status = -EINPROGRESS;
+       preq->direction = pep->direction;
+       preq->epnum = pep->number;
+       preq->td.drbl = 0;
+
+       ret = usb_gadget_map_request_by_dev(pdev->dev, request, pep->direction);
+       if (ret) {
+               trace_cdnsp_request_enqueue_error(preq);
+               return ret;
+       }
+
+       list_add_tail(&preq->list, &pep->pending_list);
+
+       trace_cdnsp_request_enqueue(preq);
+
+       switch (usb_endpoint_type(pep->endpoint.desc)) {
+       case USB_ENDPOINT_XFER_CONTROL:
+               ret = cdnsp_queue_ctrl_tx(pdev, preq);
+               break;
+       case USB_ENDPOINT_XFER_BULK:
+       case USB_ENDPOINT_XFER_INT:
+               ret = cdnsp_queue_bulk_tx(pdev, preq);
+               break;
+       case USB_ENDPOINT_XFER_ISOC:
+               ret = cdnsp_queue_isoc_tx_prepare(pdev, preq);
+       }
+
+       if (ret)
+               goto unmap;
+
+       return 0;
+
+unmap:
+       usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
+                                       pep->direction);
+       list_del(&preq->list);
+       trace_cdnsp_request_enqueue_error(preq);
+
+       return ret;
+}
+
+/*
+ * Remove the request's TD from the endpoint ring. This may cause the
+ * controller to stop USB transfers, potentially stopping in the middle of a
+ * TRB buffer. The controller should pick up where it left off in the TD,
+ * unless a Set Transfer Ring Dequeue Pointer is issued.
+ *
+ * The TRBs that make up the buffers for the canceled request will be "removed"
+ * from the ring. Since the ring is a contiguous structure, they can't be
+ * physically removed. Instead, there are two options:
+ *
+ *  1) If the controller is in the middle of processing the request to be
+ *     canceled, we simply move the ring's dequeue pointer past those TRBs
+ *     using the Set Transfer Ring Dequeue Pointer command. This will be
+ *     the common case, when drivers timeout on the last submitted request
+ *     and attempt to cancel.
+ *
+ *  2) If the controller is in the middle of a different TD, we turn the TRBs
+ *     into a series of 1-TRB transfer no-op TDs. No-ops shouldn't be chained.
+ *     The controller will need to invalidate any TRBs it has cached after
+ *     the stop endpoint command.
+ *
+ *  3) The TD may have completed by the time the Stop Endpoint Command
+ *     completes, so software needs to handle that case too.
+ */
+int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq)
+{
+       struct cdnsp_device *pdev = pep->pdev;
+       int ret;
+
+       trace_cdnsp_request_dequeue(preq);
+
+       if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_RUNNING) {
+               ret = cdnsp_cmd_stop_ep(pdev, pep);
+               if (ret)
+                       return ret;
+       }
+
+       return cdnsp_remove_request(pdev, preq, pep);
+}
+
+static void cdnsp_zero_in_ctx(struct cdnsp_device *pdev)
+{
+       struct cdnsp_input_control_ctx *ctrl_ctx;
+       struct cdnsp_slot_ctx *slot_ctx;
+       struct cdnsp_ep_ctx *ep_ctx;
+       int i;
+
+       ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+
+       /*
+        * When a device's add flag and drop flag are zero, any subsequent
+        * configure endpoint command will leave that endpoint's state
+        * untouched. Make sure we don't leave any old state in the input
+        * endpoint contexts.
+        */
+       ctrl_ctx->drop_flags = 0;
+       ctrl_ctx->add_flags = 0;
+       slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+       slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+
+       /* Endpoint 0 is always valid */
+       slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
+       for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i) {
+               ep_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, i);
+               ep_ctx->ep_info = 0;
+               ep_ctx->ep_info2 = 0;
+               ep_ctx->deq = 0;
+               ep_ctx->tx_info = 0;
+       }
+}
+
+/* Issue a configure endpoint command and wait for it to finish. */
+static int cdnsp_configure_endpoint(struct cdnsp_device *pdev)
+{
+       int ret;
+
+       cdnsp_queue_configure_endpoint(pdev, pdev->cmd.in_ctx->dma);
+       cdnsp_ring_cmd_db(pdev);
+       ret = cdnsp_wait_for_cmd_compl(pdev);
+       if (ret) {
+               dev_err(pdev->dev,
+                       "ERR: unexpected command completion code 0x%x.\n", ret);
+               return -EINVAL;
+       }
+
+       return ret;
+}
+
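+/*
+ * Walk the not-yet-processed part of the event ring and flag every transfer
+ * event that belongs to the given endpoint with TRB_EVENT_INVALIDATE.
+ */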
+static void cdnsp_invalidate_ep_events(struct cdnsp_device *pdev,
+                                      struct cdnsp_ep *pep)
+{
+       struct cdnsp_segment *segment;
+       union cdnsp_trb *event;
+       u32 cycle_state;
+       u32 data;
+
+       event = pdev->event_ring->dequeue;
+       segment = pdev->event_ring->deq_seg;
+       cycle_state = pdev->event_ring->cycle_state;
+
+       while (1) {
+               data = le32_to_cpu(event->trans_event.flags);
+
+               /* Check the owner of the TRB. */
+               if ((data & TRB_CYCLE) != cycle_state)
+                       break;
+
+               if (TRB_FIELD_TO_TYPE(data) == TRB_TRANSFER &&
+                   TRB_TO_EP_ID(data) == (pep->idx + 1)) {
+                       data |= TRB_EVENT_INVALIDATE;
+                       event->trans_event.flags = cpu_to_le32(data);
+               }
+
+               if (cdnsp_last_trb_on_seg(segment, event)) {
+                       cycle_state ^= 1;
+                       segment = pdev->event_ring->deq_seg->next;
+                       event = segment->trbs;
+               } else {
+                       event++;
+               }
+       }
+}
+
+int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev)
+{
+       struct cdnsp_segment *event_deq_seg;
+       union cdnsp_trb *cmd_trb;
+       dma_addr_t cmd_deq_dma;
+       union cdnsp_trb *event;
+       u32 cycle_state;
+       int ret, val;
+       u64 cmd_dma;
+       u32 flags;
+
+       cmd_trb = pdev->cmd.command_trb;
+       pdev->cmd.status = 0;
+
+       trace_cdnsp_cmd_wait_for_compl(pdev->cmd_ring, &cmd_trb->generic);
+
+       ret = readl_poll_timeout_atomic(&pdev->op_regs->cmd_ring, val,
+                                       !CMD_RING_BUSY(val), 1,
+                                       CDNSP_CMD_TIMEOUT);
+       if (ret) {
+               dev_err(pdev->dev, "ERR: Timeout while waiting for command\n");
+               trace_cdnsp_cmd_timeout(pdev->cmd_ring, &cmd_trb->generic);
+               pdev->cdnsp_state = CDNSP_STATE_DYING;
+               return -ETIMEDOUT;
+       }
+
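+       /*
+        * Scan the event ring for the completion event whose command TRB
+        * pointer matches the command that was just queued.
+        */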
+       event = pdev->event_ring->dequeue;
+       event_deq_seg = pdev->event_ring->deq_seg;
+       cycle_state = pdev->event_ring->cycle_state;
+
+       cmd_deq_dma = cdnsp_trb_virt_to_dma(pdev->cmd_ring->deq_seg, cmd_trb);
+       if (!cmd_deq_dma)
+               return -EINVAL;
+
+       while (1) {
+               flags = le32_to_cpu(event->event_cmd.flags);
+
+               /* Check the owner of the TRB. */
+               if ((flags & TRB_CYCLE) != cycle_state)
+                       return -EINVAL;
+
+               cmd_dma = le64_to_cpu(event->event_cmd.cmd_trb);
+
+               /*
+                * Check whether the completion event is for last queued
+                * command.
+                */
+               if (TRB_FIELD_TO_TYPE(flags) != TRB_COMPLETION ||
+                   cmd_dma != (u64)cmd_deq_dma) {
+                       if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
+                               event++;
+                               continue;
+                       }
+
+                       if (cdnsp_last_trb_on_ring(pdev->event_ring,
+                                                  event_deq_seg, event))
+                               cycle_state ^= 1;
+
+                       event_deq_seg = event_deq_seg->next;
+                       event = event_deq_seg->trbs;
+                       continue;
+               }
+
+               trace_cdnsp_handle_command(pdev->cmd_ring, &cmd_trb->generic);
+
+               pdev->cmd.status = GET_COMP_CODE(le32_to_cpu(event->event_cmd.status));
+               if (pdev->cmd.status == COMP_SUCCESS)
+                       return 0;
+
+               return -pdev->cmd.status;
+       }
+}
+
+int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
+                       struct cdnsp_ep *pep,
+                       int value)
+{
+       int ret;
+
+       trace_cdnsp_ep_halt(value ? "Set" : "Clear");
+
+       if (value) {
+               ret = cdnsp_cmd_stop_ep(pdev, pep);
+               if (ret)
+                       return ret;
+
+               if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_STOPPED) {
+                       cdnsp_queue_halt_endpoint(pdev, pep->idx);
+                       cdnsp_ring_cmd_db(pdev);
+                       ret = cdnsp_wait_for_cmd_compl(pdev);
+               }
+
+               pep->ep_state |= EP_HALTED;
+       } else {
+               /*
+                * In device mode, the driver can issue the Reset Endpoint
+                * command from any endpoint state.
+                */
+               cdnsp_queue_reset_ep(pdev, pep->idx);
+               cdnsp_ring_cmd_db(pdev);
+               ret = cdnsp_wait_for_cmd_compl(pdev);
+               trace_cdnsp_handle_cmd_reset_ep(pep->out_ctx);
+
+               if (ret)
+                       return ret;
+
+               pep->ep_state &= ~EP_HALTED;
+
+               if (pep->idx != 0 && !(pep->ep_state & EP_WEDGE))
+                       cdnsp_ring_doorbell_for_active_rings(pdev, pep);
+
+               pep->ep_state &= ~EP_WEDGE;
+       }
+
+       return 0;
+}
+
+static int cdnsp_update_eps_configuration(struct cdnsp_device *pdev,
+                                         struct cdnsp_ep *pep)
+{
+       struct cdnsp_input_control_ctx *ctrl_ctx;
+       struct cdnsp_slot_ctx *slot_ctx;
+       int ret = 0;
+       u32 ep_sts;
+       int i;
+
+       ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+
+       /* Don't issue the command if there are no endpoints to update. */
+       if (ctrl_ctx->add_flags == 0 && ctrl_ctx->drop_flags == 0)
+               return 0;
+
+       ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
+       ctrl_ctx->add_flags &= cpu_to_le32(~EP0_FLAG);
+       ctrl_ctx->drop_flags &= cpu_to_le32(~(SLOT_FLAG | EP0_FLAG));
+
+       /* Fix up Context Entries field. Minimum value is EP0 == BIT(1). */
+       slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+       for (i = CDNSP_ENDPOINTS_NUM; i >= 1; i--) {
+               __le32 le32 = cpu_to_le32(BIT(i));
+
+               if ((pdev->eps[i - 1].ring && !(ctrl_ctx->drop_flags & le32)) ||
+                   (ctrl_ctx->add_flags & le32) || i == 1) {
+                       slot_ctx->dev_info &= cpu_to_le32(~LAST_CTX_MASK);
+                       slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(i));
+                       break;
+               }
+       }
+
+       ep_sts = GET_EP_CTX_STATE(pep->out_ctx);
+
+       if ((ctrl_ctx->add_flags != cpu_to_le32(SLOT_FLAG) &&
+            ep_sts == EP_STATE_DISABLED) ||
+           (ep_sts != EP_STATE_DISABLED && ctrl_ctx->drop_flags))
+               ret = cdnsp_configure_endpoint(pdev);
+
+       trace_cdnsp_configure_endpoint(cdnsp_get_slot_ctx(&pdev->out_ctx));
+       trace_cdnsp_handle_cmd_config_ep(pep->out_ctx);
+
+       cdnsp_zero_in_ctx(pdev);
+
+       return ret;
+}
+
+/*
+ * This submits a Reset Device Command, which will set the device state to 0,
+ * set the device address to 0, and disable all the endpoints except the default
+ * control endpoint. The USB core should come back and call
+ * cdnsp_setup_device(), and then re-set up the configuration.
+ */
+int cdnsp_reset_device(struct cdnsp_device *pdev)
+{
+       struct cdnsp_slot_ctx *slot_ctx;
+       int slot_state;
+       int ret, i;
+
+       slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+       slot_ctx->dev_info = 0;
+       pdev->device_address = 0;
+
+       /* If device is not setup, there is no point in resetting it. */
+       slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+       slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+       trace_cdnsp_reset_device(slot_ctx);
+
+       if (slot_state <= SLOT_STATE_DEFAULT &&
+           pdev->eps[0].ep_state & EP_HALTED) {
+               cdnsp_halt_endpoint(pdev, &pdev->eps[0], 0);
+       }
+
+       /*
+        * During the Reset Device command, the controller transitions
+        * endpoint 0 to the Running state.
+        */
+       pdev->eps[0].ep_state &= ~(EP_STOPPED | EP_HALTED);
+       pdev->eps[0].ep_state |= EP_ENABLED;
+
+       if (slot_state <= SLOT_STATE_DEFAULT)
+               return 0;
+
+       cdnsp_queue_reset_device(pdev);
+       cdnsp_ring_cmd_db(pdev);
+       ret = cdnsp_wait_for_cmd_compl(pdev);
+
+       /*
+        * After the Reset Device command, all endpoints other than the
+        * default control endpoint are in the Disabled state.
+        */
+       for (i = 1; i < CDNSP_ENDPOINTS_NUM; ++i)
+               pdev->eps[i].ep_state |= EP_STOPPED;
+
+       trace_cdnsp_handle_cmd_reset_dev(slot_ctx);
+
+       if (ret)
+               dev_err(pdev->dev, "Reset device failed with error code %d\n",
+                       ret);
+
+       return ret;
+}
+
+/*
+ * Sets the MaxPStreams field and the Linear Stream Array field.
+ * Sets the dequeue pointer to the stream context array.
+ */
+static void cdnsp_setup_streams_ep_input_ctx(struct cdnsp_device *pdev,
+                                            struct cdnsp_ep_ctx *ep_ctx,
+                                            struct cdnsp_stream_info *stream_info)
+{
+       u32 max_primary_streams;
+
+       /*
+        * MaxPStreams is the number of stream context array entries, not the
+        * number we're actually using. Must be in 2^(MaxPStreams + 1) format.
+        * fls(0) = 0, fls(0x1) = 1, fls(0x10) = 5, fls(0x100) = 9, etc.
+        */
+       max_primary_streams = fls(stream_info->num_stream_ctxs) - 2;
+       ep_ctx->ep_info &= cpu_to_le32(~EP_MAXPSTREAMS_MASK);
+       ep_ctx->ep_info |= cpu_to_le32(EP_MAXPSTREAMS(max_primary_streams)
+                                      | EP_HAS_LSA);
+       ep_ctx->deq  = cpu_to_le64(stream_info->ctx_array_dma);
+}
+
+/*
+ * Drivers use this function to prepare a bulk endpoint for streams.
+ *
+ * Don't allow the call to succeed if the endpoint only supports one stream
+ * (which means it doesn't support streams at all).
+ */
+int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+       unsigned int num_streams = usb_ss_max_streams(pep->endpoint.comp_desc);
+       unsigned int num_stream_ctxs;
+       int ret;
+
+       if (num_streams == 0)
+               return 0;
+
+       if (num_streams > STREAM_NUM_STREAMS)
+               return -EINVAL;
+
+       /*
+        * Add two to the number of streams requested: one for stream 0,
+        * which is reserved for controller usage, and one additional
+        * stream for the TASK SET FULL response.
+        */
+       num_streams += 2;
+
+       /* The stream context array size must be a power of two */
+       num_stream_ctxs = roundup_pow_of_two(num_streams);
+
+       trace_cdnsp_stream_number(pep, num_stream_ctxs, num_streams);
+
+       ret = cdnsp_alloc_stream_info(pdev, pep, num_stream_ctxs, num_streams);
+       if (ret)
+               return ret;
+
+       cdnsp_setup_streams_ep_input_ctx(pdev, pep->in_ctx, &pep->stream_info);
+
+       pep->ep_state |= EP_HAS_STREAMS;
+       pep->stream_info.td_count = 0;
+       pep->stream_info.first_prime_det = 0;
+
+       /* Subtract 1 for stream 0, which drivers can't use. */
+       return num_streams - 1;
+}
+
+int cdnsp_disable_slot(struct cdnsp_device *pdev)
+{
+       int ret;
+
+       cdnsp_queue_slot_control(pdev, TRB_DISABLE_SLOT);
+       cdnsp_ring_cmd_db(pdev);
+       ret = cdnsp_wait_for_cmd_compl(pdev);
+
+       pdev->slot_id = 0;
+       pdev->active_port = NULL;
+
+       trace_cdnsp_handle_cmd_disable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));
+
+       memset(pdev->in_ctx.bytes, 0, CDNSP_CTX_SIZE);
+       memset(pdev->out_ctx.bytes, 0, CDNSP_CTX_SIZE);
+
+       return ret;
+}
+
+int cdnsp_enable_slot(struct cdnsp_device *pdev)
+{
+       struct cdnsp_slot_ctx *slot_ctx;
+       int slot_state;
+       int ret;
+
+       /* Enable the slot only if it is currently disabled. */
+       slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+       slot_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+
+       if (slot_state != SLOT_STATE_DISABLED)
+               return 0;
+
+       cdnsp_queue_slot_control(pdev, TRB_ENABLE_SLOT);
+       cdnsp_ring_cmd_db(pdev);
+       ret = cdnsp_wait_for_cmd_compl(pdev);
+       if (ret)
+               goto show_trace;
+
+       pdev->slot_id = 1;
+
+show_trace:
+       trace_cdnsp_handle_cmd_enable_slot(cdnsp_get_slot_ctx(&pdev->out_ctx));
+
+       return ret;
+}
+
+/*
+ * Issue an Address Device command with BSR=0 if setup is SETUP_CONTEXT_ONLY,
+ * or with BSR=1 if setup is SETUP_CONTEXT_ADDRESS.
+ */
+int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup)
+{
+       struct cdnsp_input_control_ctx *ctrl_ctx;
+       struct cdnsp_slot_ctx *slot_ctx;
+       int dev_state = 0;
+       int ret;
+
+       if (!pdev->slot_id) {
+               trace_cdnsp_slot_id("incorrect");
+               return -EINVAL;
+       }
+
+       if (!pdev->active_port->port_num)
+               return -EINVAL;
+
+       slot_ctx = cdnsp_get_slot_ctx(&pdev->out_ctx);
+       dev_state = GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state));
+
+       if (setup == SETUP_CONTEXT_ONLY && dev_state == SLOT_STATE_DEFAULT) {
+               trace_cdnsp_slot_already_in_default(slot_ctx);
+               return 0;
+       }
+
+       slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+       ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+
+       if (!slot_ctx->dev_info || dev_state == SLOT_STATE_DEFAULT) {
+               ret = cdnsp_setup_addressable_priv_dev(pdev);
+               if (ret)
+                       return ret;
+       }
+
+       cdnsp_copy_ep0_dequeue_into_input_ctx(pdev);
+
+       ctrl_ctx->add_flags = cpu_to_le32(SLOT_FLAG | EP0_FLAG);
+       ctrl_ctx->drop_flags = 0;
+
+       trace_cdnsp_setup_device_slot(slot_ctx);
+
+       cdnsp_queue_address_device(pdev, pdev->in_ctx.dma, setup);
+       cdnsp_ring_cmd_db(pdev);
+       ret = cdnsp_wait_for_cmd_compl(pdev);
+
+       trace_cdnsp_handle_cmd_addr_dev(cdnsp_get_slot_ctx(&pdev->out_ctx));
+
+       /* Zero the input context control for later use. */
+       ctrl_ctx->add_flags = 0;
+       ctrl_ctx->drop_flags = 0;
+
+       return ret;
+}
+
+void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *pdev,
+                                struct usb_request *req,
+                                int enable)
+{
+       if (pdev->active_port != &pdev->usb2_port || !pdev->gadget.lpm_capable)
+               return;
+
+       trace_cdnsp_lpm(enable);
+
+       if (enable)
+               writel(PORT_BESL(CDNSP_DEFAULT_BESL) | PORT_L1S_NYET | PORT_HLE,
+                      &pdev->active_port->regs->portpmsc);
+       else
+               writel(PORT_L1S_NYET, &pdev->active_port->regs->portpmsc);
+}
+
+static int cdnsp_get_frame(struct cdnsp_device *pdev)
+{
+       return readl(&pdev->run_regs->microframe_index) >> 3;
+}
+
+static int cdnsp_gadget_ep_enable(struct usb_ep *ep,
+                                 const struct usb_endpoint_descriptor *desc)
+{
+       struct cdnsp_input_control_ctx *ctrl_ctx;
+       struct cdnsp_device *pdev;
+       struct cdnsp_ep *pep;
+       unsigned long flags;
+       u32 added_ctxs;
+       int ret;
+
+       if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT ||
+           !desc->wMaxPacketSize)
+               return -EINVAL;
+
+       pep = to_cdnsp_ep(ep);
+       pdev = pep->pdev;
+
+       if (dev_WARN_ONCE(pdev->dev, pep->ep_state & EP_ENABLED,
+                         "%s is already enabled\n", pep->name))
+               return 0;
+
+       spin_lock_irqsave(&pdev->lock, flags);
+
+       added_ctxs = cdnsp_get_endpoint_flag(desc);
+       if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
+               dev_err(pdev->dev, "ERROR: Bad endpoint number\n");
+               ret = -EINVAL;
+               goto unlock;
+       }
+
+       pep->interval = desc->bInterval ? BIT(desc->bInterval - 1) : 0;
+
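+       /*
+        * For high- and super-speed endpoints bInterval is already expressed
+        * in 125 us units. Full-speed intervals are in frames, so convert
+        * them to microframes (1 frame == 8 microframes).
+        */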
+       if (pdev->gadget.speed == USB_SPEED_FULL) {
+               if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_INT)
+                       pep->interval = desc->bInterval << 3;
+               if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC)
+                       pep->interval = BIT(desc->bInterval - 1) << 3;
+       }
+
+       if (usb_endpoint_type(desc) == USB_ENDPOINT_XFER_ISOC) {
+               if (pep->interval > BIT(12)) {
+                       dev_err(pdev->dev, "bInterval %d not supported\n",
+                               desc->bInterval);
+                       ret = -EINVAL;
+                       goto unlock;
+               }
+               cdnsp_set_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
+       }
+
+       ret = cdnsp_endpoint_init(pdev, pep, GFP_ATOMIC);
+       if (ret)
+               goto unlock;
+
+       ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+       ctrl_ctx->add_flags = cpu_to_le32(added_ctxs);
+       ctrl_ctx->drop_flags = 0;
+
+       ret = cdnsp_update_eps_configuration(pdev, pep);
+       if (ret) {
+               cdnsp_free_endpoint_rings(pdev, pep);
+               goto unlock;
+       }
+
+       pep->ep_state |= EP_ENABLED;
+       pep->ep_state &= ~EP_STOPPED;
+
+unlock:
+       trace_cdnsp_ep_enable_end(pep, 0);
+       spin_unlock_irqrestore(&pdev->lock, flags);
+
+       return ret;
+}
+
+static int cdnsp_gadget_ep_disable(struct usb_ep *ep)
+{
+       struct cdnsp_input_control_ctx *ctrl_ctx;
+       struct cdnsp_request *preq;
+       struct cdnsp_device *pdev;
+       struct cdnsp_ep *pep;
+       unsigned long flags;
+       u32 drop_flag;
+       int ret = 0;
+
+       if (!ep)
+               return -EINVAL;
+
+       pep = to_cdnsp_ep(ep);
+       pdev = pep->pdev;
+
+       spin_lock_irqsave(&pdev->lock, flags);
+
+       if (!(pep->ep_state & EP_ENABLED)) {
+               dev_err(pdev->dev, "%s is already disabled\n", pep->name);
+               ret = -EINVAL;
+               goto finish;
+       }
+
+       cdnsp_cmd_stop_ep(pdev, pep);
+       pep->ep_state |= EP_DIS_IN_RROGRESS;
+       cdnsp_cmd_flush_ep(pdev, pep);
+
+       /* Remove all queued USB requests. */
+       while (!list_empty(&pep->pending_list)) {
+               preq = next_request(&pep->pending_list);
+               cdnsp_ep_dequeue(pep, preq);
+       }
+
+       cdnsp_invalidate_ep_events(pdev, pep);
+
+       pep->ep_state &= ~EP_DIS_IN_RROGRESS;
+       drop_flag = cdnsp_get_endpoint_flag(pep->endpoint.desc);
+       ctrl_ctx = cdnsp_get_input_control_ctx(&pdev->in_ctx);
+       ctrl_ctx->drop_flags = cpu_to_le32(drop_flag);
+       ctrl_ctx->add_flags = 0;
+
+       cdnsp_endpoint_zero(pdev, pep);
+
+       ret = cdnsp_update_eps_configuration(pdev, pep);
+       cdnsp_free_endpoint_rings(pdev, pep);
+
+       pep->ep_state &= ~EP_ENABLED;
+       pep->ep_state |= EP_STOPPED;
+
+finish:
+       trace_cdnsp_ep_disable_end(pep, 0);
+       spin_unlock_irqrestore(&pdev->lock, flags);
+
+       return ret;
+}
+
+static struct usb_request *cdnsp_gadget_ep_alloc_request(struct usb_ep *ep,
+                                                        gfp_t gfp_flags)
+{
+       struct cdnsp_ep *pep = to_cdnsp_ep(ep);
+       struct cdnsp_request *preq;
+
+       preq = kzalloc(sizeof(*preq), gfp_flags);
+       if (!preq)
+               return NULL;
+
+       preq->epnum = pep->number;
+       preq->pep = pep;
+
+       trace_cdnsp_alloc_request(preq);
+
+       return &preq->request;
+}
+
+static void cdnsp_gadget_ep_free_request(struct usb_ep *ep,
+                                        struct usb_request *request)
+{
+       struct cdnsp_request *preq = to_cdnsp_request(request);
+
+       trace_cdnsp_free_request(preq);
+       kfree(preq);
+}
+
+static int cdnsp_gadget_ep_queue(struct usb_ep *ep,
+                                struct usb_request *request,
+                                gfp_t gfp_flags)
+{
+       struct cdnsp_request *preq;
+       struct cdnsp_device *pdev;
+       struct cdnsp_ep *pep;
+       unsigned long flags;
+       int ret;
+
+       if (!request || !ep)
+               return -EINVAL;
+
+       pep = to_cdnsp_ep(ep);
+       pdev = pep->pdev;
+
+       if (!(pep->ep_state & EP_ENABLED)) {
+               dev_err(pdev->dev, "%s: can't queue to disabled endpoint\n",
+                       pep->name);
+               return -EINVAL;
+       }
+
+       preq = to_cdnsp_request(request);
+       spin_lock_irqsave(&pdev->lock, flags);
+       ret = cdnsp_ep_enqueue(pep, preq);
+       spin_unlock_irqrestore(&pdev->lock, flags);
+
+       return ret;
+}
+
+static int cdnsp_gadget_ep_dequeue(struct usb_ep *ep,
+                                  struct usb_request *request)
+{
+       struct cdnsp_ep *pep = to_cdnsp_ep(ep);
+       struct cdnsp_device *pdev = pep->pdev;
+       unsigned long flags;
+       int ret;
+
+       if (!pep->endpoint.desc) {
+               dev_err(pdev->dev,
+                       "%s: can't dequeue from a disabled endpoint\n",
+                       pep->name);
+               return -ESHUTDOWN;
+       }
+
+       spin_lock_irqsave(&pdev->lock, flags);
+       ret = cdnsp_ep_dequeue(pep, to_cdnsp_request(request));
+       spin_unlock_irqrestore(&pdev->lock, flags);
+
+       return ret;
+}
+
+static int cdnsp_gadget_ep_set_halt(struct usb_ep *ep, int value)
+{
+       struct cdnsp_ep *pep = to_cdnsp_ep(ep);
+       struct cdnsp_device *pdev = pep->pdev;
+       struct cdnsp_request *preq;
+       unsigned long flags = 0;
+       int ret;
+
+       spin_lock_irqsave(&pdev->lock, flags);
+
+       preq = next_request(&pep->pending_list);
+       if (value) {
+               if (preq) {
+                       trace_cdnsp_ep_busy_try_halt_again(pep, 0);
+                       ret = -EAGAIN;
+                       goto done;
+               }
+       }
+
+       ret = cdnsp_halt_endpoint(pdev, pep, value);
+
+done:
+       spin_unlock_irqrestore(&pdev->lock, flags);
+       return ret;
+}
+
+static int cdnsp_gadget_ep_set_wedge(struct usb_ep *ep)
+{
+       struct cdnsp_ep *pep = to_cdnsp_ep(ep);
+       struct cdnsp_device *pdev = pep->pdev;
+       unsigned long flags = 0;
+       int ret;
+
+       spin_lock_irqsave(&pdev->lock, flags);
+       pep->ep_state |= EP_WEDGE;
+       ret = cdnsp_halt_endpoint(pdev, pep, 1);
+       spin_unlock_irqrestore(&pdev->lock, flags);
+
+       return ret;
+}
+
+static const struct usb_ep_ops cdnsp_gadget_ep0_ops = {
+       .enable         = cdnsp_gadget_ep_enable,
+       .disable        = cdnsp_gadget_ep_disable,
+       .alloc_request  = cdnsp_gadget_ep_alloc_request,
+       .free_request   = cdnsp_gadget_ep_free_request,
+       .queue          = cdnsp_gadget_ep_queue,
+       .dequeue        = cdnsp_gadget_ep_dequeue,
+       .set_halt       = cdnsp_gadget_ep_set_halt,
+       .set_wedge      = cdnsp_gadget_ep_set_wedge,
+};
+
+static const struct usb_ep_ops cdnsp_gadget_ep_ops = {
+       .enable         = cdnsp_gadget_ep_enable,
+       .disable        = cdnsp_gadget_ep_disable,
+       .alloc_request  = cdnsp_gadget_ep_alloc_request,
+       .free_request   = cdnsp_gadget_ep_free_request,
+       .queue          = cdnsp_gadget_ep_queue,
+       .dequeue        = cdnsp_gadget_ep_dequeue,
+       .set_halt       = cdnsp_gadget_ep_set_halt,
+       .set_wedge      = cdnsp_gadget_ep_set_wedge,
+};
+
+void cdnsp_gadget_giveback(struct cdnsp_ep *pep,
+                          struct cdnsp_request *preq,
+                          int status)
+{
+       struct cdnsp_device *pdev = pep->pdev;
+
+       list_del(&preq->list);
+
+       if (preq->request.status == -EINPROGRESS)
+               preq->request.status = status;
+
+       usb_gadget_unmap_request_by_dev(pdev->dev, &preq->request,
+                                       preq->direction);
+
+       trace_cdnsp_request_giveback(preq);
+
+       if (preq != &pdev->ep0_preq) {
+               spin_unlock(&pdev->lock);
+               usb_gadget_giveback_request(&pep->endpoint, &preq->request);
+               spin_lock(&pdev->lock);
+       }
+}
+
+static struct usb_endpoint_descriptor cdnsp_gadget_ep0_desc = {
+       .bLength =              USB_DT_ENDPOINT_SIZE,
+       .bDescriptorType =      USB_DT_ENDPOINT,
+       .bmAttributes =         USB_ENDPOINT_XFER_CONTROL,
+};
+
+static int cdnsp_run(struct cdnsp_device *pdev,
+                    enum usb_device_speed speed)
+{
+       u32 fs_speed = 0;
+       u64 temp_64;
+       u32 temp;
+       int ret;
+
+       temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
+       temp_64 &= ~ERST_PTR_MASK;
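+       /* Set the default interrupt moderation interval. */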
+       temp = readl(&pdev->ir_set->irq_control);
+       temp &= ~IMOD_INTERVAL_MASK;
+       temp |= ((IMOD_DEFAULT_INTERVAL / 250) & IMOD_INTERVAL_MASK);
+       writel(temp, &pdev->ir_set->irq_control);
+
+       temp = readl(&pdev->port3x_regs->mode_addr);
+
+       switch (speed) {
+       case USB_SPEED_SUPER_PLUS:
+               temp |= CFG_3XPORT_SSP_SUPPORT;
+               break;
+       case USB_SPEED_SUPER:
+               temp &= ~CFG_3XPORT_SSP_SUPPORT;
+               break;
+       case USB_SPEED_HIGH:
+               break;
+       case USB_SPEED_FULL:
+               fs_speed = PORT_REG6_FORCE_FS;
+               break;
+       default:
+               dev_err(pdev->dev, "invalid maximum_speed parameter %d\n",
+                       speed);
+               fallthrough;
+       case USB_SPEED_UNKNOWN:
+               /* Default to superspeed. */
+               speed = USB_SPEED_SUPER;
+               break;
+       }
+
+       if (speed >= USB_SPEED_SUPER) {
+               writel(temp, &pdev->port3x_regs->mode_addr);
+               cdnsp_set_link_state(pdev, &pdev->usb3_port.regs->portsc,
+                                    XDEV_RXDETECT);
+       } else {
+               cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
+       }
+
+       cdnsp_set_link_state(pdev, &pdev->usb2_port.regs->portsc,
+                            XDEV_RXDETECT);
+
+       cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+
+       writel(PORT_REG6_L1_L0_HW_EN | fs_speed, &pdev->port20_regs->port_reg6);
+
+       ret = cdnsp_start(pdev);
+       if (ret) {
+               ret = -ENODEV;
+               goto err;
+       }
+
+       temp = readl(&pdev->op_regs->command);
+       temp |= (CMD_INTE);
+       writel(temp, &pdev->op_regs->command);
+
+       temp = readl(&pdev->ir_set->irq_pending);
+       writel(IMAN_IE_SET(temp), &pdev->ir_set->irq_pending);
+
+       trace_cdnsp_init("Controller ready to work");
+       return 0;
+err:
+       cdnsp_halt(pdev);
+       return ret;
+}
+
+static int cdnsp_gadget_udc_start(struct usb_gadget *g,
+                                 struct usb_gadget_driver *driver)
+{
+       enum usb_device_speed max_speed = driver->max_speed;
+       struct cdnsp_device *pdev = gadget_to_cdnsp(g);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&pdev->lock, flags);
+       pdev->gadget_driver = driver;
+
+       /* Limit the speed if necessary. */
+       max_speed = min(driver->max_speed, g->max_speed);
+       ret = cdnsp_run(pdev, max_speed);
+
+       spin_unlock_irqrestore(&pdev->lock, flags);
+
+       return ret;
+}
+
+/*
+ * Update Event Ring Dequeue Pointer:
+ * - When all events have finished
+ * - To avoid "Event Ring Full Error" condition
+ */
+void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev,
+                              union cdnsp_trb *event_ring_deq,
+                              u8 clear_ehb)
+{
+       u64 temp_64;
+       dma_addr_t deq;
+
+       temp_64 = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
+
+       /* If necessary, update the HW's version of the event ring deq ptr. */
+       if (event_ring_deq != pdev->event_ring->dequeue) {
+               deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
+                                           pdev->event_ring->dequeue);
+               temp_64 &= ERST_PTR_MASK;
+               temp_64 |= ((u64)deq & (u64)~ERST_PTR_MASK);
+       }
+
+       /* Clear the event handler busy flag (RW1C). */
+       if (clear_ehb)
+               temp_64 |= ERST_EHB;
+       else
+               temp_64 &= ~ERST_EHB;
+
+       cdnsp_write_64(temp_64, &pdev->ir_set->erst_dequeue);
+}
+
+static void cdnsp_clear_cmd_ring(struct cdnsp_device *pdev)
+{
+       struct cdnsp_segment *seg;
+       u64 val_64;
+       int i;
+
+       cdnsp_initialize_ring_info(pdev->cmd_ring);
+
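+       /* Zero each segment's TRBs, leaving the trailing link TRB intact. */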
+       seg = pdev->cmd_ring->first_seg;
+       for (i = 0; i < pdev->cmd_ring->num_segs; i++) {
+               memset(seg->trbs, 0,
+                      sizeof(union cdnsp_trb) * (TRBS_PER_SEGMENT - 1));
+               seg = seg->next;
+       }
+
+       /* Set the address in the Command Ring Control register. */
+       val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
+       val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
+                (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
+                pdev->cmd_ring->cycle_state;
+       cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
+}
+
+static void cdnsp_consume_all_events(struct cdnsp_device *pdev)
+{
+       struct cdnsp_segment *event_deq_seg;
+       union cdnsp_trb *event_ring_deq;
+       union cdnsp_trb *event;
+       u32 cycle_bit;
+
+       event_ring_deq = pdev->event_ring->dequeue;
+       event_deq_seg = pdev->event_ring->deq_seg;
+       event = pdev->event_ring->dequeue;
+
+       /* Update ring dequeue pointer. */
+       while (1) {
+               cycle_bit = (le32_to_cpu(event->event_cmd.flags) & TRB_CYCLE);
+
+               /* Does the controller or driver own the TRB? */
+               if (cycle_bit != pdev->event_ring->cycle_state)
+                       break;
+
+               cdnsp_inc_deq(pdev, pdev->event_ring);
+
+               if (!cdnsp_last_trb_on_seg(event_deq_seg, event)) {
+                       event++;
+                       continue;
+               }
+
+               if (cdnsp_last_trb_on_ring(pdev->event_ring, event_deq_seg,
+                                          event))
+                       cycle_bit ^= 1;
+
+               event_deq_seg = event_deq_seg->next;
+               event = event_deq_seg->trbs;
+       }
+
+       cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
+}
+
+static void cdnsp_stop(struct cdnsp_device *pdev)
+{
+       u32 temp;
+
+       cdnsp_cmd_flush_ep(pdev, &pdev->eps[0]);
+
+       /* Remove internally queued request for ep0. */
+       if (!list_empty(&pdev->eps[0].pending_list)) {
+               struct cdnsp_request *req;
+
+               req = next_request(&pdev->eps[0].pending_list);
+               if (req == &pdev->ep0_preq)
+                       cdnsp_ep_dequeue(&pdev->eps[0], req);
+       }
+
+       cdnsp_disable_port(pdev, &pdev->usb2_port.regs->portsc);
+       cdnsp_disable_port(pdev, &pdev->usb3_port.regs->portsc);
+       cdnsp_disable_slot(pdev);
+       cdnsp_halt(pdev);
+
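+       /*
+        * Clear the event interrupt status bit and disable further
+        * interrupt generation.
+        */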
+       temp = readl(&pdev->op_regs->status);
+       writel((temp & ~0x1fff) | STS_EINT, &pdev->op_regs->status);
+       temp = readl(&pdev->ir_set->irq_pending);
+       writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending);
+
+       cdnsp_clear_port_change_bit(pdev, &pdev->usb2_port.regs->portsc);
+       cdnsp_clear_port_change_bit(pdev, &pdev->usb3_port.regs->portsc);
+
+       /* Clear interrupt line */
+       temp = readl(&pdev->ir_set->irq_pending);
+       temp |= IMAN_IP;
+       writel(temp, &pdev->ir_set->irq_pending);
+
+       cdnsp_consume_all_events(pdev);
+       cdnsp_clear_cmd_ring(pdev);
+
+       trace_cdnsp_exit("Controller stopped.");
+}
+
+/*
+ * Stop controller.
+ * This function is called by the gadget core when the driver is removed.
+ * Disable slot, disable IRQs, and quiesce the controller.
+ */
+static int cdnsp_gadget_udc_stop(struct usb_gadget *g)
+{
+       struct cdnsp_device *pdev = gadget_to_cdnsp(g);
+       unsigned long flags;
+
+       spin_lock_irqsave(&pdev->lock, flags);
+       cdnsp_stop(pdev);
+       pdev->gadget_driver = NULL;
+       spin_unlock_irqrestore(&pdev->lock, flags);
+
+       return 0;
+}
+
+static int cdnsp_gadget_get_frame(struct usb_gadget *g)
+{
+       struct cdnsp_device *pdev = gadget_to_cdnsp(g);
+
+       return cdnsp_get_frame(pdev);
+}
+
+static void __cdnsp_gadget_wakeup(struct cdnsp_device *pdev)
+{
+       struct cdnsp_port_regs __iomem *port_regs;
+       u32 portpm, portsc;
+
+       port_regs = pdev->active_port->regs;
+       portsc = readl(&port_regs->portsc) & PORT_PLS_MASK;
+
+       /* Remote wakeup feature is not enabled by host. */
+       if (pdev->gadget.speed < USB_SPEED_SUPER && portsc == XDEV_U2) {
+               portpm = readl(&port_regs->portpmsc);
+
+               if (!(portpm & PORT_RWE))
+                       return;
+       }
+
+       if (portsc == XDEV_U3 && !pdev->may_wakeup)
+               return;
+
+       cdnsp_set_link_state(pdev, &port_regs->portsc, XDEV_U0);
+
+       pdev->cdnsp_state |= CDNSP_WAKEUP_PENDING;
+}
+
+static int cdnsp_gadget_wakeup(struct usb_gadget *g)
+{
+       struct cdnsp_device *pdev = gadget_to_cdnsp(g);
+       unsigned long flags;
+
+       spin_lock_irqsave(&pdev->lock, flags);
+       __cdnsp_gadget_wakeup(pdev);
+       spin_unlock_irqrestore(&pdev->lock, flags);
+
+       return 0;
+}
+
+static int cdnsp_gadget_set_selfpowered(struct usb_gadget *g,
+                                       int is_selfpowered)
+{
+       struct cdnsp_device *pdev = gadget_to_cdnsp(g);
+       unsigned long flags;
+
+       spin_lock_irqsave(&pdev->lock, flags);
+       g->is_selfpowered = !!is_selfpowered;
+       spin_unlock_irqrestore(&pdev->lock, flags);
+
+       return 0;
+}
+
+static int cdnsp_gadget_pullup(struct usb_gadget *gadget, int is_on)
+{
+       struct cdnsp_device *pdev = gadget_to_cdnsp(gadget);
+       struct cdns *cdns = dev_get_drvdata(pdev->dev);
+
+       trace_cdnsp_pullup(is_on);
+
+       if (!is_on) {
+               cdnsp_reset_device(pdev);
+               cdns_clear_vbus(cdns);
+       } else {
+               cdns_set_vbus(cdns);
+       }
+       return 0;
+}
+
+static const struct usb_gadget_ops cdnsp_gadget_ops = {
+       .get_frame              = cdnsp_gadget_get_frame,
+       .wakeup                 = cdnsp_gadget_wakeup,
+       .set_selfpowered        = cdnsp_gadget_set_selfpowered,
+       .pullup                 = cdnsp_gadget_pullup,
+       .udc_start              = cdnsp_gadget_udc_start,
+       .udc_stop               = cdnsp_gadget_udc_stop,
+};
+
+static void cdnsp_get_ep_buffering(struct cdnsp_device *pdev,
+                                  struct cdnsp_ep *pep)
+{
+       void __iomem *reg = &pdev->cap_regs->hc_capbase;
+       int endpoints;
+
+       reg += cdnsp_find_next_ext_cap(reg, 0, XBUF_CAP_ID);
+
+       if (!pep->direction) {
+               pep->buffering = readl(reg + XBUF_RX_TAG_MASK_0_OFFSET);
+               pep->buffering_period = readl(reg + XBUF_RX_TAG_MASK_1_OFFSET);
+               pep->buffering = (pep->buffering + 1) / 2;
+               pep->buffering_period = (pep->buffering_period + 1) / 2;
+               return;
+       }
+
+       endpoints = HCS_ENDPOINTS(pdev->hcs_params1) / 2;
+
+       /* Advance reg to the XBUF_TX_TAG_MASK_0 register. */
+       reg += XBUF_TX_CMD_OFFSET + (endpoints * 2 + 2) * sizeof(u32);
+       /* Advance reg to the XBUF_TX_TAG_MASK_N associated with this endpoint. */
+       reg += pep->number * sizeof(u32) * 2;
+
+       pep->buffering = (readl(reg) + 1) / 2;
+       pep->buffering_period = pep->buffering;
+}
+
+static int cdnsp_gadget_init_endpoints(struct cdnsp_device *pdev)
+{
+       int max_streams = HCC_MAX_PSA(pdev->hcc_params);
+       struct cdnsp_ep *pep;
+       int i;
+
+       INIT_LIST_HEAD(&pdev->gadget.ep_list);
+
+       if (max_streams < STREAM_LOG_STREAMS) {
+               dev_err(pdev->dev, "Stream size %d not supported\n",
+                       max_streams);
+               return -EINVAL;
+       }
+
+       max_streams = STREAM_LOG_STREAMS;
+
+       for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
+               bool direction = !(i & 1); /* Start from OUT endpoint. */
+               u8 epnum = ((i + 1) >> 1);
+
+               if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction))
+                       continue;
+
+               pep = &pdev->eps[i];
+               pep->pdev = pdev;
+               pep->number = epnum;
+               pep->direction = direction; /* 0 for OUT, 1 for IN. */
+
+               /*
+                * Ep0 is bidirectional, so ep0in and ep0out are represented by
+                * pdev->eps[0]
+                */
+               if (epnum == 0) {
+                       snprintf(pep->name, sizeof(pep->name), "ep%d%s",
+                                epnum, "BiDir");
+
+                       pep->idx = 0;
+                       usb_ep_set_maxpacket_limit(&pep->endpoint, 512);
+                       pep->endpoint.maxburst = 1;
+                       pep->endpoint.ops = &cdnsp_gadget_ep0_ops;
+                       pep->endpoint.desc = &cdnsp_gadget_ep0_desc;
+                       pep->endpoint.comp_desc = NULL;
+                       pep->endpoint.caps.type_control = true;
+                       pep->endpoint.caps.dir_in = true;
+                       pep->endpoint.caps.dir_out = true;
+
+                       pdev->ep0_preq.epnum = pep->number;
+                       pdev->ep0_preq.pep = pep;
+                       pdev->gadget.ep0 = &pep->endpoint;
+               } else {
+                       snprintf(pep->name, sizeof(pep->name), "ep%d%s",
+                                epnum, (pep->direction) ? "in" : "out");
+
+                       pep->idx =  (epnum * 2 + (direction ? 1 : 0)) - 1;
+                       usb_ep_set_maxpacket_limit(&pep->endpoint, 1024);
+
+                       pep->endpoint.max_streams = max_streams;
+                       pep->endpoint.ops = &cdnsp_gadget_ep_ops;
+                       list_add_tail(&pep->endpoint.ep_list,
+                                     &pdev->gadget.ep_list);
+
+                       pep->endpoint.caps.type_iso = true;
+                       pep->endpoint.caps.type_bulk = true;
+                       pep->endpoint.caps.type_int = true;
+
+                       pep->endpoint.caps.dir_in = direction;
+                       pep->endpoint.caps.dir_out = !direction;
+               }
+
+               pep->endpoint.name = pep->name;
+               pep->in_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, pep->idx);
+               pep->out_ctx = cdnsp_get_ep_ctx(&pdev->out_ctx, pep->idx);
+               cdnsp_get_ep_buffering(pdev, pep);
+
+               dev_dbg(pdev->dev, "Init %s, MPS: %04x SupType: "
+                       "CTRL: %s, INT: %s, BULK: %s, ISOC %s, "
+                       "SupDir IN: %s, OUT: %s\n",
+                       pep->name, 1024,
+                       (pep->endpoint.caps.type_control) ? "yes" : "no",
+                       (pep->endpoint.caps.type_int) ? "yes" : "no",
+                       (pep->endpoint.caps.type_bulk) ? "yes" : "no",
+                       (pep->endpoint.caps.type_iso) ? "yes" : "no",
+                       (pep->endpoint.caps.dir_in) ? "yes" : "no",
+                       (pep->endpoint.caps.dir_out) ? "yes" : "no");
+
+               INIT_LIST_HEAD(&pep->pending_list);
+       }
+
+       return 0;
+}
+
+static void cdnsp_gadget_free_endpoints(struct cdnsp_device *pdev)
+{
+       struct cdnsp_ep *pep;
+       int i;
+
+       for (i = 0; i < CDNSP_ENDPOINTS_NUM; i++) {
+               pep = &pdev->eps[i];
+               if (pep->number != 0 && pep->out_ctx)
+                       list_del(&pep->endpoint.ep_list);
+       }
+}
+
+void cdnsp_disconnect_gadget(struct cdnsp_device *pdev)
+{
+       pdev->cdnsp_state |= CDNSP_STATE_DISCONNECT_PENDING;
+
+       if (pdev->gadget_driver && pdev->gadget_driver->disconnect) {
+               spin_unlock(&pdev->lock);
+               pdev->gadget_driver->disconnect(&pdev->gadget);
+               spin_lock(&pdev->lock);
+       }
+
+       pdev->gadget.speed = USB_SPEED_UNKNOWN;
+       usb_gadget_set_state(&pdev->gadget, USB_STATE_NOTATTACHED);
+
+       pdev->cdnsp_state &= ~CDNSP_STATE_DISCONNECT_PENDING;
+}
+
+void cdnsp_suspend_gadget(struct cdnsp_device *pdev)
+{
+       if (pdev->gadget_driver && pdev->gadget_driver->suspend) {
+               spin_unlock(&pdev->lock);
+               pdev->gadget_driver->suspend(&pdev->gadget);
+               spin_lock(&pdev->lock);
+       }
+}
+
+void cdnsp_resume_gadget(struct cdnsp_device *pdev)
+{
+       if (pdev->gadget_driver && pdev->gadget_driver->resume) {
+               spin_unlock(&pdev->lock);
+               pdev->gadget_driver->resume(&pdev->gadget);
+               spin_lock(&pdev->lock);
+       }
+}
+
+void cdnsp_irq_reset(struct cdnsp_device *pdev)
+{
+       struct cdnsp_port_regs __iomem *port_regs;
+
+       cdnsp_reset_device(pdev);
+
+       port_regs = pdev->active_port->regs;
+       pdev->gadget.speed = cdnsp_port_speed(readl(port_regs));
+
+       spin_unlock(&pdev->lock);
+       usb_gadget_udc_reset(&pdev->gadget, pdev->gadget_driver);
+       spin_lock(&pdev->lock);
+
+       switch (pdev->gadget.speed) {
+       case USB_SPEED_SUPER_PLUS:
+       case USB_SPEED_SUPER:
+               cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
+               pdev->gadget.ep0->maxpacket = 512;
+               break;
+       case USB_SPEED_HIGH:
+       case USB_SPEED_FULL:
+               cdnsp_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
+               pdev->gadget.ep0->maxpacket = 64;
+               break;
+       default:
+               /* Low speed is not supported. */
+               dev_err(pdev->dev, "Unknown device speed\n");
+               break;
+       }
+
+       cdnsp_clear_chicken_bits_2(pdev, CHICKEN_XDMA_2_TP_CACHE_DIS);
+       cdnsp_setup_device(pdev, SETUP_CONTEXT_ONLY);
+       usb_gadget_set_state(&pdev->gadget, USB_STATE_DEFAULT);
+}
+
+static void cdnsp_get_rev_cap(struct cdnsp_device *pdev)
+{
+       void __iomem *reg = &pdev->cap_regs->hc_capbase;
+
+       reg += cdnsp_find_next_ext_cap(reg, 0, RTL_REV_CAP);
+       pdev->rev_cap  = reg;
+
+       dev_info(pdev->dev, "Rev: %08x/%08x, eps: %08x, buff: %08x/%08x\n",
+                readl(&pdev->rev_cap->ctrl_revision),
+                readl(&pdev->rev_cap->rtl_revision),
+                readl(&pdev->rev_cap->ep_supported),
+                readl(&pdev->rev_cap->rx_buff_size),
+                readl(&pdev->rev_cap->tx_buff_size));
+}
+
+static int cdnsp_gen_setup(struct cdnsp_device *pdev)
+{
+       int ret;
+       u32 reg;
+
+       pdev->cap_regs = pdev->regs;
+       pdev->op_regs = pdev->regs +
+               HC_LENGTH(readl(&pdev->cap_regs->hc_capbase));
+       pdev->run_regs = pdev->regs +
+               (readl(&pdev->cap_regs->run_regs_off) & RTSOFF_MASK);
+
+       /* Cache read-only capability registers */
+       pdev->hcs_params1 = readl(&pdev->cap_regs->hcs_params1);
+       pdev->hcc_params = readl(&pdev->cap_regs->hc_capbase);
+       pdev->hci_version = HC_VERSION(pdev->hcc_params);
+       pdev->hcc_params = readl(&pdev->cap_regs->hcc_params);
+
+       cdnsp_get_rev_cap(pdev);
+
+       /* Make sure the Device Controller is halted. */
+       ret = cdnsp_halt(pdev);
+       if (ret)
+               return ret;
+
+       /* Reset the internal controller memory state and registers. */
+       ret = cdnsp_reset(pdev);
+       if (ret)
+               return ret;
+
+       /*
+        * Set dma_mask and coherent_dma_mask to 64-bits,
+        * if controller supports 64-bit addressing.
+        */
+       if (HCC_64BIT_ADDR(pdev->hcc_params) &&
+           !dma_set_mask(pdev->dev, DMA_BIT_MASK(64))) {
+               dev_dbg(pdev->dev, "Enabling 64-bit DMA addresses.\n");
+               dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(64));
+       } else {
+               /*
+                * This is to avoid error in cases where a 32-bit USB
+                * controller is used on a 64-bit capable system.
+                */
+               ret = dma_set_mask(pdev->dev, DMA_BIT_MASK(32));
+               if (ret)
+                       return ret;
+
+               dev_dbg(pdev->dev, "Enabling 32-bit DMA addresses.\n");
+               dma_set_coherent_mask(pdev->dev, DMA_BIT_MASK(32));
+       }
+
+       spin_lock_init(&pdev->lock);
+
+       ret = cdnsp_mem_init(pdev);
+       if (ret)
+               return ret;
+
+       /*
+        * Software workaround for U1: after transition to U1 the controller
+        * starts gating the clock, which in some cases causes the controller
+        * to get stuck.
+        */
+       reg = readl(&pdev->port3x_regs->mode_2);
+       reg &= ~CFG_3XPORT_U1_PIPE_CLK_GATE_EN;
+       writel(reg, &pdev->port3x_regs->mode_2);
+
+       return 0;
+}
+
+static int __cdnsp_gadget_init(struct cdns *cdns)
+{
+       struct cdnsp_device *pdev;
+       u32 max_speed;
+       int ret = -ENOMEM;
+
+       cdns_drd_gadget_on(cdns);
+
+       pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
+       if (!pdev)
+               return -ENOMEM;
+
+       pm_runtime_get_sync(cdns->dev);
+
+       cdns->gadget_dev = pdev;
+       pdev->dev = cdns->dev;
+       pdev->regs = cdns->dev_regs;
+       max_speed = usb_get_maximum_speed(cdns->dev);
+
+       switch (max_speed) {
+       case USB_SPEED_FULL:
+       case USB_SPEED_HIGH:
+       case USB_SPEED_SUPER:
+       case USB_SPEED_SUPER_PLUS:
+               break;
+       default:
+               dev_err(cdns->dev, "invalid speed parameter %d\n", max_speed);
+               fallthrough;
+       case USB_SPEED_UNKNOWN:
+               /* Default to SSP */
+               max_speed = USB_SPEED_SUPER_PLUS;
+               break;
+       }
+
+       pdev->gadget.ops = &cdnsp_gadget_ops;
+       pdev->gadget.name = "cdnsp-gadget";
+       pdev->gadget.speed = USB_SPEED_UNKNOWN;
+       pdev->gadget.sg_supported = 1;
+       pdev->gadget.max_speed = USB_SPEED_SUPER_PLUS;
+       pdev->gadget.lpm_capable = 1;
+
+       pdev->setup_buf = kzalloc(CDNSP_EP0_SETUP_SIZE, GFP_KERNEL);
+       if (!pdev->setup_buf)
+               goto free_pdev;
+
+       /*
+        * The controller supports unaligned buffers, but aligned OUT
+        * buffers should improve performance.
+        */
+       pdev->gadget.quirk_ep_out_aligned_size = true;
+
+       ret = cdnsp_gen_setup(pdev);
+       if (ret) {
+               dev_err(pdev->dev, "Generic initialization failed %d\n", ret);
+               goto free_setup;
+       }
+
+       ret = cdnsp_gadget_init_endpoints(pdev);
+       if (ret) {
+               dev_err(pdev->dev, "failed to initialize endpoints\n");
+               goto halt_pdev;
+       }
+
+       ret = usb_add_gadget_udc(pdev->dev, &pdev->gadget);
+       if (ret) {
+               dev_err(pdev->dev, "failed to register udc\n");
+               goto free_endpoints;
+       }
+
+       ret = devm_request_threaded_irq(pdev->dev, cdns->dev_irq,
+                                       cdnsp_irq_handler,
+                                       cdnsp_thread_irq_handler, IRQF_SHARED,
+                                       dev_name(pdev->dev), pdev);
+       if (ret)
+               goto del_gadget;
+
+       return 0;
+
+del_gadget:
+       usb_del_gadget_udc(&pdev->gadget);
+free_endpoints:
+       cdnsp_gadget_free_endpoints(pdev);
+halt_pdev:
+       cdnsp_halt(pdev);
+       cdnsp_reset(pdev);
+       cdnsp_mem_cleanup(pdev);
+free_setup:
+       kfree(pdev->setup_buf);
+free_pdev:
+       kfree(pdev);
+
+       return ret;
+}
+
+static void cdnsp_gadget_exit(struct cdns *cdns)
+{
+       struct cdnsp_device *pdev = cdns->gadget_dev;
+
+       devm_free_irq(pdev->dev, cdns->dev_irq, pdev);
+       pm_runtime_mark_last_busy(cdns->dev);
+       pm_runtime_put_autosuspend(cdns->dev);
+       usb_del_gadget_udc(&pdev->gadget);
+       cdnsp_gadget_free_endpoints(pdev);
+       cdnsp_mem_cleanup(pdev);
+       kfree(pdev);
+       cdns->gadget_dev = NULL;
+       cdns_drd_gadget_off(cdns);
+}
+
+static int cdnsp_gadget_suspend(struct cdns *cdns, bool do_wakeup)
+{
+       struct cdnsp_device *pdev = cdns->gadget_dev;
+       unsigned long flags;
+
+       if (pdev->link_state == XDEV_U3)
+               return 0;
+
+       spin_lock_irqsave(&pdev->lock, flags);
+       cdnsp_disconnect_gadget(pdev);
+       cdnsp_stop(pdev);
+       spin_unlock_irqrestore(&pdev->lock, flags);
+
+       return 0;
+}
+
+static int cdnsp_gadget_resume(struct cdns *cdns, bool hibernated)
+{
+       struct cdnsp_device *pdev = cdns->gadget_dev;
+       enum usb_device_speed max_speed;
+       unsigned long flags;
+       int ret;
+
+       if (!pdev->gadget_driver)
+               return 0;
+
+       spin_lock_irqsave(&pdev->lock, flags);
+       max_speed = pdev->gadget_driver->max_speed;
+
+       /* Limit speed if necessary. */
+       max_speed = min(max_speed, pdev->gadget.max_speed);
+
+       ret = cdnsp_run(pdev, max_speed);
+
+       if (pdev->link_state == XDEV_U3)
+               __cdnsp_gadget_wakeup(pdev);
+
+       spin_unlock_irqrestore(&pdev->lock, flags);
+
+       return ret;
+}
+
+/**
+ * cdnsp_gadget_init - initialize device structure
+ * @cdns: cdnsp instance
+ *
+ * This function initializes the gadget.
+ */
+int cdnsp_gadget_init(struct cdns *cdns)
+{
+       struct cdns_role_driver *rdrv;
+
+       rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
+       if (!rdrv)
+               return -ENOMEM;
+
+       rdrv->start     = __cdnsp_gadget_init;
+       rdrv->stop      = cdnsp_gadget_exit;
+       rdrv->suspend   = cdnsp_gadget_suspend;
+       rdrv->resume    = cdnsp_gadget_resume;
+       rdrv->state     = CDNS_ROLE_STATE_INACTIVE;
+       rdrv->name      = "gadget";
+       cdns->roles[USB_ROLE_DEVICE] = rdrv;
+
+       return 0;
+}
diff --git a/drivers/usb/cdns3/cdnsp-gadget.h b/drivers/usb/cdns3/cdnsp-gadget.h
new file mode 100644 (file)
index 0000000..6bbb265
--- /dev/null
@@ -0,0 +1,1601 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ * Code based on Linux XHCI driver.
+ * Origin: Copyright (C) 2008 Intel Corp.
+ */
+#ifndef __LINUX_CDNSP_GADGET_H
+#define __LINUX_CDNSP_GADGET_H
+
+#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/usb/gadget.h>
+#include <linux/irq.h>
+
+/* Max number of slots - only 1 is allowed. */
+#define CDNSP_DEV_MAX_SLOTS    1
+
+#define CDNSP_EP0_SETUP_SIZE   512
+
+/* One control endpoint, 15 IN and 15 OUT endpoints. */
+#define CDNSP_ENDPOINTS_NUM    31
+
+/* Best Effort Service Latency. */
+#define CDNSP_DEFAULT_BESL     0
+
+/* Device Controller command default timeout value in us */
+#define CDNSP_CMD_TIMEOUT      (15 * 1000)
+
+/* Up to 16 ms to halt a device controller. */
+#define CDNSP_MAX_HALT_USEC    (16 * 1000)
+
+#define CDNSP_CTX_SIZE 2112
+
+/*
+ * Controller register interface.
+ */
+
+/**
+ * struct cdnsp_cap_regs - CDNSP Registers.
+ * @hc_capbase:        Length of the capabilities register and controller
+ *              version number
+ * @hcs_params1: HCSPARAMS1 - Structural Parameters 1
+ * @hcs_params2: HCSPARAMS2 - Structural Parameters 2
+ * @hcs_params3: HCSPARAMS3 - Structural Parameters 3
+ * @hcc_params: HCCPARAMS - Capability Parameters
+ * @db_off: DBOFF - Doorbell array offset
+ * @run_regs_off: RTSOFF - Runtime register space offset
+ * @hcc_params2: HCCPARAMS2 - Capability Parameters 2.
+ */
+struct cdnsp_cap_regs {
+       __le32 hc_capbase;
+       __le32 hcs_params1;
+       __le32 hcs_params2;
+       __le32 hcs_params3;
+       __le32 hcc_params;
+       __le32 db_off;
+       __le32 run_regs_off;
+       __le32 hcc_params2;
+       /* Reserved up to (CAPLENGTH - 0x1C) */
+};
+
+/* hc_capbase bitmasks. */
+/* bits 7:0 - how long is the Capabilities register. */
+#define HC_LENGTH(p)           (((p) >> 00) & GENMASK(7, 0))
+/* bits 31:16 */
+#define HC_VERSION(p)          (((p) >> 16) & GENMASK(15, 0))
+
+/* HCSPARAMS1 - hcs_params1 - bitmasks */
+/* bits 0:7, Max Device Endpoints */
+#define HCS_ENDPOINTS_MASK     GENMASK(7, 0)
+#define HCS_ENDPOINTS(p)       (((p) & HCS_ENDPOINTS_MASK) >> 0)
+
+/* HCCPARAMS offset from PCI base address */
+#define HCC_PARAMS_OFFSET      0x10
+
+/* HCCPARAMS - hcc_params - bitmasks */
+/* 1: device controller can use 64-bit address pointers. */
+#define HCC_64BIT_ADDR(p)      ((p) & BIT(0))
+/* 1: device controller uses 64-byte Device Context structures. */
+#define HCC_64BYTE_CONTEXT(p)  ((p) & BIT(2))
+/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15. */
+#define HCC_MAX_PSA(p)         ((((p) >> 12) & 0xf) + 1)
+/* Extended Capabilities pointer from PCI base. */
+#define HCC_EXT_CAPS(p)                (((p) & GENMASK(31, 16)) >> 16)
+
+#define CTX_SIZE(_hcc)         (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32)
+
+/* db_off bitmask - bits 0:1 reserved. */
+#define DBOFF_MASK     GENMASK(31, 2)
+
+/* run_regs_off bitmask - bits 0:4 reserved. */
+#define RTSOFF_MASK    GENMASK(31, 5)
+
+/**
+ * struct cdnsp_op_regs - Device Controller Operational Registers.
+ * @command: USBCMD - Controller command register.
+ * @status: USBSTS - Controller status register.
+ * @page_size: This indicates the page size that the device controller supports.
+ *             If bit n is set, the controller supports a page size of 2^(n+12),
+ *             up to a 128MB page size. 4K is the minimum page size.
+ * @dnctrl: DNCTRL - Device notification control register.
+ * @cmd_ring: CRP - 64-bit Command Ring Pointer.
+ * @dcbaa_ptr: DCBAAP - 64-bit Device Context Base Address Array Pointer.
+ * @config_reg: CONFIG - Configure Register
+ * @port_reg_base: PORTSCn - base address for Port Status and Control
+ *                 Each port has a Port Status and Control register,
+ *                 followed by a Port Power Management Status and Control
+ *                 register, a Port Link Info register, and a reserved
+ *                 register.
+ */
+struct cdnsp_op_regs {
+       __le32 command;
+       __le32 status;
+       __le32 page_size;
+       __le32 reserved1;
+       __le32 reserved2;
+       __le32 dnctrl;
+       __le64 cmd_ring;
+       /* rsvd: offset 0x20-2F. */
+       __le32 reserved3[4];
+       __le64 dcbaa_ptr;
+       __le32 config_reg;
+       /* rsvd: offset 0x3C-3FF. */
+       __le32 reserved4[241];
+       /* port 1 registers, which serve as a base address for other ports. */
+       __le32 port_reg_base;
+};
+
+/* Number of registers per port. */
+#define NUM_PORT_REGS  4
+
+/**
+ * struct cdnsp_port_regs - Port Registers.
+ * @portsc: PORTSC - Port Status and Control Register.
+ * @portpmsc: PORTPMSC - Port Power Managements Status and Control Register.
+ * @portli: PORTLI - Port Link Info register.
+ */
+struct cdnsp_port_regs {
+       __le32 portsc;
+       __le32 portpmsc;
+       __le32 portli;
+       __le32 reserved;
+};
+
+/*
+ * These bits are Read Only (RO) and should be saved and written to the
 * registers: 0 (connect status) and 10:13 (port speed).
+ * These bits are also sticky - meaning they're in the AUX well and they aren't
+ * changed by a hot or warm reset.
+ */
+#define CDNSP_PORT_RO  (PORT_CONNECT | DEV_SPEED_MASK)
+
+/*
+ * These bits are RW; writing a 0 clears the bit, writing a 1 sets the bit:
+ * bits 5:8 (link state), 25:26  ("wake on" enable state)
+ */
+#define CDNSP_PORT_RWS (PORT_PLS_MASK | PORT_WKCONN_E | PORT_WKDISC_E)
+
+/*
+ * These bits are RW; writing a 1 clears the bit, writing a 0 has no effect:
+ * bit 1 (port enable/disable), 17 (connect status change),
+ * 21 (port reset change), 22 (port link state change).
+ */
+#define CDNSP_PORT_RW1CS (PORT_PED | PORT_CSC | PORT_RC | PORT_PLC)
+
+/* USBCMD - USB command - bitmasks. */
+/* Run/Stop, controller execution - do not write unless controller is halted. */
+#define CMD_R_S                BIT(0)
+/*
+ * Reset device controller - resets internal controller state machine and all
+ * registers (except PCI config regs).
+ */
+#define CMD_RESET      BIT(1)
+/* Event Interrupt Enable - a '1' allows interrupts from the controller. */
+#define CMD_INTE       BIT(2)
+/*
+ * Device System Error Interrupt Enable - get out-of-band signal for
+ * controller errors.
+ */
+#define CMD_DSEIE      BIT(3)
+/* device controller save/restore state. */
+#define CMD_CSS                BIT(8)
+#define CMD_CRS                BIT(9)
+/*
+ * Enable Wrap Event - '1' means device controller generates an event
+ * when MFINDEX wraps.
+ */
+#define CMD_EWE                BIT(10)
+/* 1: device enabled */
+#define CMD_DEVEN      BIT(17)
+/* bits 18:31 are reserved (and should be preserved on writes). */
+
+/* Command register values to disable interrupts. */
+#define CDNSP_IRQS     (CMD_INTE | CMD_DSEIE | CMD_EWE)
+
+/* USBSTS - USB status - bitmasks */
+/* controller not running - set to 1 when run/stop bit is cleared. */
+#define STS_HALT       BIT(0)
+/*
+ * serious error, e.g. PCI parity error. The controller will clear
+ * the run/stop bit.
+ */
+#define STS_FATAL      BIT(2)
+/* event interrupt - clear this prior to clearing any IP flags in IR set. */
+#define STS_EINT       BIT(3)
+/* port change detect */
+#define STS_PCD                BIT(4)
+/* save state status - '1' means device controller is saving state. */
+#define STS_SSS                BIT(8)
+/* restore state status - '1' means controller is restoring state. */
+#define STS_RSS                BIT(9)
+/* 1: save or restore error */
+#define STS_SRE                BIT(10)
+/* 1: device Not Ready to accept doorbell or op reg writes after reset. */
+#define STS_CNR                BIT(11)
+/* 1: internal Device Controller Error. */
+#define STS_HCE                BIT(12)
+
+/* CRCR - Command Ring Control Register - cmd_ring bitmasks. */
+/* bit 0 is the command ring cycle state. */
+#define CMD_RING_CS            BIT(0)
+/* stop ring immediately - abort the currently executing command. */
+#define CMD_RING_ABORT         BIT(2)
+/*
+ * Command Ring Busy.
+ * Set when Doorbell register is written with DB for command and cleared when
+ * the controller reached end of CR.
+ */
+#define CMD_RING_BUSY(p)       ((p) & BIT(4))
+/* 1: command ring is running */
+#define CMD_RING_RUNNING       BIT(3)
+/* Command Ring pointer - bit mask for the lower 32 bits. */
+#define CMD_RING_RSVD_BITS     GENMASK(5, 0)
+
+/* CONFIG - Configure Register - config_reg bitmasks. */
+/* bits 0:7 - maximum number of device slots enabled. */
+#define MAX_DEVS               GENMASK(7, 0)
+/* bit 8: U3 Entry Enabled, assert PLC when controller enters U3. */
+#define CONFIG_U3E             BIT(8)
+
+/* PORTSC - Port Status and Control Register - port_reg_base bitmasks */
+/* 1: device connected. */
+#define PORT_CONNECT           BIT(0)
+/* 1: port enabled. */
+#define PORT_PED               BIT(1)
+/* 1: port reset signaling asserted. */
+#define PORT_RESET             BIT(4)
+/*
+ * Port Link State - bits 5:8
+ * A read gives the current link PM state of the port,
+ * a write with Link State Write Strobe sets the link state.
+ */
+#define PORT_PLS_MASK          GENMASK(8, 5)
+#define XDEV_U0                        (0x0 << 5)
+#define XDEV_U1                        (0x1 << 5)
+#define XDEV_U2                        (0x2 << 5)
+#define XDEV_U3                        (0x3 << 5)
+#define XDEV_DISABLED          (0x4 << 5)
+#define XDEV_RXDETECT          (0x5 << 5)
+#define XDEV_INACTIVE          (0x6 << 5)
+#define XDEV_POLLING           (0x7 << 5)
+#define XDEV_RECOVERY          (0x8 << 5)
+#define XDEV_HOT_RESET         (0x9 << 5)
+#define XDEV_COMP_MODE         (0xa << 5)
+#define XDEV_TEST_MODE         (0xb << 5)
+#define XDEV_RESUME            (0xf << 5)
+/* 1: port has power. */
+#define PORT_POWER             BIT(9)
+/*
+ * bits 10:13 indicate device speed:
+ * 0 - undefined speed - port hasn't been initialized by a reset yet
+ * 1 - full speed
+ * 2 - reserved (low speed not supported)
+ * 3 - high speed
+ * 4 - super speed
+ * 5 - super speed plus
+ * 6-15 reserved
+ */
+#define DEV_SPEED_MASK         GENMASK(13, 10)
+#define XDEV_FS                        (0x1 << 10)
+#define XDEV_HS                        (0x3 << 10)
+#define XDEV_SS                        (0x4 << 10)
+#define XDEV_SSP               (0x5 << 10)
+#define DEV_UNDEFSPEED(p)      (((p) & DEV_SPEED_MASK) == (0x0 << 10))
+#define DEV_FULLSPEED(p)       (((p) & DEV_SPEED_MASK) == XDEV_FS)
+#define DEV_HIGHSPEED(p)       (((p) & DEV_SPEED_MASK) == XDEV_HS)
+#define DEV_SUPERSPEED(p)      (((p) & DEV_SPEED_MASK) == XDEV_SS)
+#define DEV_SUPERSPEEDPLUS(p)  (((p) & DEV_SPEED_MASK) == XDEV_SSP)
+#define DEV_SUPERSPEED_ANY(p)  (((p) & DEV_SPEED_MASK) >= XDEV_SS)
+#define DEV_PORT_SPEED(p)      (((p) >> 10) & 0x0f)
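+
+/*
+ * Illustrative sketch, not the driver's own decode helper: one plausible
+ * mapping from PORTSC to a gadget speed, using the masks above:
+ *
+ *     portsc = readl(&port_regs->portsc);
+ *     if (DEV_SUPERSPEEDPLUS(portsc))
+ *             speed = USB_SPEED_SUPER_PLUS;
+ *     else if (DEV_SUPERSPEED(portsc))
+ *             speed = USB_SPEED_SUPER;
+ */
+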
+/* Port Link State Write Strobe - set this when changing link state */
+#define PORT_LINK_STROBE       BIT(16)
+/* 1: connect status change */
+#define PORT_CSC               BIT(17)
+/* 1: warm reset for a USB 3.0 device is done. */
+#define PORT_WRC               BIT(19)
+/* 1: reset change - 1 to 0 transition of PORT_RESET */
+#define PORT_RC                        BIT(21)
+/*
+ * port link status change - set on some port link state transitions:
+ * Transition                  Reason
+ * ----------------------------------------------------------------------------
+ * - U3 to Resume              Wakeup signaling from a device
+ * - Resume to Recovery to U0  USB 3.0 device resume
+ * - Resume to U0              USB 2.0 device resume
+ * - U3 to Recovery to U0      Software resume of USB 3.0 device complete
+ * - U3 to U0                  Software resume of USB 2.0 device complete
+ * - U2 to U0                  L1 resume of USB 2.1 device complete
+ * - U0 to U0                  L1 entry rejection by USB 2.1 device
+ * - U0 to disabled            L1 entry error with USB 2.1 device
+ * - Any state to inactive     Error on USB 3.0 port
+ */
+#define PORT_PLC               BIT(22)
+/* Port configure error change - port failed to configure its link partner. */
+#define PORT_CEC               BIT(23)
+/* Wake on connect (enable). */
+#define PORT_WKCONN_E          BIT(25)
+/* Wake on disconnect (enable). */
+#define PORT_WKDISC_E          BIT(26)
+/* Indicates if Warm Reset is being received. */
+#define PORT_WR                        BIT(31)
+
+#define PORT_CHANGE_BITS (PORT_CSC | PORT_WRC | PORT_RC | PORT_PLC | PORT_CEC)
+
+/* PORTPMSCUSB3 - Port Power Management Status and Control - bitmasks. */
+/* Enables U1 entry. */
+#define PORT_U1_TIMEOUT_MASK   GENMASK(7, 0)
+#define PORT_U1_TIMEOUT(p)     ((p) & PORT_U1_TIMEOUT_MASK)
+/* Enables U2 entry. */
+#define PORT_U2_TIMEOUT_MASK   GENMASK(14, 8)
+#define PORT_U2_TIMEOUT(p)     (((p) << 8) & PORT_U2_TIMEOUT_MASK)
+
+/* PORTPMSCUSB2 - Port Power Management Status and Control - bitmasks. */
+#define PORT_L1S_MASK          GENMASK(2, 0)
+#define PORT_L1S(p)            ((p) & PORT_L1S_MASK)
+#define PORT_L1S_ACK           PORT_L1S(1)
+#define PORT_L1S_NYET          PORT_L1S(2)
+#define PORT_L1S_STALL         PORT_L1S(3)
+#define PORT_L1S_TIMEOUT       PORT_L1S(4)
+/* Remote Wake Enable. */
+#define PORT_RWE               BIT(3)
+/* Best Effort Service Latency (BESL). */
+#define PORT_BESL(p)           (((p) << 4) & GENMASK(7, 4))
+/* Hardware LPM Enable (HLE). */
+#define PORT_HLE               BIT(16)
+/* Received Best Effort Service Latency (BESL). */
+#define PORT_RRBESL(p)         (((p) & GENMASK(20, 17)) >> 17)
+/* Port Test Control. */
+#define PORT_TEST_MODE_MASK    GENMASK(31, 28)
+#define PORT_TEST_MODE(p)      (((p) << 28) & PORT_TEST_MODE_MASK)
+
+/**
+ * struct cdnsp_intr_reg - Interrupt Register Set.
+ * @irq_pending: IMAN - Interrupt Management Register. Used to enable
+ *               interrupts and check for pending interrupts.
+ * @irq_control: IMOD - Interrupt Moderation Register.
+ *               Used to throttle interrupts.
+ * @erst_size: Number of segments in the Event Ring Segment Table (ERST).
+ * @erst_base: ERST base address.
+ * @erst_dequeue: Event ring dequeue pointer.
+ *
+ * Each interrupter (defined by a MSI-X vector) has an event ring and an Event
+ * Ring Segment Table (ERST) associated with it. The event ring is comprised of
+ * multiple segments of the same size. The controller places events on the ring
+ * and "updates the Cycle bit in the TRBs to indicate to software the current
+ * position of the Enqueue Pointer." The driver processes those events and
+ * updates the dequeue pointer.
+ */
+struct cdnsp_intr_reg {
+       __le32 irq_pending;
+       __le32 irq_control;
+       __le32 erst_size;
+       __le32 rsvd;
+       __le64 erst_base;
+       __le64 erst_dequeue;
+};
+
+/* IMAN - Interrupt Management Register - irq_pending bitmasks. */
+#define IMAN_IE                        BIT(1)
+#define IMAN_IP                        BIT(0)
+/* bits 2:31 need to be preserved */
+#define IMAN_IE_SET(p)         (((p) & IMAN_IE) | 0x2)
+#define IMAN_IE_CLEAR(p)       (((p) & IMAN_IE) & ~(0x2))
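+
+/*
+ * Minimal usage sketch, mirroring cdnsp_stop() in cdnsp-gadget.c: the
+ * interrupter is disabled with IMAN_IE_CLEAR() and a pending interrupt is
+ * then acknowledged by writing IMAN_IP back:
+ *
+ *     temp = readl(&pdev->ir_set->irq_pending);
+ *     writel(IMAN_IE_CLEAR(temp), &pdev->ir_set->irq_pending);
+ *     ...
+ *     temp = readl(&pdev->ir_set->irq_pending);
+ *     writel(temp | IMAN_IP, &pdev->ir_set->irq_pending);
+ */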
+
+/* IMOD - Interrupter Moderation Register - irq_control bitmasks. */
+/*
+ * Minimum interval between interrupts (in 250ns intervals). The interval
+ * between interrupts will be longer if there are no events on the event ring.
+ * Default is 4000 (1 ms).
+ */
+#define IMOD_INTERVAL_MASK     GENMASK(15, 0)
+/* Counter used to count down the time to the next interrupt - HW use only */
+#define IMOD_COUNTER_MASK      GENMASK(31, 16)
+#define IMOD_DEFAULT_INTERVAL  0
+
+/* erst_size bitmasks. */
+/* Preserve bits 16:31 of erst_size. */
+#define ERST_SIZE_MASK         GENMASK(31, 16)
+
+/* erst_dequeue bitmasks. */
+/*
+ * Dequeue ERST Segment Index (DESI) - Segment number (or alias)
+ * where the current dequeue pointer lies. This is an optional HW hint.
+ */
+#define ERST_DESI_MASK         GENMASK(2, 0)
+/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced. */
+#define ERST_EHB               BIT(3)
+#define ERST_PTR_MASK          GENMASK(3, 0)
+
+/**
+ * struct cdnsp_run_regs
+ * @microframe_index: MFINDEX - current microframe number.
+ * @ir_set: Array of Interrupter registers.
+ *
+ * Device Controller Runtime Registers:
+ * "Software should read and write these registers using only Dword (32 bit)
+ * or larger accesses"
+ */
+struct cdnsp_run_regs {
+       __le32 microframe_index;
+       __le32 rsvd[7];
+       struct cdnsp_intr_reg ir_set[128];
+};
+
+/**
+ * struct cdnsp_20port_cap - USB2.0 Port Peripheral Configuration Registers.
+ * @ext_cap: Header register for Extended Capability.
+ * @port_reg1: Timer Configuration Register.
+ * @port_reg2: Timer Configuration Register.
+ * @port_reg3: Timer Configuration Register.
+ * @port_reg4: Timer Configuration Register.
+ * @port_reg5: Timer Configuration Register.
+ * @port_reg6: Chicken bits for USB20PPP.
+ */
+struct cdnsp_20port_cap {
+       __le32 ext_cap;
+       __le32 port_reg1;
+       __le32 port_reg2;
+       __le32 port_reg3;
+       __le32 port_reg4;
+       __le32 port_reg5;
+       __le32 port_reg6;
+};
+
+/* Extended capability register fields */
+#define EXT_CAPS_ID(p)                 (((p) >> 0) & GENMASK(7, 0))
+#define EXT_CAPS_NEXT(p)               (((p) >> 8) & GENMASK(7, 0))
+/* Extended capability IDs - ID 0 reserved */
+#define EXT_CAPS_PROTOCOL              2
+
+/* USB 2.0 Port Peripheral Configuration Extended Capability */
+#define EXT_CAP_CFG_DEV_20PORT_CAP_ID  0xC1
+/*
+ * Setting this bit to '1' enables automatic wakeup from L1 state on transfer
+ * TRB prepared when USBSSP operates in USB2.0 mode.
+ */
+#define PORT_REG6_L1_L0_HW_EN          BIT(1)
+/*
+ * Setting this bit to '1' forces Full Speed when USBSSP operates in USB2.0
+ * mode (disables High Speed).
+ */
+#define PORT_REG6_FORCE_FS             BIT(0)
+
+/**
+ * struct cdnsp_3xport_cap - USB3.x Port Peripheral Configuration Registers.
+ * @ext_cap: Header register for Extended Capability.
+ * @mode_addr: Miscellaneous 3xPORT operation mode configuration register.
+ * @mode_2: 3x Port Control Register 2.
+ */
+struct cdnsp_3xport_cap {
+       __le32 ext_cap;
+       __le32 mode_addr;
+       __le32 reserved[52];
+       __le32 mode_2;
+};
+
+/* Extended Capability Header for 3XPort Configuration Registers. */
+#define D_XEC_CFG_3XPORT_CAP           0xC0
+#define CFG_3XPORT_SSP_SUPPORT         BIT(31)
+#define CFG_3XPORT_U1_PIPE_CLK_GATE_EN BIT(0)
+
+/* Revision Extended Capability ID */
+#define RTL_REV_CAP                    0xC4
+#define RTL_REV_CAP_RX_BUFF_CMD_SIZE   GENMASK(31, 24)
+#define RTL_REV_CAP_RX_BUFF_SIZE       GENMASK(15, 0)
+#define RTL_REV_CAP_TX_BUFF_CMD_SIZE   GENMASK(31, 24)
+#define RTL_REV_CAP_TX_BUFF_SIZE       GENMASK(15, 0)
+
+#define CDNSP_VER_1 0x00000000
+#define CDNSP_VER_2 0x10000000
+
+#define CDNSP_IF_EP_EXIST(pdev, ep_num, dir) \
+                        (readl(&(pdev)->rev_cap->ep_supported) & \
+                        (BIT(ep_num) << ((dir) ? 0 : 16)))
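+
+/*
+ * Illustrative use, as in cdnsp_gadget_init_endpoints() in cdnsp-gadget.c:
+ * endpoints not wired up in a given controller configuration are skipped:
+ *
+ *     if (!CDNSP_IF_EP_EXIST(pdev, epnum, direction))
+ *             continue;
+ */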
+
+/**
+ * struct cdnsp_rev_cap - controller capabilities.
+ * @ext_cap: Header for RTL Revision Extended Capability.
+ * @rtl_revision: RTL revision.
+ * @rx_buff_size: Rx buffer sizes.
+ * @tx_buff_size: Tx buffer sizes.
+ * @ep_supported: Supported endpoints.
+ * @ctrl_revision: Controller revision ID.
+ */
+struct cdnsp_rev_cap {
+       __le32 ext_cap;
+       __le32 rtl_revision;
+       __le32 rx_buff_size;
+       __le32 tx_buff_size;
+       __le32 ep_supported;
+       __le32 ctrl_revision;
+};
+
+/* USB2.0 Port Peripheral Configuration Registers. */
+#define D_XEC_PRE_REGS_CAP             0xC8
+#define REG_CHICKEN_BITS_2_OFFSET      0x48
+#define CHICKEN_XDMA_2_TP_CACHE_DIS    BIT(28)
+
+/* XBUF Extended Capability ID. */
+#define XBUF_CAP_ID                    0xCB
+#define XBUF_RX_TAG_MASK_0_OFFSET      0x1C
+#define XBUF_RX_TAG_MASK_1_OFFSET      0x24
+#define XBUF_TX_CMD_OFFSET             0x2C
+
+/**
+ * struct cdnsp_doorbell_array.
+ * @cmd_db: Command ring doorbell register.
+ * @ep_db: Endpoint ring doorbell register.
+ *         Bits 0 - 7: Endpoint target.
+ *         Bits 8 - 15: RsvdZ.
+ *         Bits 16 - 31: Stream ID.
+ */
+struct cdnsp_doorbell_array {
+       __le32 cmd_db;
+       __le32 ep_db;
+};
+
+#define DB_VALUE(ep, stream)           ((((ep) + 1) & 0xff) | ((stream) << 16))
+#define DB_VALUE_EP0_OUT(ep, stream)   ((ep) & 0xff)
+#define DB_VALUE_CMD                   0x00000000
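+
+/*
+ * Illustrative sketch; it assumes a struct cdnsp_doorbell_array __iomem *dba
+ * member on struct cdnsp_device, which is not shown here. Ringing the
+ * command doorbell and an endpoint doorbell could then look like:
+ *
+ *     writel(DB_VALUE_CMD, &pdev->dba->cmd_db);
+ *     writel(DB_VALUE(pep->idx, stream_id), &pdev->dba->ep_db);
+ */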
+
+/**
+ * struct cdnsp_container_ctx.
+ * @type: Type of context. Used to calculate offsets to contained contexts.
+ * @size: Size of the context data.
+ * @ctx_size: Context data structure size - 64 or 32 bytes.
+ * @dma: dma address of the bytes.
+ * @bytes: The raw context data given to HW.
+ *
+ * Represents either a Device or Input context. Holds a pointer to the raw
+ * memory used for the context (bytes) and dma address of it (dma).
+ */
+struct cdnsp_container_ctx {
+       unsigned int type;
+#define CDNSP_CTX_TYPE_DEVICE  0x1
+#define CDNSP_CTX_TYPE_INPUT   0x2
+       int size;
+       int ctx_size;
+       dma_addr_t dma;
+       u8 *bytes;
+};
+
+/**
+ * struct cdnsp_slot_ctx
+ * @dev_info: Device speed, and last valid endpoint.
+ * @dev_port: Device port number that is needed to access the USB device.
+ * @int_target: Interrupter target number.
+ * @dev_state: Slot state and device address.
+ *
+ * Slot Context - This assumes the controller uses 32-byte context
+ * structures. If the controller uses 64-byte contexts, there is an additional
+ * 32 bytes reserved at the end of the slot context for controller internal use.
+ */
+struct cdnsp_slot_ctx {
+       __le32 dev_info;
+       __le32 dev_port;
+       __le32 int_target;
+       __le32 dev_state;
+       /* offset 0x10 to 0x1f reserved for controller internal use. */
+       __le32 reserved[4];
+};
+
+/* Bits 20:23 in the Slot Context are the speed for the device. */
+#define SLOT_SPEED_FS          (XDEV_FS << 10)
+#define SLOT_SPEED_HS          (XDEV_HS << 10)
+#define SLOT_SPEED_SS          (XDEV_SS << 10)
+#define SLOT_SPEED_SSP         (XDEV_SSP << 10)
+
+/* dev_info bitmasks. */
+/* Device speed - values defined by PORTSC Device Speed field - 20:23. */
+#define DEV_SPEED              GENMASK(23, 20)
+#define GET_DEV_SPEED(n)       (((n) & DEV_SPEED) >> 20)
+/* Index of the last valid endpoint context in this device context - 27:31. */
+#define LAST_CTX_MASK          ((unsigned int)GENMASK(31, 27))
+#define LAST_CTX(p)            ((p) << 27)
+#define LAST_CTX_TO_EP_NUM(p)  (((p) >> 27) - 1)
+#define SLOT_FLAG              BIT(0)
+#define EP0_FLAG               BIT(1)
+
+/* dev_port bitmasks */
+/* Device port number that is needed to access the USB device. */
+#define DEV_PORT(p)            (((p) & 0xff) << 16)
+
+/* dev_state bitmasks */
+/* USB device address - assigned by the controller. */
+#define DEV_ADDR_MASK          GENMASK(7, 0)
+/* Slot state */
+#define SLOT_STATE             GENMASK(31, 27)
+#define GET_SLOT_STATE(p)      (((p) & SLOT_STATE) >> 27)
+
+#define SLOT_STATE_DISABLED    0
+#define SLOT_STATE_ENABLED     SLOT_STATE_DISABLED
+#define SLOT_STATE_DEFAULT     1
+#define SLOT_STATE_ADDRESSED   2
+#define SLOT_STATE_CONFIGURED  3
+
+/**
+ * struct cdnsp_ep_ctx.
+ * @ep_info: Endpoint state, streams, mult, and interval information.
+ * @ep_info2: Information on endpoint type, max packet size, max burst size,
+ *            error count, and whether the controller will force an event for
+ *            all transactions.
+ * @deq: 64-bit ring dequeue pointer address. If the endpoint only
+ *       defines one stream, this points to the endpoint transfer ring.
+ *       Otherwise, it points to a stream context array, which has a
+ *       ring pointer for each flow.
+ * @tx_info: Average TRB lengths for the endpoint ring and
+ *          max payload within an Endpoint Service Interval Time (ESIT).
+ *
+ * Endpoint Context - This assumes the controller uses 32-byte context
+ * structures. If the controller uses 64-byte contexts, there is an additional
+ * 32 bytes reserved at the end of the endpoint context for controller internal
+ * use.
+ */
+struct cdnsp_ep_ctx {
+       __le32 ep_info;
+       __le32 ep_info2;
+       __le64 deq;
+       __le32 tx_info;
+       /* offset 0x14 - 0x1f reserved for controller internal use. */
+       __le32 reserved[3];
+};
+
+/* ep_info bitmasks. */
+/*
+ * Endpoint State - bits 0:2:
+ * 0 - disabled
+ * 1 - running
+ * 2 - halted due to halt condition
+ * 3 - stopped
+ * 4 - TRB error
+ * 5-7 - reserved
+ */
+#define EP_STATE_MASK          GENMASK(3, 0)
+#define EP_STATE_DISABLED      0
+#define EP_STATE_RUNNING       1
+#define EP_STATE_HALTED                2
+#define EP_STATE_STOPPED       3
+#define EP_STATE_ERROR         4
+#define GET_EP_CTX_STATE(ctx)  (le32_to_cpu((ctx)->ep_info) & EP_STATE_MASK)
+
+/* Mult - Max number of bursts within an interval, in EP companion desc. */
+#define EP_MULT(p)                     (((p) << 8) & GENMASK(9, 8))
+#define CTX_TO_EP_MULT(p)              (((p) & GENMASK(9, 8)) >> 8)
+/* bits 10:14 are Max Primary Streams. */
+/* bit 15 is Linear Stream Array. */
+/* Interval - period between requests to an endpoint - 125 us increments. */
+#define EP_INTERVAL(p)                 (((p) << 16) & GENMASK(23, 16))
+#define EP_INTERVAL_TO_UFRAMES(p)      (1 << (((p) & GENMASK(23, 16)) >> 16))
+#define CTX_TO_EP_INTERVAL(p)          (((p) & GENMASK(23, 16)) >> 16)
+#define EP_MAXPSTREAMS_MASK            GENMASK(14, 10)
+#define EP_MAXPSTREAMS(p)              (((p) << 10) & EP_MAXPSTREAMS_MASK)
+#define CTX_TO_EP_MAXPSTREAMS(p)       (((p) & EP_MAXPSTREAMS_MASK) >> 10)
+/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
+#define EP_HAS_LSA                     BIT(15)
+
+/* ep_info2 bitmasks */
+#define ERROR_COUNT(p)         (((p) & 0x3) << 1)
+#define CTX_TO_EP_TYPE(p)      (((p) >> 3) & 0x7)
+#define EP_TYPE(p)             ((p) << 3)
+#define ISOC_OUT_EP            1
+#define BULK_OUT_EP            2
+#define INT_OUT_EP             3
+#define CTRL_EP                        4
+#define ISOC_IN_EP             5
+#define BULK_IN_EP             6
+#define INT_IN_EP              7
+/* bit 6 reserved. */
+/* bit 7 is Device Initiate Disable - for disabling stream selection. */
+#define MAX_BURST(p)           (((p) << 8) & GENMASK(15, 8))
+#define CTX_TO_MAX_BURST(p)    (((p) & GENMASK(15, 8)) >> 8)
+#define MAX_PACKET(p)          (((p) << 16) & GENMASK(31, 16))
+#define MAX_PACKET_MASK                GENMASK(31, 16)
+#define MAX_PACKET_DECODED(p)  (((p) & GENMASK(31, 16)) >> 16)
+
+/* tx_info bitmasks. */
+#define EP_AVG_TRB_LENGTH(p)           ((p) & GENMASK(15, 0))
+#define EP_MAX_ESIT_PAYLOAD_LO(p)      (((p) << 16) & GENMASK(31, 16))
+#define EP_MAX_ESIT_PAYLOAD_HI(p)      ((((p) & GENMASK(23, 16)) >> 16) << 24)
+#define CTX_TO_MAX_ESIT_PAYLOAD_LO(p)  (((p) & GENMASK(31, 16)) >> 16)
+#define CTX_TO_MAX_ESIT_PAYLOAD_HI(p)  (((p) & GENMASK(31, 24)) >> 24)
+
+/* deq bitmasks. */
+#define EP_CTX_CYCLE_MASK              BIT(0)
+#define CTX_DEQ_MASK                   (~0xfL)
+
+/**
+ * struct cdnsp_input_control_ctx - Input control context.
+ *
+ * @drop_flags: Set the bit of the endpoint context you want to disable.
+ * @add_flags: Set the bit of the endpoint context you want to enable.
+ */
+struct cdnsp_input_control_ctx {
+       __le32 drop_flags;
+       __le32 add_flags;
+       __le32 rsvd2[6];
+};
+
+/**
+ * struct cdnsp_command - Everything needed to issue a command on the command ring.
+ *
+ * @in_ctx: Pointer to input context structure.
+ * @status: Command Completion Code for last command.
+ * @command_trb: Pointer to command TRB.
+ */
+struct cdnsp_command {
+       /* Input context for changing device state. */
+       struct cdnsp_container_ctx *in_ctx;
+       u32 status;
+       union cdnsp_trb *command_trb;
+};
+
+/**
+ * struct cdnsp_stream_ctx - Stream context structure.
+ *
+ * @stream_ring: 64-bit stream ring address, cycle state, and stream type.
+ * @reserved: offset 0x14 - 0x1f reserved for controller internal use.
+ */
+struct cdnsp_stream_ctx {
+       __le64 stream_ring;
+       __le32 reserved[2];
+};
+
+/* Stream Context Types - bits 3:1 of stream ctx deq ptr. */
+#define SCT_FOR_CTX(p)         (((p) << 1) & GENMASK(3, 1))
+/* Secondary stream array type, dequeue pointer is to a transfer ring. */
+#define SCT_SEC_TR             0
+/* Primary stream array type, dequeue pointer is to a transfer ring. */
+#define SCT_PRI_TR             1
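+
+/*
+ * Illustrative sketch, an assumption based on the bit layout above: a
+ * primary-stream-array entry pointing at a transfer ring combines the ring
+ * DMA address, the stream context type, and the ring cycle state:
+ *
+ *     ctx->stream_ring = cpu_to_le64(ring_dma | SCT_FOR_CTX(SCT_PRI_TR) |
+ *                                    ring_cycle_state);
+ */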
+
+/**
+ *  struct cdnsp_stream_info - Everything that is needed to
+ *                             support stream-capable endpoints.
+ *  @stream_rings: Array of pointers containing Transfer rings for all
+ *                 supported streams.
+ *  @num_streams: Number of streams, including stream 0.
+ *  @stream_ctx_array: The stream context array may be bigger than the number
+ *                     of streams the driver asked for.
+ *  @num_stream_ctxs: Number of stream contexts in the array.
+ *  @ctx_array_dma: Dma address of Context Stream Array.
+ *  @trb_address_map: For mapping physical TRB addresses to segments in
+ *                    stream rings.
+ *  @td_count: Number of TDs associated with endpoint.
+ *  @first_prime_det: First PRIME packet detected.
+ *  @drbls_count: Number of allowed doorbells.
+ */
+struct cdnsp_stream_info {
+       struct cdnsp_ring **stream_rings;
+       unsigned int num_streams;
+       struct cdnsp_stream_ctx *stream_ctx_array;
+       unsigned int num_stream_ctxs;
+       dma_addr_t ctx_array_dma;
+       struct radix_tree_root trb_address_map;
+       int td_count;
+       u8 first_prime_det;
+#define STREAM_DRBL_FIFO_DEPTH 2
+       u8 drbls_count;
+};
+
+#define STREAM_LOG_STREAMS 4
+#define STREAM_NUM_STREAMS BIT(STREAM_LOG_STREAMS)
+
+#if STREAM_LOG_STREAMS > 16 || STREAM_LOG_STREAMS < 1
+#error "Unsupported stream value"
+#endif
+
+/**
+ * struct cdnsp_ep - extended device side representation of USB endpoint.
+ * @endpoint: usb endpoint
+ * @pending_list: List of requests queued on the transfer ring.
+ * @pdev: Device associated with this endpoint.
+ * @number: Endpoint number (1 - 15).
+ * @idx: The device context index (DCI).
+ * @interval: Interval between packets used for ISOC endpoint.
+ * @name: A human readable name e.g. ep1out.
+ * @direction: Endpoint direction.
+ * @buffering: Number of on-chip buffers related to endpoint.
+ * @buffering_period: Number of on-chip buffers related to periodic endpoint.
+ * @in_ctx: Pointer to input endpoint context structure.
+ * @out_ctx: Pointer to output endpoint context structure.
+ * @ring: Pointer to transfer ring.
+ * @stream_info: Hold stream information.
+ * @ep_state: Current state of endpoint.
+ * @skip: Sometimes the controller cannot process the isochronous endpoint
+ *        ring quickly enough; it then misses some isoc TDs on the ring and
+ *        generates a Missed Service Error Event.
+ *        Set the skip flag when a Missed Service Error Event is received
+ *        and process the missed TDs on the endpoint ring.
+ */
+struct cdnsp_ep {
+       struct usb_ep endpoint;
+       struct list_head pending_list;
+       struct cdnsp_device *pdev;
+       u8 number;
+       u8 idx;
+       u32 interval;
+       char name[20];
+       u8 direction;
+       u8 buffering;
+       u8 buffering_period;
+       struct cdnsp_ep_ctx *in_ctx;
+       struct cdnsp_ep_ctx *out_ctx;
+       struct cdnsp_ring *ring;
+       struct cdnsp_stream_info stream_info;
+       unsigned int ep_state;
+#define EP_ENABLED             BIT(0)
+#define EP_DIS_IN_RROGRESS     BIT(1)
+#define EP_HALTED              BIT(2)
+#define EP_STOPPED             BIT(3)
+#define EP_WEDGE               BIT(4)
+#define EP0_HALTED_STATUS      BIT(5)
+#define EP_HAS_STREAMS         BIT(6)
+
+       bool skip;
+};
+
+/**
+ * struct cdnsp_device_context_array
+ * @dev_context_ptrs: Array of 64-bit DMA addresses for device contexts.
+ * @dma: DMA address for device contexts structure.
+ */
+struct cdnsp_device_context_array {
+       __le64 dev_context_ptrs[CDNSP_DEV_MAX_SLOTS + 1];
+       dma_addr_t dma;
+};
+
+/**
+ * struct cdnsp_transfer_event.
+ * @buffer: 64-bit buffer address, or immediate data.
+ * @transfer_len: Data length transferred.
+ * @flags: Field is interpreted differently based on the type of TRB.
+ */
+struct cdnsp_transfer_event {
+       __le64 buffer;
+       __le32 transfer_len;
+       __le32 flags;
+};
+
+/* Invalidate event after disabling endpoint. */
+#define TRB_EVENT_INVALIDATE 8
+
+/* Transfer event TRB length bit mask. */
+/* bits 0:23 */
+#define EVENT_TRB_LEN(p)                       ((p) & GENMASK(23, 0))
+/* Completion Code - only applicable for some types of TRBs */
+#define COMP_CODE_MASK                         (0xff << 24)
+#define GET_COMP_CODE(p)                       (((p) & COMP_CODE_MASK) >> 24)
+#define COMP_INVALID                           0
+#define COMP_SUCCESS                           1
+#define COMP_DATA_BUFFER_ERROR                 2
+#define COMP_BABBLE_DETECTED_ERROR             3
+#define COMP_TRB_ERROR                         5
+#define COMP_RESOURCE_ERROR                    7
+#define COMP_NO_SLOTS_AVAILABLE_ERROR          9
+#define COMP_INVALID_STREAM_TYPE_ERROR         10
+#define COMP_SLOT_NOT_ENABLED_ERROR            11
+#define COMP_ENDPOINT_NOT_ENABLED_ERROR                12
+#define COMP_SHORT_PACKET                      13
+#define COMP_RING_UNDERRUN                     14
+#define COMP_RING_OVERRUN                      15
+#define COMP_VF_EVENT_RING_FULL_ERROR          16
+#define COMP_PARAMETER_ERROR                   17
+#define COMP_CONTEXT_STATE_ERROR               19
+#define COMP_EVENT_RING_FULL_ERROR             21
+#define COMP_INCOMPATIBLE_DEVICE_ERROR         22
+#define COMP_MISSED_SERVICE_ERROR              23
+#define COMP_COMMAND_RING_STOPPED              24
+#define COMP_COMMAND_ABORTED                   25
+#define COMP_STOPPED                           26
+#define COMP_STOPPED_LENGTH_INVALID            27
+#define COMP_STOPPED_SHORT_PACKET              28
+#define COMP_MAX_EXIT_LATENCY_TOO_LARGE_ERROR  29
+#define COMP_ISOCH_BUFFER_OVERRUN              31
+#define COMP_EVENT_LOST_ERROR                  32
+#define COMP_UNDEFINED_ERROR                   33
+#define COMP_INVALID_STREAM_ID_ERROR           34
+
+/* Transfer Event NRDY bit fields. */
+#define TRB_TO_DEV_STREAM(p)                   ((p) & GENMASK(16, 0))
+#define TRB_TO_HOST_STREAM(p)                  ((p) & GENMASK(16, 0))
+#define STREAM_PRIME_ACK                       0xFFFE
+#define STREAM_REJECTED                                0xFFFF
+
+/* Transfer Event bit fields. */
+#define TRB_TO_EP_ID(p)                                (((p) & GENMASK(20, 16)) >> 16)
+
+/**
+ * struct cdnsp_link_trb
+ * @segment_ptr: 64-bit segment pointer.
+ * @intr_target: Interrupter target.
+ * @control: Flags.
+ */
+struct cdnsp_link_trb {
+       __le64 segment_ptr;
+       __le32 intr_target;
+       __le32 control;
+};
+
+/* control bitfields */
+#define LINK_TOGGLE    BIT(1)
+
+/**
+ * struct cdnsp_event_cmd - Command completion event TRB.
+ * @cmd_trb: Pointer to command TRB, or the value passed by the event data TRB.
+ * @status: Command completion parameters and error code.
+ * @flags: Flags.
+ */
+struct cdnsp_event_cmd {
+       __le64 cmd_trb;
+       __le32 status;
+       __le32 flags;
+};
+
+/* flags bitmasks */
+
+/* Address device - disable SetAddress. */
+#define TRB_BSR                BIT(9)
+
+/* Configure Endpoint - Deconfigure. */
+#define TRB_DC         BIT(9)
+
+/* Force Header */
+#define TRB_FH_TO_PACKET_TYPE(p)       ((p) & GENMASK(4, 0))
+#define TRB_FH_TR_PACKET               0x4
+#define TRB_FH_TO_DEVICE_ADDRESS(p)    (((p) << 25) & GENMASK(31, 25))
+#define TRB_FH_TR_PACKET_DEV_NOT       0x6
+#define TRB_FH_TO_NOT_TYPE(p)          (((p) << 4) & GENMASK(7, 4))
+#define TRB_FH_TR_PACKET_FUNCTION_WAKE 0x1
+#define TRB_FH_TO_INTERFACE(p)         (((p) << 8) & GENMASK(15, 8))
+
+enum cdnsp_setup_dev {
+       SETUP_CONTEXT_ONLY,
+       SETUP_CONTEXT_ADDRESS,
+};
+
+/* bits 24:31 are the slot ID. */
+#define TRB_TO_SLOT_ID(p)              (((p) & GENMASK(31, 24)) >> 24)
+#define SLOT_ID_FOR_TRB(p)             (((p) << 24) & GENMASK(31, 24))
+
+/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB. */
+#define TRB_TO_EP_INDEX(p)             (((p) >> 16) & 0x1f)
+
+#define EP_ID_FOR_TRB(p)               ((((p) + 1) << 16) & GENMASK(20, 16))
+
+#define SUSPEND_PORT_FOR_TRB(p)                (((p) & 1) << 23)
+#define TRB_TO_SUSPEND_PORT(p)         (((p) >> 23) & 0x1)
+#define LAST_EP_INDEX                  30
+
+/* Set TR Dequeue Pointer command TRB fields. */
+#define TRB_TO_STREAM_ID(p)            ((((p) & GENMASK(31, 16)) >> 16))
+#define STREAM_ID_FOR_TRB(p)           ((((p)) << 16) & GENMASK(31, 16))
+#define SCT_FOR_TRB(p)                 (((p) << 1) & 0x7)
+
+/* Link TRB specific fields. */
+#define TRB_TC                         BIT(1)
+
+/* Port Status Change Event TRB fields. */
+/* Port ID - bits 31:24. */
+#define GET_PORT_ID(p)                 (((p) & GENMASK(31, 24)) >> 24)
+#define SET_PORT_ID(p)                 (((p) << 24) & GENMASK(31, 24))
+#define EVENT_DATA                     BIT(2)
+
+/* Normal TRB fields. */
+/* transfer_len bitmasks - bits 0:16. */
+#define TRB_LEN(p)                     ((p) & GENMASK(16, 0))
+/* TD Size, packets remaining in this TD, bits 21:17 (5 bits, so max 31). */
+#define TRB_TD_SIZE(p)                 (min((p), (u32)31) << 17)
+#define GET_TD_SIZE(p)                 (((p) & GENMASK(21, 17)) >> 17)
+/*
+ * Controller uses the TD_SIZE field for TBC if Extended TBC
+ * is enabled (ETE).
+ */
+#define TRB_TD_SIZE_TBC(p)             (min((p), (u32)31) << 17)
+/* Interrupter Target - which MSI-X vector to target the completion event at. */
+#define TRB_INTR_TARGET(p)             (((p) << 22) & GENMASK(31, 22))
+#define GET_INTR_TARGET(p)             (((p) & GENMASK(31, 22)) >> 22)
+/*
+ * Total burst count field, Rsvdz on controller with Extended TBC
+ * enabled (ETE).
+ */
+#define TRB_TBC(p)                     (((p) & 0x3) << 7)
+#define TRB_TLBPC(p)                   (((p) & 0xf) << 16)
+
+/* Cycle bit - indicates TRB ownership by the controller or the driver. */
+#define TRB_CYCLE                      BIT(0)
+/*
+ * Force next event data TRB to be evaluated before task switch.
+ * Used to pass OS data back after a TD completes.
+ */
+#define TRB_ENT                                BIT(1)
+/* Interrupt on short packet. */
+#define TRB_ISP                                BIT(2)
+/* Set PCIe no snoop attribute. */
+#define TRB_NO_SNOOP                   BIT(3)
+/* Chain multiple TRBs into a TD. */
+#define TRB_CHAIN                      BIT(4)
+/* Interrupt on completion. */
+#define TRB_IOC                                BIT(5)
+/* The buffer pointer contains immediate data. */
+#define TRB_IDT                                BIT(6)
+/* 0 - NRDY during data stage, 1 - NRDY during status stage (only control). */
+#define TRB_STAT                       BIT(7)
+/* Block Event Interrupt. */
+#define TRB_BEI                                BIT(9)
+
+/* Control transfer TRB specific fields. */
+#define TRB_DIR_IN                     BIT(16)
+
+/* TRB bit mask in Data Stage TRB */
+#define TRB_SETUPID_BITMASK            GENMASK(9, 8)
+#define TRB_SETUPID(p)                 ((p) << 8)
+#define TRB_SETUPID_TO_TYPE(p)         (((p) & TRB_SETUPID_BITMASK) >> 8)
+
+#define TRB_SETUP_SPEEDID_USB3         0x1
+#define TRB_SETUP_SPEEDID_USB2         0x0
+#define TRB_SETUP_SPEEDID(p)           ((p) & (1 << 7))
+
+#define TRB_SETUPSTAT_ACK              0x1
+#define TRB_SETUPSTAT_STALL            0x0
+#define TRB_SETUPSTAT(p)               ((p) << 6)
+
+/* Isochronous TRB specific fields */
+#define TRB_SIA                                BIT(31)
+#define TRB_FRAME_ID(p)                        (((p) << 20) & GENMASK(30, 20))
+
+struct cdnsp_generic_trb {
+       __le32 field[4];
+};
+
+union cdnsp_trb {
+       struct cdnsp_link_trb link;
+       struct cdnsp_transfer_event trans_event;
+       struct cdnsp_event_cmd event_cmd;
+       struct cdnsp_generic_trb generic;
+};
+
+/* TRB bit mask. */
+#define TRB_TYPE_BITMASK       GENMASK(15, 10)
+#define TRB_TYPE(p)            ((p) << 10)
+#define TRB_FIELD_TO_TYPE(p)   (((p) & TRB_TYPE_BITMASK) >> 10)
+
+/* TRB type IDs. */
+/* bulk, interrupt, isoc scatter/gather, and control data stage. */
+#define TRB_NORMAL             1
+/* Setup Stage for control transfers. */
+#define TRB_SETUP              2
+/* Data Stage for control transfers. */
+#define TRB_DATA               3
+/* Status Stage for control transfers. */
+#define TRB_STATUS             4
+/* ISOC transfers. */
+#define TRB_ISOC               5
+/* TRB for linking ring segments. */
+#define TRB_LINK               6
+#define TRB_EVENT_DATA         7
+/* Transfer Ring No-op (not for the command ring). */
+#define TRB_TR_NOOP            8
+
+/* Command TRBs */
+/* Enable Slot Command. */
+#define TRB_ENABLE_SLOT                9
+/* Disable Slot Command. */
+#define TRB_DISABLE_SLOT       10
+/* Address Device Command. */
+#define TRB_ADDR_DEV           11
+/* Configure Endpoint Command. */
+#define TRB_CONFIG_EP          12
+/* Evaluate Context Command. */
+#define TRB_EVAL_CONTEXT       13
+/* Reset Endpoint Command. */
+#define TRB_RESET_EP           14
+/* Stop Transfer Ring Command. */
+#define TRB_STOP_RING          15
+/* Set Transfer Ring Dequeue Pointer Command. */
+#define TRB_SET_DEQ            16
+/* Reset Device Command. */
+#define TRB_RESET_DEV          17
+/* Force Event Command (opt). */
+#define TRB_FORCE_EVENT                18
+/* Force Header Command - generate a transaction or link management packet. */
+#define TRB_FORCE_HEADER       22
+/* No-op Command - not for transfer rings. */
+#define TRB_CMD_NOOP           23
+/* TRB IDs 24-31 reserved. */
+
+/* Event TRBS. */
+/* Transfer Event. */
+#define TRB_TRANSFER           32
+/* Command Completion Event. */
+#define TRB_COMPLETION         33
+/* Port Status Change Event. */
+#define TRB_PORT_STATUS                34
+/* Device Controller Event. */
+#define TRB_HC_EVENT           37
+/* MFINDEX Wrap Event - microframe counter wrapped. */
+#define TRB_MFINDEX_WRAP       39
+/* TRB IDs 40-47 reserved. */
+/* Endpoint Not Ready Event. */
+#define TRB_ENDPOINT_NRDY      48
+/* TRB IDs 49-53 reserved. */
+/* Halt Endpoint Command. */
+#define TRB_HALT_ENDPOINT      54
+/* Doorbell Overflow Event. */
+#define TRB_DRB_OVERFLOW       57
+/* Flush Endpoint Command. */
+#define TRB_FLUSH_ENDPOINT     58
+
+#define TRB_TYPE_LINK(x)       (((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
+#define TRB_TYPE_LINK_LE32(x)  (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+                                       cpu_to_le32(TRB_TYPE(TRB_LINK)))
+#define TRB_TYPE_NOOP_LE32(x)  (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
+                                       cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))
+
+/*
+ * TRBS_PER_SEGMENT must be a multiple of 4.
+ * The command ring is 64-byte aligned, so it must also be greater than 16.
+ */
+#define TRBS_PER_SEGMENT               256
+#define TRBS_PER_EVENT_SEGMENT         256
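+/* Update the event ring dequeue pointer after handling this many TRBs. */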
+#define TRBS_PER_EV_DEQ_UPDATE         100
+#define TRB_SEGMENT_SIZE               (TRBS_PER_SEGMENT * 16)
+#define TRB_SEGMENT_SHIFT              (ilog2(TRB_SEGMENT_SIZE))
+/* TRB buffer pointers can't cross 64KB boundaries. */
+#define TRB_MAX_BUFF_SHIFT             16
+#define TRB_MAX_BUFF_SIZE              BIT(TRB_MAX_BUFF_SHIFT)
+/* How much data is left before the 64KB boundary? */
+#define TRB_BUFF_LEN_UP_TO_BOUNDARY(addr) (TRB_MAX_BUFF_SIZE - \
+                                       ((addr) & (TRB_MAX_BUFF_SIZE - 1)))
+
+/**
+ * struct cdnsp_segment - segment related data.
+ * @trbs: Array of Transfer Request Blocks.
+ * @next: Pointer to the next segment.
+ * @dma: DMA address of current segment.
+ * @bounce_dma: Bounce buffer DMA address.
+ * @bounce_buf: Bounce buffer virtual address.
+ * @bounce_offs: Bounce buffer offset.
+ * @bounce_len: Bounce buffer length.
+ */
+struct cdnsp_segment {
+       union cdnsp_trb *trbs;
+       struct cdnsp_segment *next;
+       dma_addr_t dma;
+       /* Max packet sized bounce buffer for td-fragment alignment */
+       dma_addr_t bounce_dma;
+       void *bounce_buf;
+       unsigned int bounce_offs;
+       unsigned int bounce_len;
+};
+
+/**
+ * struct cdnsp_td - Transfer Descriptor object.
+ * @td_list: Used for binding TD with ep_ring->td_list.
+ * @preq: Request associated with this TD.
+ * @start_seg: Segment containing the first_trb in TD.
+ * @first_trb: First TRB for this TD.
+ * @last_trb: Last TRB of this TD.
+ * @bounce_seg: Bounce segment for this TD.
+ * @request_length_set: actual_length of the request has already been set.
+ * @drbl: TD has been added to HW scheduler - only for stream capable
+ *        endpoints.
+ */
+struct cdnsp_td {
+       struct list_head td_list;
+       struct cdnsp_request *preq;
+       struct cdnsp_segment *start_seg;
+       union cdnsp_trb *first_trb;
+       union cdnsp_trb *last_trb;
+       struct cdnsp_segment *bounce_seg;
+       bool request_length_set;
+       bool drbl;
+};
+
+/**
+ * struct cdnsp_dequeue_state - New dequeue pointer for Transfer Ring.
+ * @new_deq_seg: New dequeue segment.
+ * @new_deq_ptr: New dequeue pointer.
+ * @new_cycle_state: New cycle state.
+ * @stream_id: Stream id for which the new dequeue pointer has been selected.
+ */
+struct cdnsp_dequeue_state {
+       struct cdnsp_segment *new_deq_seg;
+       union cdnsp_trb *new_deq_ptr;
+       int new_cycle_state;
+       unsigned int stream_id;
+};
+
+enum cdnsp_ring_type {
+       TYPE_CTRL = 0,
+       TYPE_ISOC,
+       TYPE_BULK,
+       TYPE_INTR,
+       TYPE_STREAM,
+       TYPE_COMMAND,
+       TYPE_EVENT,
+};
+
+/**
+ * struct cdnsp_ring - information describing transfer, command or event ring.
+ * @first_seg: First segment on transfer ring.
+ * @last_seg: Last segment on transfer ring.
+ * @enqueue: SW enqueue pointer address.
+ * @enq_seg: SW enqueue segment address.
+ * @dequeue: SW dequeue pointer address.
+ * @deq_seg: SW dequeue segment address.
+ * @td_list: transfer descriptor list associated with this ring.
+ * @cycle_state: Current cycle bit. Write the cycle state into the TRB cycle
+ *               field to give ownership of the TRB to the device controller
+ *               (if we are the producer) or to check if we own the TRB
+ *               (if we are the consumer).
+ * @stream_id: Stream id.
+ * @stream_active: Stream is active - PRIME packet has been detected.
+ * @stream_rejected: This ring has been rejected by host.
+ * @num_tds: Number of TDs associated with ring.
+ * @num_segs: Number of segments.
+ * @num_trbs_free: Number of free TRBs on the ring.
+ * @bounce_buf_len: Length of bounce buffer.
+ * @type: Ring type - event, transfer, or command ring.
+ * @last_td_was_short: The last TD completed was a short TD.
+ * @trb_address_map: For mapping physical TRB addresses to segments in
+ *                   stream rings.
+ */
+struct cdnsp_ring {
+       struct cdnsp_segment *first_seg;
+       struct cdnsp_segment *last_seg;
+       union cdnsp_trb  *enqueue;
+       struct cdnsp_segment *enq_seg;
+       union cdnsp_trb  *dequeue;
+       struct cdnsp_segment *deq_seg;
+       struct list_head td_list;
+       u32 cycle_state;
+       unsigned int stream_id;
+       unsigned int stream_active;
+       unsigned int stream_rejected;
+       int num_tds;
+       unsigned int num_segs;
+       unsigned int num_trbs_free;
+       unsigned int bounce_buf_len;
+       enum cdnsp_ring_type type;
+       bool last_td_was_short;
+       struct radix_tree_root *trb_address_map;
+};
+
+/**
+ * struct cdnsp_erst_entry - event ring segment table entry object.
+ * @seg_addr: 64-bit event ring segment address.
+ * @seg_size: Number of TRBs in segment.
+ */
+struct cdnsp_erst_entry {
+       __le64 seg_addr;
+       __le32 seg_size;
+       /* Set to zero */
+       __le32 rsvd;
+};
+
+/**
+ * struct cdnsp_erst - event ring segment table for event ring.
+ * @entries: Array of event ring segments.
+ * @num_entries: Number of segments in entries array.
+ * @erst_dma_addr: DMA address for entries array.
+ */
+struct cdnsp_erst {
+       struct cdnsp_erst_entry *entries;
+       unsigned int num_entries;
+       dma_addr_t erst_dma_addr;
+};
+
+/**
+ * struct cdnsp_request - extended device side representation of usb_request
+ *                        object.
+ * @td: Transfer descriptor associated with this request.
+ * @request: Generic usb_request object describing single I/O request.
+ * @list: Used to add the request to the endpoint's pending_list.
+ * @pep: Extended representation of usb_ep object.
+ * @epnum: Endpoint number associated with usb request.
+ * @direction: Endpoint direction for usb request.
+ */
+struct cdnsp_request {
+       struct  cdnsp_td td;
+       struct usb_request request;
+       struct list_head list;
+       struct cdnsp_ep  *pep;
+       u8 epnum;
+       unsigned direction:1;
+};
+
+#define        ERST_NUM_SEGS   1
+
+/* Stages used during enumeration process. */
+enum cdnsp_ep0_stage {
+       CDNSP_SETUP_STAGE,
+       CDNSP_DATA_STAGE,
+       CDNSP_STATUS_STAGE,
+};
+
+/**
+ * struct cdnsp_port - holds information about detected ports.
+ * @regs: Port registers base address.
+ * @port_num: Port number.
+ * @exist: Indicates if the port exists.
+ * @maj_rev: Major revision.
+ * @min_rev: Minor revision.
+ */
+struct cdnsp_port {
+       struct cdnsp_port_regs __iomem *regs;
+       u8 port_num;
+       u8 exist;
+       u8 maj_rev;
+       u8 min_rev;
+};
+
+#define CDNSP_EXT_PORT_MAJOR(x)                (((x) >> 24) & 0xff)
+#define CDNSP_EXT_PORT_MINOR(x)                (((x) >> 16) & 0xff)
+#define CDNSP_EXT_PORT_OFF(x)          ((x) & 0xff)
+#define CDNSP_EXT_PORT_COUNT(x)                (((x) >> 8) & 0xff)
+
+/**
+ * struct cdnsp_device - represent USB device.
+ * @dev: Pointer to device structure associated with this controller.
+ * @gadget: Device side representation of the peripheral controller.
+ * @gadget_driver: Pointer to the gadget driver.
+ * @irq: IRQ line number used by device side.
+ * @regs: IO device memory.
+ * @cap_regs: Capability registers.
+ * @op_regs: Operational registers.
+ * @run_regs: Runtime registers.
+ * @dba: Device base address register.
+ * @ir_set: Current interrupter register set.
+ * @port20_regs: Port 2.0 Peripheral Configuration Registers.
+ * @port3x_regs: USB3.x Port Peripheral Configuration Registers.
+ * @rev_cap: Controller Capabilities Registers.
+ * @hcs_params1: Cached register copy of read-only HCSPARAMS1.
+ * @hcs_params3: Cached register copy of read-only HCSPARAMS3.
+ * @hcc_params: Cached register copy of read-only HCCPARAMS1.
+ * @setup: Temporary buffer for setup packet.
+ * @ep0_preq: Internal allocated request used during enumeration.
+ * @ep0_stage: ep0 stage during enumeration process.
+ * @three_stage_setup: Three-stage or two-stage setup.
+ * @ep0_expect_in: Data IN expected for control transfer.
+ * @setup_id: Setup identifier.
+ * @setup_speed: Speed detected for current SETUP packet.
+ * @setup_buf: Buffer for SETUP packet.
+ * @device_address: Current device address.
+ * @may_wakeup: remote wakeup enabled/disabled.
+ * @lock: Lock used in interrupt thread context.
+ * @hci_version: device controller version.
+ * @dcbaa: Device context base address array.
+ * @cmd_ring: Command ring.
+ * @cmd: Represents everything needed to issue a command on the Command Ring.
+ * @event_ring: Event ring.
+ * @erst: Event Ring Segment table.
+ * @slot_id: Current Slot ID. Should be 0 or 1.
+ * @out_ctx: Output context.
+ * @in_ctx: Input context.
+ * @eps: Array of endpoint objects associated with the device.
+ * @usb2_hw_lpm_capable: Hardware LPM is enabled.
+ * @u1_allowed: Allow device transition to U1 state.
+ * @u2_allowed: Allow device transition to U2 state.
+ * @device_pool: DMA pool for allocating input and output context.
+ * @segment_pool: DMA pool for allocating new segments.
+ * @cdnsp_state: Current state of controller.
+ * @link_state: Current link state.
+ * @usb2_port: USB 2.0 port.
+ * @usb3_port: USB 3.0 port.
+ * @active_port: Currently selected port.
+ * @test_mode: Selected test mode.
+ */
+struct cdnsp_device {
+       struct device *dev;
+       struct usb_gadget gadget;
+       struct usb_gadget_driver *gadget_driver;
+       unsigned int irq;
+       void __iomem *regs;
+
+       /* Registers map */
+       struct cdnsp_cap_regs __iomem *cap_regs;
+       struct cdnsp_op_regs __iomem *op_regs;
+       struct cdnsp_run_regs __iomem *run_regs;
+       struct cdnsp_doorbell_array __iomem *dba;
+       struct  cdnsp_intr_reg __iomem *ir_set;
+       struct cdnsp_20port_cap __iomem *port20_regs;
+       struct cdnsp_3xport_cap __iomem *port3x_regs;
+       struct cdnsp_rev_cap __iomem *rev_cap;
+
+       /* Cached register copies of read-only CDNSP data */
+       __u32 hcs_params1;
+       __u32 hcs_params3;
+       __u32 hcc_params;
+       /* Lock used in interrupt thread context. */
+       spinlock_t lock;
+       struct usb_ctrlrequest setup;
+       struct cdnsp_request ep0_preq;
+       enum cdnsp_ep0_stage ep0_stage;
+       u8 three_stage_setup;
+       u8 ep0_expect_in;
+       u8 setup_id;
+       u8 setup_speed;
+       void *setup_buf;
+       u8 device_address;
+       int may_wakeup;
+       u16 hci_version;
+
+       /* data structures */
+       struct cdnsp_device_context_array *dcbaa;
+       struct cdnsp_ring *cmd_ring;
+       struct cdnsp_command cmd;
+       struct cdnsp_ring *event_ring;
+       struct cdnsp_erst erst;
+       int slot_id;
+
+       /*
+        * Commands to the hardware are passed an "input context" that
+        * tells the hardware what to change in its data structures.
+        * The hardware will return changes in an "output context" that
+        * software must allocate for the hardware.
+        */
+       struct cdnsp_container_ctx out_ctx;
+       struct cdnsp_container_ctx in_ctx;
+       struct cdnsp_ep eps[CDNSP_ENDPOINTS_NUM];
+       u8 usb2_hw_lpm_capable:1;
+       u8 u1_allowed:1;
+       u8 u2_allowed:1;
+
+       /* DMA pools */
+       struct dma_pool *device_pool;
+       struct dma_pool *segment_pool;
+
+#define CDNSP_STATE_HALTED             BIT(1)
+#define CDNSP_STATE_DYING              BIT(2)
+#define CDNSP_STATE_DISCONNECT_PENDING BIT(3)
+#define CDNSP_WAKEUP_PENDING           BIT(4)
+       unsigned int cdnsp_state;
+       unsigned int link_state;
+
+       struct cdnsp_port usb2_port;
+       struct cdnsp_port usb3_port;
+       struct cdnsp_port *active_port;
+       u16 test_mode;
+};
+
+/*
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Registers with 64-bit address pointers should be written to with
+ * dword accesses by writing the low dword first (ptr[0]), then the high dword
+ * (ptr[1]) second. Controller implementations that do not support 64-bit
+ * address pointers will ignore the high dword, and write order is irrelevant.
+ */
+static inline u64 cdnsp_read_64(__le64 __iomem *regs)
+{
+       return lo_hi_readq(regs);
+}
+
+static inline void cdnsp_write_64(const u64 val, __le64 __iomem *regs)
+{
+       lo_hi_writeq(val, regs);
+}
+
+/* CDNSP memory management functions. */
+void cdnsp_mem_cleanup(struct cdnsp_device *pdev);
+int cdnsp_mem_init(struct cdnsp_device *pdev);
+int cdnsp_setup_addressable_priv_dev(struct cdnsp_device *pdev);
+void cdnsp_copy_ep0_dequeue_into_input_ctx(struct cdnsp_device *pdev);
+void cdnsp_endpoint_zero(struct cdnsp_device *pdev, struct cdnsp_ep *ep);
+int cdnsp_endpoint_init(struct cdnsp_device *pdev,
+                       struct cdnsp_ep *pep,
+                       gfp_t mem_flags);
+int cdnsp_ring_expansion(struct cdnsp_device *pdev,
+                        struct cdnsp_ring *ring,
+                        unsigned int num_trbs, gfp_t flags);
+struct cdnsp_ring *cdnsp_dma_to_transfer_ring(struct cdnsp_ep *ep, u64 address);
+int cdnsp_alloc_stream_info(struct cdnsp_device *pdev,
+                           struct cdnsp_ep *pep,
+                           unsigned int num_stream_ctxs,
+                           unsigned int num_streams);
+int cdnsp_alloc_streams(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+void cdnsp_free_endpoint_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+
+/* Device controller glue. */
+int cdnsp_find_next_ext_cap(void __iomem *base, u32 start, int id);
+int cdnsp_halt(struct cdnsp_device *pdev);
+void cdnsp_died(struct cdnsp_device *pdev);
+int cdnsp_reset(struct cdnsp_device *pdev);
+irqreturn_t cdnsp_irq_handler(int irq, void *priv);
+int cdnsp_setup_device(struct cdnsp_device *pdev, enum cdnsp_setup_dev setup);
+void cdnsp_set_usb2_hardware_lpm(struct cdnsp_device *usbsssp_data,
+                                struct usb_request *req, int enable);
+irqreturn_t cdnsp_thread_irq_handler(int irq, void *data);
+
+/* Ring, segment, TRB, and TD functions. */
+dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,
+                                union cdnsp_trb *trb);
+bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb);
+bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring,
+                           struct cdnsp_segment *seg,
+                           union cdnsp_trb *trb);
+int cdnsp_wait_for_cmd_compl(struct cdnsp_device *pdev);
+void cdnsp_update_erst_dequeue(struct cdnsp_device *pdev,
+                              union cdnsp_trb *event_ring_deq,
+                              u8 clear_ehb);
+void cdnsp_initialize_ring_info(struct cdnsp_ring *ring);
+void cdnsp_ring_cmd_db(struct cdnsp_device *pdev);
+void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type);
+void cdnsp_queue_address_device(struct cdnsp_device *pdev,
+                               dma_addr_t in_ctx_ptr,
+                               enum cdnsp_setup_dev setup);
+void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev,
+                              unsigned int ep_index);
+int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq);
+int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq);
+int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev,
+                               struct cdnsp_request *preq);
+void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
+                                   dma_addr_t in_ctx_ptr);
+void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index);
+void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev,
+                              unsigned int ep_index);
+void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
+                               unsigned int ep_index);
+void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num);
+void cdnsp_queue_reset_device(struct cdnsp_device *pdev);
+void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
+                                  struct cdnsp_ep *pep,
+                                  struct cdnsp_dequeue_state *deq_state);
+void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev,
+                                         struct cdnsp_ep *pep);
+void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring);
+void cdnsp_set_link_state(struct cdnsp_device *pdev,
+                         __le32 __iomem *port_regs, u32 link_state);
+u32 cdnsp_port_state_to_neutral(u32 state);
+
+/* CDNSP device controller contexts. */
+int cdnsp_enable_slot(struct cdnsp_device *pdev);
+int cdnsp_disable_slot(struct cdnsp_device *pdev);
+struct cdnsp_input_control_ctx
+       *cdnsp_get_input_control_ctx(struct cdnsp_container_ctx *ctx);
+struct cdnsp_slot_ctx *cdnsp_get_slot_ctx(struct cdnsp_container_ctx *ctx);
+struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx,
+                                     unsigned int ep_index);
+/* CDNSP gadget interface. */
+void cdnsp_suspend_gadget(struct cdnsp_device *pdev);
+void cdnsp_resume_gadget(struct cdnsp_device *pdev);
+void cdnsp_disconnect_gadget(struct cdnsp_device *pdev);
+void cdnsp_gadget_giveback(struct cdnsp_ep *pep, struct cdnsp_request *preq,
+                          int status);
+int cdnsp_ep_enqueue(struct cdnsp_ep *pep, struct cdnsp_request *preq);
+int cdnsp_ep_dequeue(struct cdnsp_ep *pep, struct cdnsp_request *preq);
+unsigned int cdnsp_port_speed(unsigned int port_status);
+void cdnsp_irq_reset(struct cdnsp_device *pdev);
+int cdnsp_halt_endpoint(struct cdnsp_device *pdev,
+                       struct cdnsp_ep *pep, int value);
+int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep);
+void cdnsp_setup_analyze(struct cdnsp_device *pdev);
+int cdnsp_status_stage(struct cdnsp_device *pdev);
+int cdnsp_reset_device(struct cdnsp_device *pdev);
+
+/**
+ * next_request - gets the next request on the given list
+ * @list: the request list to operate on
+ *
+ * Caller should take care of locking. This function returns NULL or the first
+ * request available on the list.
+ */
+static inline struct cdnsp_request *next_request(struct list_head *list)
+{
+       return list_first_entry_or_null(list, struct cdnsp_request, list);
+}
+
+#define to_cdnsp_ep(ep) (container_of(ep, struct cdnsp_ep, endpoint))
+#define gadget_to_cdnsp(g) (container_of(g, struct cdnsp_device, gadget))
+#define request_to_cdnsp_request(r) (container_of(r, struct cdnsp_request, \
+                                    request))
+#define to_cdnsp_request(r) (container_of(r, struct cdnsp_request, request))
+int cdnsp_remove_request(struct cdnsp_device *pdev, struct cdnsp_request *preq,
+                        struct cdnsp_ep *pep);
+
+#endif /* __LINUX_CDNSP_GADGET_H */
diff --git a/drivers/usb/cdns3/cdnsp-mem.c b/drivers/usb/cdns3/cdnsp-mem.c
new file mode 100644 (file)
index 0000000..7a84e92
--- /dev/null
@@ -0,0 +1,1336 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ * Code based on Linux XHCI driver.
+ * Origin: Copyright (C) 2008 Intel Corp.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/slab.h>
+#include <linux/usb.h>
+
+#include "cdnsp-gadget.h"
+#include "cdnsp-trace.h"
+
+static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
+                                  struct cdnsp_ep *pep);
+/*
+ * Allocates a generic ring segment from the ring pool, sets the dma address,
+ * initializes the segment to zero, and sets the private next pointer to NULL.
+ *
+ * "All components of all Command and Transfer TRBs shall be initialized to '0'"
+ */
+static struct cdnsp_segment *cdnsp_segment_alloc(struct cdnsp_device *pdev,
+                                                unsigned int cycle_state,
+                                                unsigned int max_packet,
+                                                gfp_t flags)
+{
+       struct cdnsp_segment *seg;
+       dma_addr_t dma;
+       int i;
+
+       seg = kzalloc(sizeof(*seg), flags);
+       if (!seg)
+               return NULL;
+
+       seg->trbs = dma_pool_zalloc(pdev->segment_pool, flags, &dma);
+       if (!seg->trbs) {
+               kfree(seg);
+               return NULL;
+       }
+
+       if (max_packet) {
+               seg->bounce_buf = kzalloc(max_packet, flags | GFP_DMA);
+               if (!seg->bounce_buf)
+                       goto free_dma;
+       }
+
+       /* If the cycle state is 0, set the cycle bit to 1 for all the TRBs. */
+       if (cycle_state == 0) {
+               for (i = 0; i < TRBS_PER_SEGMENT; i++)
+                       seg->trbs[i].link.control |= cpu_to_le32(TRB_CYCLE);
+       }
+       seg->dma = dma;
+       seg->next = NULL;
+
+       return seg;
+
+free_dma:
+       dma_pool_free(pdev->segment_pool, seg->trbs, dma);
+       kfree(seg);
+
+       return NULL;
+}
+
+static void cdnsp_segment_free(struct cdnsp_device *pdev,
+                              struct cdnsp_segment *seg)
+{
+       if (seg->trbs)
+               dma_pool_free(pdev->segment_pool, seg->trbs, seg->dma);
+
+       kfree(seg->bounce_buf);
+       kfree(seg);
+}
+
+static void cdnsp_free_segments_for_ring(struct cdnsp_device *pdev,
+                                        struct cdnsp_segment *first)
+{
+       struct cdnsp_segment *seg;
+
+       seg = first->next;
+
+       while (seg != first) {
+               struct cdnsp_segment *next = seg->next;
+
+               cdnsp_segment_free(pdev, seg);
+               seg = next;
+       }
+
+       cdnsp_segment_free(pdev, first);
+}
+
+/*
+ * Make the prev segment point to the next segment.
+ *
+ * Change the last TRB in the prev segment to be a Link TRB which points to the
+ * DMA address of the next segment. The caller needs to set any Link TRB
+ * related flags, such as End TRB, Toggle Cycle, and no snoop.
+ */
+static void cdnsp_link_segments(struct cdnsp_device *pdev,
+                               struct cdnsp_segment *prev,
+                               struct cdnsp_segment *next,
+                               enum cdnsp_ring_type type)
+{
+       struct cdnsp_link_trb *link;
+       u32 val;
+
+       if (!prev || !next)
+               return;
+
+       prev->next = next;
+       if (type != TYPE_EVENT) {
+               link = &prev->trbs[TRBS_PER_SEGMENT - 1].link;
+               link->segment_ptr = cpu_to_le64(next->dma);
+
+               /*
+                * Set the last TRB in the segment to have a TRB type ID
+                * of Link TRB
+                */
+               val = le32_to_cpu(link->control);
+               val &= ~TRB_TYPE_BITMASK;
+               val |= TRB_TYPE(TRB_LINK);
+               link->control = cpu_to_le32(val);
+       }
+}
+
+/*
+ * Link the ring to the new segments.
+ * Set Toggle Cycle for the new ring if needed.
+ */
+static void cdnsp_link_rings(struct cdnsp_device *pdev,
+                            struct cdnsp_ring *ring,
+                            struct cdnsp_segment *first,
+                            struct cdnsp_segment *last,
+                            unsigned int num_segs)
+{
+       struct cdnsp_segment *next;
+
+       if (!ring || !first || !last)
+               return;
+
+       next = ring->enq_seg->next;
+       cdnsp_link_segments(pdev, ring->enq_seg, first, ring->type);
+       cdnsp_link_segments(pdev, last, next, ring->type);
+       ring->num_segs += num_segs;
+       ring->num_trbs_free += (TRBS_PER_SEGMENT - 1) * num_segs;
+
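+       /*
+        * If the new segments were inserted after the old last segment, move
+        * the Toggle Cycle bit from the old last segment's link TRB to the
+        * link TRB of the new last segment.
+        */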
+       if (ring->type != TYPE_EVENT && ring->enq_seg == ring->last_seg) {
+               ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control &=
+                       ~cpu_to_le32(LINK_TOGGLE);
+               last->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+                       cpu_to_le32(LINK_TOGGLE);
+               ring->last_seg = last;
+       }
+}
+
+/*
+ * We need a radix tree for mapping physical addresses of TRBs to the stream
+ * ID they belong to. We need to do this because the device controller won't
+ * tell us which stream ring the TRB came from. We could store the stream ID
+ * in an event data TRB, but that doesn't help us for the cancellation case,
+ * since the endpoint may stop before it reaches that event data TRB.
+ *
+ * The radix tree maps the upper portion of the TRB DMA address to a ring
+ * segment that has the same upper portion of DMA addresses. For example,
+ * say I have segments of size 1KB, that are always 1KB aligned. A segment may
+ * start at 0x10c91000 and end at 0x10c913f0. If I use the upper 10 bits, the
+ * key to the stream ID is 0x43244. I can use the DMA address of the TRB to
+ * pass the radix tree a key to get the right stream ID:
+ *
+ *     0x10c90fff >> 10 = 0x43243
+ *     0x10c912c0 >> 10 = 0x43244
+ *     0x10c91400 >> 10 = 0x43245
+ *
+ * Obviously, only those TRBs with DMA addresses that are within the segment
+ * will make the radix tree return the stream ID for that ring.
+ *
+ * Caveats for the radix tree:
+ *
+ * The radix tree uses an unsigned long as a key pair. On 32-bit systems, an
+ * unsigned long will be 32-bits; on a 64-bit system an unsigned long will be
+ * 64-bits. Since we only request 32-bit DMA addresses, we can use that as the
+ * key on 32-bit or 64-bit systems (it would also be fine if we asked for 64-bit
+ * PCI DMA addresses on a 64-bit system). There might be a problem on 32-bit
+ * extended systems (where the DMA address can be bigger than 32-bits),
+ * if we allow the PCI dma mask to be bigger than 32-bits. So don't do that.
+ */
+static int cdnsp_insert_segment_mapping(struct radix_tree_root *trb_address_map,
+                                       struct cdnsp_ring *ring,
+                                       struct cdnsp_segment *seg,
+                                       gfp_t mem_flags)
+{
+       unsigned long key;
+       int ret;
+
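+       /*
+        * Segments are TRB_SEGMENT_SIZE aligned, so the shifted DMA address
+        * uniquely identifies the owning segment.
+        */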
+       key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
+
+       /* Skip any segments that were already added. */
+       if (radix_tree_lookup(trb_address_map, key))
+               return 0;
+
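+       /* Preload radix tree nodes in case the insert needs to allocate. */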
+       ret = radix_tree_maybe_preload(mem_flags);
+       if (ret)
+               return ret;
+
+       ret = radix_tree_insert(trb_address_map, key, ring);
+       radix_tree_preload_end();
+
+       return ret;
+}
+
+static void cdnsp_remove_segment_mapping(struct radix_tree_root *trb_address_map,
+                                        struct cdnsp_segment *seg)
+{
+       unsigned long key;
+
+       key = (unsigned long)(seg->dma >> TRB_SEGMENT_SHIFT);
+       if (radix_tree_lookup(trb_address_map, key))
+               radix_tree_delete(trb_address_map, key);
+}
+
+static int cdnsp_update_stream_segment_mapping(struct radix_tree_root *trb_address_map,
+                                              struct cdnsp_ring *ring,
+                                              struct cdnsp_segment *first_seg,
+                                              struct cdnsp_segment *last_seg,
+                                              gfp_t mem_flags)
+{
+       struct cdnsp_segment *failed_seg;
+       struct cdnsp_segment *seg;
+       int ret;
+
+       seg = first_seg;
+       do {
+               ret = cdnsp_insert_segment_mapping(trb_address_map, ring, seg,
+                                                  mem_flags);
+               if (ret)
+                       goto remove_streams;
+               if (seg == last_seg)
+                       return 0;
+               seg = seg->next;
+       } while (seg != first_seg);
+
+       return 0;
+
+remove_streams:
+       failed_seg = seg;
+       seg = first_seg;
+       do {
+               cdnsp_remove_segment_mapping(trb_address_map, seg);
+               if (seg == failed_seg)
+                       return ret;
+               seg = seg->next;
+       } while (seg != first_seg);
+
+       return ret;
+}
+
+static void cdnsp_remove_stream_mapping(struct cdnsp_ring *ring)
+{
+       struct cdnsp_segment *seg;
+
+       seg = ring->first_seg;
+       do {
+               cdnsp_remove_segment_mapping(ring->trb_address_map, seg);
+               seg = seg->next;
+       } while (seg != ring->first_seg);
+}
+
+static int cdnsp_update_stream_mapping(struct cdnsp_ring *ring)
+{
+       return cdnsp_update_stream_segment_mapping(ring->trb_address_map, ring,
+                       ring->first_seg, ring->last_seg, GFP_ATOMIC);
+}
+
+static void cdnsp_ring_free(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
+{
+       if (!ring)
+               return;
+
+       trace_cdnsp_ring_free(ring);
+
+       if (ring->first_seg) {
+               if (ring->type == TYPE_STREAM)
+                       cdnsp_remove_stream_mapping(ring);
+
+               cdnsp_free_segments_for_ring(pdev, ring->first_seg);
+       }
+
+       kfree(ring);
+}
+
+void cdnsp_initialize_ring_info(struct cdnsp_ring *ring)
+{
+       ring->enqueue = ring->first_seg->trbs;
+       ring->enq_seg = ring->first_seg;
+       ring->dequeue = ring->enqueue;
+       ring->deq_seg = ring->first_seg;
+
+       /*
+        * The ring is initialized to 0. The producer must write 1 to the cycle
+        * bit to hand over ownership of the TRB, so PCS = 1. The consumer must
+        * compare CCS to the cycle bit to check ownership, so CCS = 1.
+        *
+        * New rings are initialized with cycle state equal to 1; if we are
+        * handling ring expansion, set the cycle state equal to the old ring.
+        */
+       ring->cycle_state = 1;
+
+       /*
+        * Each segment has a link TRB, and we leave an extra TRB for SW
+        * accounting purposes.
+        */
+       ring->num_trbs_free = ring->num_segs * (TRBS_PER_SEGMENT - 1) - 1;
+}
+
+/* Allocate segments and link them for a ring. */
+static int cdnsp_alloc_segments_for_ring(struct cdnsp_device *pdev,
+                                        struct cdnsp_segment **first,
+                                        struct cdnsp_segment **last,
+                                        unsigned int num_segs,
+                                        unsigned int cycle_state,
+                                        enum cdnsp_ring_type type,
+                                        unsigned int max_packet,
+                                        gfp_t flags)
+{
+       struct cdnsp_segment *prev;
+
+       /* Allocate first segment. */
+       prev = cdnsp_segment_alloc(pdev, cycle_state, max_packet, flags);
+       if (!prev)
+               return -ENOMEM;
+
+       num_segs--;
+       *first = prev;
+
+       /* Allocate all other segments. */
+       while (num_segs > 0) {
+               struct cdnsp_segment    *next;
+
+               next = cdnsp_segment_alloc(pdev, cycle_state,
+                                          max_packet, flags);
+               if (!next) {
+                       cdnsp_free_segments_for_ring(pdev, *first);
+                       return -ENOMEM;
+               }
+
+               cdnsp_link_segments(pdev, prev, next, type);
+
+               prev = next;
+               num_segs--;
+       }
+
+       cdnsp_link_segments(pdev, prev, *first, type);
+       *last = prev;
+
+       return 0;
+}
+
+/*
+ * Create a new ring with zero or more segments.
+ *
+ * Link each segment together into a ring.
+ * Set the end flag and the cycle toggle bit on the last segment.
+ */
+static struct cdnsp_ring *cdnsp_ring_alloc(struct cdnsp_device *pdev,
+                                          unsigned int num_segs,
+                                          enum cdnsp_ring_type type,
+                                          unsigned int max_packet,
+                                          gfp_t flags)
+{
+       struct cdnsp_ring *ring;
+       int ret;
+
+       ring = kzalloc(sizeof(*ring), flags);
+       if (!ring)
+               return NULL;
+
+       ring->num_segs = num_segs;
+       ring->bounce_buf_len = max_packet;
+       INIT_LIST_HEAD(&ring->td_list);
+       ring->type = type;
+
+       if (num_segs == 0)
+               return ring;
+
+       ret = cdnsp_alloc_segments_for_ring(pdev, &ring->first_seg,
+                                           &ring->last_seg, num_segs,
+                                           1, type, max_packet, flags);
+       if (ret)
+               goto fail;
+
+       /* Only event ring does not use link TRB. */
+       if (type != TYPE_EVENT)
+               ring->last_seg->trbs[TRBS_PER_SEGMENT - 1].link.control |=
+                       cpu_to_le32(LINK_TOGGLE);
+
+       cdnsp_initialize_ring_info(ring);
+       trace_cdnsp_ring_alloc(ring);
+       return ring;
+fail:
+       kfree(ring);
+       return NULL;
+}
+
+void cdnsp_free_endpoint_rings(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+       cdnsp_ring_free(pdev, pep->ring);
+       pep->ring = NULL;
+       cdnsp_free_stream_info(pdev, pep);
+}
+
+/*
+ * Expand an existing ring.
+ * Allocate a new ring with the same number of segments and link the two rings.
+ */
+int cdnsp_ring_expansion(struct cdnsp_device *pdev,
+                        struct cdnsp_ring *ring,
+                        unsigned int num_trbs,
+                        gfp_t flags)
+{
+       unsigned int num_segs_needed;
+       struct cdnsp_segment *first;
+       struct cdnsp_segment *last;
+       unsigned int num_segs;
+       int ret;
+
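+       /* Each segment provides TRBS_PER_SEGMENT - 1 usable TRBs; round up. */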
+       num_segs_needed = (num_trbs + (TRBS_PER_SEGMENT - 1) - 1) /
+                       (TRBS_PER_SEGMENT - 1);
+
+       /* Allocate the segments needed, or enough to double the ring size. */
+       num_segs = max(ring->num_segs, num_segs_needed);
+
+       ret = cdnsp_alloc_segments_for_ring(pdev, &first, &last, num_segs,
+                                           ring->cycle_state, ring->type,
+                                           ring->bounce_buf_len, flags);
+       if (ret)
+               return -ENOMEM;
+
+       if (ring->type == TYPE_STREAM)
+               ret = cdnsp_update_stream_segment_mapping(ring->trb_address_map,
+                                                         ring, first,
+                                                         last, flags);
+
+       if (ret) {
+               cdnsp_free_segments_for_ring(pdev, first);
+
+               return ret;
+       }
+
+       cdnsp_link_rings(pdev, ring, first, last, num_segs);
+       trace_cdnsp_ring_expansion(ring);
+
+       return 0;
+}
+
+static int cdnsp_init_device_ctx(struct cdnsp_device *pdev)
+{
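+       /* Room for the slot context plus 31 ep contexts, 64 or 32 bytes each. */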
+       int size = HCC_64BYTE_CONTEXT(pdev->hcc_params) ? 2048 : 1024;
+
+       pdev->out_ctx.type = CDNSP_CTX_TYPE_DEVICE;
+       pdev->out_ctx.size = size;
+       pdev->out_ctx.ctx_size = CTX_SIZE(pdev->hcc_params);
+       pdev->out_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
+                                             &pdev->out_ctx.dma);
+
+       if (!pdev->out_ctx.bytes)
+               return -ENOMEM;
+
+       pdev->in_ctx.type = CDNSP_CTX_TYPE_INPUT;
+       pdev->in_ctx.ctx_size = pdev->out_ctx.ctx_size;
+       pdev->in_ctx.size = size + pdev->out_ctx.ctx_size;
+       pdev->in_ctx.bytes = dma_pool_zalloc(pdev->device_pool, GFP_ATOMIC,
+                                            &pdev->in_ctx.dma);
+
+       if (!pdev->in_ctx.bytes) {
+               dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
+                             pdev->out_ctx.dma);
+               return -ENOMEM;
+       }
+
+       return 0;
+}
+
+struct cdnsp_input_control_ctx
+       *cdnsp_get_input_control_ctx(struct cdnsp_container_ctx *ctx)
+{
+       if (ctx->type != CDNSP_CTX_TYPE_INPUT)
+               return NULL;
+
+       return (struct cdnsp_input_control_ctx *)ctx->bytes;
+}
+
+struct cdnsp_slot_ctx *cdnsp_get_slot_ctx(struct cdnsp_container_ctx *ctx)
+{
+       if (ctx->type == CDNSP_CTX_TYPE_DEVICE)
+               return (struct cdnsp_slot_ctx *)ctx->bytes;
+
+       return (struct cdnsp_slot_ctx *)(ctx->bytes + ctx->ctx_size);
+}
+
+struct cdnsp_ep_ctx *cdnsp_get_ep_ctx(struct cdnsp_container_ctx *ctx,
+                                     unsigned int ep_index)
+{
+       /* Increment ep index by offset of start of ep ctx array. */
+       ep_index++;
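+       /* Input contexts are prefixed with an input control context. */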
+       if (ctx->type == CDNSP_CTX_TYPE_INPUT)
+               ep_index++;
+
+       return (struct cdnsp_ep_ctx *)(ctx->bytes + (ep_index * ctx->ctx_size));
+}
+
+static void cdnsp_free_stream_ctx(struct cdnsp_device *pdev,
+                                 struct cdnsp_ep *pep)
+{
+       dma_pool_free(pdev->device_pool, pep->stream_info.stream_ctx_array,
+                     pep->stream_info.ctx_array_dma);
+}
+
+/* The stream context array must be a power of 2. */
+static struct cdnsp_stream_ctx
+       *cdnsp_alloc_stream_ctx(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+       size_t size = sizeof(struct cdnsp_stream_ctx) *
+                     pep->stream_info.num_stream_ctxs;
+
+       if (size > CDNSP_CTX_SIZE)
+               return NULL;
+
+       /*
+        * The driver intentionally uses the device_pool to allocate the
+        * stream context array. A device pool entry is 2048 bytes, which
+        * gives us 128 entries.
+        */
+       return dma_pool_zalloc(pdev->device_pool, GFP_DMA32 | GFP_ATOMIC,
+                              &pep->stream_info.ctx_array_dma);
+}
+
+struct cdnsp_ring *cdnsp_dma_to_transfer_ring(struct cdnsp_ep *pep, u64 address)
+{
+       if (pep->ep_state & EP_HAS_STREAMS)
+               return radix_tree_lookup(&pep->stream_info.trb_address_map,
+                                        address >> TRB_SEGMENT_SHIFT);
+
+       return pep->ring;
+}
+
+/*
+ * Change an endpoint's internal structure so it supports stream IDs.
+ * The number of requested streams includes stream 0, which cannot be used by
+ * the driver.
+ *
+ * The number of stream contexts in the stream context array may be bigger than
+ * the number of streams the driver wants to use. This is because the number of
+ * stream context array entries must be a power of two.
+ */
+int cdnsp_alloc_stream_info(struct cdnsp_device *pdev,
+                           struct cdnsp_ep *pep,
+                           unsigned int num_stream_ctxs,
+                           unsigned int num_streams)
+{
+       struct cdnsp_stream_info *stream_info;
+       struct cdnsp_ring *cur_ring;
+       u32 cur_stream;
+       u64 addr;
+       int ret;
+       int mps;
+
+       stream_info = &pep->stream_info;
+       stream_info->num_streams = num_streams;
+       stream_info->num_stream_ctxs = num_stream_ctxs;
+
+       /* Initialize the array of virtual pointers to stream rings. */
+       stream_info->stream_rings = kcalloc(num_streams,
+                                           sizeof(struct cdnsp_ring *),
+                                           GFP_ATOMIC);
+       if (!stream_info->stream_rings)
+               return -ENOMEM;
+
+       /* Initialize the array of DMA addresses for stream rings for the HW. */
+       stream_info->stream_ctx_array = cdnsp_alloc_stream_ctx(pdev, pep);
+       if (!stream_info->stream_ctx_array)
+               goto cleanup_stream_rings;
+
+       memset(stream_info->stream_ctx_array, 0,
+              sizeof(struct cdnsp_stream_ctx) * num_stream_ctxs);
+       INIT_RADIX_TREE(&stream_info->trb_address_map, GFP_ATOMIC);
+       mps = usb_endpoint_maxp(pep->endpoint.desc);
+
+       /*
+        * Allocate rings for all the streams that the driver will use,
+        * and add their segment DMA addresses to the radix tree.
+        * Stream 0 is reserved.
+        */
+       for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+               cur_ring = cdnsp_ring_alloc(pdev, 2, TYPE_STREAM, mps,
+                                           GFP_ATOMIC);
+               stream_info->stream_rings[cur_stream] = cur_ring;
+
+               if (!cur_ring)
+                       goto cleanup_rings;
+
+               cur_ring->stream_id = cur_stream;
+               cur_ring->trb_address_map = &stream_info->trb_address_map;
+
+               /* Set deq ptr, cycle bit, and stream context type. */
+               addr = cur_ring->first_seg->dma | SCT_FOR_CTX(SCT_PRI_TR) |
+                      cur_ring->cycle_state;
+
+               stream_info->stream_ctx_array[cur_stream].stream_ring =
+                       cpu_to_le64(addr);
+
+               trace_cdnsp_set_stream_ring(cur_ring);
+
+               ret = cdnsp_update_stream_mapping(cur_ring);
+               if (ret)
+                       goto cleanup_rings;
+       }
+
+       return 0;
+
+cleanup_rings:
+       for (cur_stream = 1; cur_stream < num_streams; cur_stream++) {
+               cur_ring = stream_info->stream_rings[cur_stream];
+               if (cur_ring) {
+                       cdnsp_ring_free(pdev, cur_ring);
+                       stream_info->stream_rings[cur_stream] = NULL;
+               }
+       }
+
+cleanup_stream_rings:
+       kfree(pep->stream_info.stream_rings);
+
+       return -ENOMEM;
+}
+
+/* Frees all stream contexts associated with the endpoint. */
+static void cdnsp_free_stream_info(struct cdnsp_device *pdev,
+                                  struct cdnsp_ep *pep)
+{
+       struct cdnsp_stream_info *stream_info = &pep->stream_info;
+       struct cdnsp_ring *cur_ring;
+       int cur_stream;
+
+       if (!(pep->ep_state & EP_HAS_STREAMS))
+               return;
+
+       for (cur_stream = 1; cur_stream < stream_info->num_streams;
+            cur_stream++) {
+               cur_ring = stream_info->stream_rings[cur_stream];
+               if (cur_ring) {
+                       cdnsp_ring_free(pdev, cur_ring);
+                       stream_info->stream_rings[cur_stream] = NULL;
+               }
+       }
+
+       if (stream_info->stream_ctx_array)
+               cdnsp_free_stream_ctx(pdev, pep);
+
+       kfree(stream_info->stream_rings);
+       pep->ep_state &= ~EP_HAS_STREAMS;
+}
+
+/* All the cdnsp_tds in the ring's TD list should be freed at this point. */
+static void cdnsp_free_priv_device(struct cdnsp_device *pdev)
+{
+       pdev->dcbaa->dev_context_ptrs[1] = 0;
+
+       cdnsp_free_endpoint_rings(pdev, &pdev->eps[0]);
+
+       if (pdev->in_ctx.bytes)
+               dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
+                             pdev->in_ctx.dma);
+
+       if (pdev->out_ctx.bytes)
+               dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
+                             pdev->out_ctx.dma);
+
+       pdev->in_ctx.bytes = NULL;
+       pdev->out_ctx.bytes = NULL;
+}
+
+static int cdnsp_alloc_priv_device(struct cdnsp_device *pdev)
+{
+       int ret = -ENOMEM;
+
+       ret = cdnsp_init_device_ctx(pdev);
+       if (ret)
+               return ret;
+
+       /* Allocate endpoint 0 ring. */
+       pdev->eps[0].ring = cdnsp_ring_alloc(pdev, 2, TYPE_CTRL, 0, GFP_ATOMIC);
+       if (!pdev->eps[0].ring)
+               goto fail;
+
+       /* Point to output device context in dcbaa. */
+       pdev->dcbaa->dev_context_ptrs[1] = cpu_to_le64(pdev->out_ctx.dma);
+       pdev->cmd.in_ctx = &pdev->in_ctx;
+
+       trace_cdnsp_alloc_priv_device(pdev);
+       return 0;
+fail:
+       dma_pool_free(pdev->device_pool, pdev->out_ctx.bytes,
+                     pdev->out_ctx.dma);
+       dma_pool_free(pdev->device_pool, pdev->in_ctx.bytes,
+                     pdev->in_ctx.dma);
+
+       return ret;
+}
+
+void cdnsp_copy_ep0_dequeue_into_input_ctx(struct cdnsp_device *pdev)
+{
+       struct cdnsp_ep_ctx *ep0_ctx = pdev->eps[0].in_ctx;
+       struct cdnsp_ring *ep_ring = pdev->eps[0].ring;
+       dma_addr_t dma;
+
+       dma = cdnsp_trb_virt_to_dma(ep_ring->enq_seg, ep_ring->enqueue);
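+       /* The low bit of the dequeue pointer carries the ring's cycle state. */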
+       ep0_ctx->deq = cpu_to_le64(dma | ep_ring->cycle_state);
+}
+
+/* Set up a controller private device for a Set Address command. */
+int cdnsp_setup_addressable_priv_dev(struct cdnsp_device *pdev)
+{
+       struct cdnsp_slot_ctx *slot_ctx;
+       struct cdnsp_ep_ctx *ep0_ctx;
+       u32 max_packets, port;
+
+       ep0_ctx = cdnsp_get_ep_ctx(&pdev->in_ctx, 0);
+       slot_ctx = cdnsp_get_slot_ctx(&pdev->in_ctx);
+
+       /* Only the control endpoint is valid - one endpoint context. */
+       slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1));
+
+       switch (pdev->gadget.speed) {
+       case USB_SPEED_SUPER_PLUS:
+               slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SSP);
+               max_packets = MAX_PACKET(512);
+               break;
+       case USB_SPEED_SUPER:
+               slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_SS);
+               max_packets = MAX_PACKET(512);
+               break;
+       case USB_SPEED_HIGH:
+               slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_HS);
+               max_packets = MAX_PACKET(64);
+               break;
+       case USB_SPEED_FULL:
+               slot_ctx->dev_info |= cpu_to_le32(SLOT_SPEED_FS);
+               max_packets = MAX_PACKET(64);
+               break;
+       default:
+               /* Speed was not set; this shouldn't happen. */
+               return -EINVAL;
+       }
+
+       port = DEV_PORT(pdev->active_port->port_num);
+       slot_ctx->dev_port |= cpu_to_le32(port);
+       slot_ctx->dev_state = cpu_to_le32((pdev->device_address &
+                                          DEV_ADDR_MASK));
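+       /* Per the controller spec, the average TRB length for ep0 is 8. */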
+       ep0_ctx->tx_info = cpu_to_le32(EP_AVG_TRB_LENGTH(0x8));
+       ep0_ctx->ep_info2 = cpu_to_le32(EP_TYPE(CTRL_EP));
+       ep0_ctx->ep_info2 |= cpu_to_le32(MAX_BURST(0) | ERROR_COUNT(3) |
+                                        max_packets);
+
+       ep0_ctx->deq = cpu_to_le64(pdev->eps[0].ring->first_seg->dma |
+                                  pdev->eps[0].ring->cycle_state);
+
+       trace_cdnsp_setup_addressable_priv_device(pdev);
+
+       return 0;
+}
+
+/*
+ * Convert interval expressed as 2^(bInterval - 1) == interval into
+ * straight exponent value 2^n == interval.
+ */
+static unsigned int cdnsp_parse_exponent_interval(struct usb_gadget *g,
+                                                 struct cdnsp_ep *pep)
+{
+       unsigned int interval;
+
+       interval = clamp_val(pep->endpoint.desc->bInterval, 1, 16) - 1;
+       if (interval != pep->endpoint.desc->bInterval - 1)
+               dev_warn(&g->dev, "ep %s - rounding interval to %d %sframes\n",
+                        pep->name, 1 << interval,
+                        g->speed == USB_SPEED_FULL ? "" : "micro");
+
+       /*
+        * Full speed isoc endpoints specify interval in frames,
+        * not microframes. We are using microframes everywhere,
+        * so adjust accordingly.
+        */
+       if (g->speed == USB_SPEED_FULL)
+               interval += 3;  /* 1 frame = 2^3 uframes */
+
+       /* Controller handles only up to 512ms (2^12). */
+       if (interval > 12)
+               interval = 12;
+
+       return interval;
+}
+
+/*
+ * Convert bInterval expressed in microframes (in 1-255 range) to exponent of
+ * microframes, rounded down to nearest power of 2.
+ */
+static unsigned int cdnsp_microframes_to_exponent(struct usb_gadget *g,
+                                                 struct cdnsp_ep *pep,
+                                                 unsigned int desc_interval,
+                                                 unsigned int min_exponent,
+                                                 unsigned int max_exponent)
+{
+       unsigned int interval;
+
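+       /* fls(x) - 1 is floor(log2(x)): round down to a power of two. */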
+       interval = fls(desc_interval) - 1;
+       return clamp_val(interval, min_exponent, max_exponent);
+}
+
+/*
+ * Return the polling interval.
+ *
+ * The polling interval is expressed in "microframes". If the controller's
+ * Interval field is set to N, it will service the endpoint every 2^N * 125us.
+ */
+static unsigned int cdnsp_get_endpoint_interval(struct usb_gadget *g,
+                                               struct cdnsp_ep *pep)
+{
+       unsigned int interval = 0;
+
+       switch (g->speed) {
+       case USB_SPEED_HIGH:
+       case USB_SPEED_SUPER_PLUS:
+       case USB_SPEED_SUPER:
+               if (usb_endpoint_xfer_int(pep->endpoint.desc) ||
+                   usb_endpoint_xfer_isoc(pep->endpoint.desc))
+                       interval = cdnsp_parse_exponent_interval(g, pep);
+               break;
+       case USB_SPEED_FULL:
+               if (usb_endpoint_xfer_isoc(pep->endpoint.desc)) {
+                       interval = cdnsp_parse_exponent_interval(g, pep);
+               } else if (usb_endpoint_xfer_int(pep->endpoint.desc)) {
+                       interval = pep->endpoint.desc->bInterval << 3;
+                       interval = cdnsp_microframes_to_exponent(g, pep,
+                                                                interval,
+                                                                3, 10);
+               }
+
+               break;
+       default:
+               WARN_ON(1);
+       }
+
+       return interval;
+}
+
+/*
+ * The "Mult" field in the endpoint context is only set for SuperSpeed isoc eps.
+ * High speed endpoint descriptors can define "the number of additional
+ * transaction opportunities per microframe", but that goes in the Max Burst
+ * endpoint context field.
+ */
+static u32 cdnsp_get_endpoint_mult(struct usb_gadget *g, struct cdnsp_ep *pep)
+{
+       if (g->speed < USB_SPEED_SUPER ||
+           !usb_endpoint_xfer_isoc(pep->endpoint.desc))
+               return 0;
+
+       return pep->endpoint.comp_desc->bmAttributes;
+}
+
+static u32 cdnsp_get_endpoint_max_burst(struct usb_gadget *g,
+                                       struct cdnsp_ep *pep)
+{
+       /* SuperSpeed and SuperSpeedPlus have max burst in the ep companion desc. */
+       if (g->speed >= USB_SPEED_SUPER)
+               return pep->endpoint.comp_desc->bMaxBurst;
+
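+       /* HS periodic eps: bits 12:11 of wMaxPacketSize are extra transactions. */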
+       if (g->speed == USB_SPEED_HIGH &&
+           (usb_endpoint_xfer_isoc(pep->endpoint.desc) ||
+            usb_endpoint_xfer_int(pep->endpoint.desc)))
+               return (usb_endpoint_maxp(pep->endpoint.desc) & 0x1800) >> 11;
+
+       return 0;
+}
+
+static u32 cdnsp_get_endpoint_type(const struct usb_endpoint_descriptor *desc)
+{
+       int in;
+
+       in = usb_endpoint_dir_in(desc);
+
+       switch (usb_endpoint_type(desc)) {
+       case USB_ENDPOINT_XFER_CONTROL:
+               return CTRL_EP;
+       case USB_ENDPOINT_XFER_BULK:
+               return in ? BULK_IN_EP : BULK_OUT_EP;
+       case USB_ENDPOINT_XFER_ISOC:
+               return in ? ISOC_IN_EP : ISOC_OUT_EP;
+       case USB_ENDPOINT_XFER_INT:
+               return in ? INT_IN_EP : INT_OUT_EP;
+       }
+
+       return 0;
+}
+
+/*
+ * Return the maximum endpoint service interval time (ESIT) payload.
+ * Basically, this is the maxpacket size, multiplied by the burst size
+ * and mult size.
+ */
+static u32 cdnsp_get_max_esit_payload(struct usb_gadget *g,
+                                     struct cdnsp_ep *pep)
+{
+       int max_packet;
+       int max_burst;
+
+       /* Only applies to interrupt or isochronous endpoints. */
+       if (usb_endpoint_xfer_control(pep->endpoint.desc) ||
+           usb_endpoint_xfer_bulk(pep->endpoint.desc))
+               return 0;
+
+       /* SuperSpeedPlus Isoc ep sending over 48k per ESIT. */
+       if (g->speed >= USB_SPEED_SUPER_PLUS &&
+           USB_SS_SSP_ISOC_COMP(pep->endpoint.desc->bmAttributes))
+               return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);
+       /* SuperSpeed or SuperSpeedPlus Isoc ep with less than 48k per ESIT. */
+       else if (g->speed >= USB_SPEED_SUPER)
+               return le16_to_cpu(pep->endpoint.comp_desc->wBytesPerInterval);
+
+       max_packet = usb_endpoint_maxp(pep->endpoint.desc);
+       max_burst = usb_endpoint_maxp_mult(pep->endpoint.desc);
+
+       /* A 0 in max burst means 1 transfer per ESIT */
+       return max_packet * max_burst;
+}
+
+int cdnsp_endpoint_init(struct cdnsp_device *pdev,
+                       struct cdnsp_ep *pep,
+                       gfp_t mem_flags)
+{
+       enum cdnsp_ring_type ring_type;
+       struct cdnsp_ep_ctx *ep_ctx;
+       unsigned int err_count = 0;
+       unsigned int avg_trb_len;
+       unsigned int max_packet;
+       unsigned int max_burst;
+       unsigned int interval;
+       u32 max_esit_payload;
+       unsigned int mult;
+       u32 endpoint_type;
+       int ret;
+
+       ep_ctx = pep->in_ctx;
+
+       endpoint_type = cdnsp_get_endpoint_type(pep->endpoint.desc);
+       if (!endpoint_type)
+               return -EINVAL;
+
+       ring_type = usb_endpoint_type(pep->endpoint.desc);
+
+       /*
+        * Get values to fill the endpoint context, mostly from ep descriptor.
+        * The average TRB buffer length for bulk endpoints is unclear as we
+        * have no clue on scatter gather list entry size. For Isoc and Int,
+        * set it to max available.
+        */
+       max_esit_payload = cdnsp_get_max_esit_payload(&pdev->gadget, pep);
+       interval = cdnsp_get_endpoint_interval(&pdev->gadget, pep);
+       mult = cdnsp_get_endpoint_mult(&pdev->gadget, pep);
+       max_packet = usb_endpoint_maxp(pep->endpoint.desc);
+       max_burst = cdnsp_get_endpoint_max_burst(&pdev->gadget, pep);
+       avg_trb_len = max_esit_payload;
+
+       /* Allow 3 retries for everything but isoc, set CErr = 3. */
+       if (!usb_endpoint_xfer_isoc(pep->endpoint.desc))
+               err_count = 3;
+       if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
+           pdev->gadget.speed == USB_SPEED_HIGH)
+               max_packet = 512;
+       /* Controller spec indicates that ctrl ep avg TRB Length should be 8. */
+       if (usb_endpoint_xfer_control(pep->endpoint.desc))
+               avg_trb_len = 8;
+
+       /* Set up the endpoint ring. */
+       pep->ring = cdnsp_ring_alloc(pdev, 2, ring_type, max_packet, mem_flags);
+       if (!pep->ring)
+               return -ENOMEM;
+
+       pep->skip = false;
+
+       /* Fill the endpoint context */
+       ep_ctx->ep_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_HI(max_esit_payload) |
+                               EP_INTERVAL(interval) | EP_MULT(mult));
+       ep_ctx->ep_info2 = cpu_to_le32(EP_TYPE(endpoint_type) |
+                               MAX_PACKET(max_packet) | MAX_BURST(max_burst) |
+                               ERROR_COUNT(err_count));
+       ep_ctx->deq = cpu_to_le64(pep->ring->first_seg->dma |
+                                 pep->ring->cycle_state);
+
+       ep_ctx->tx_info = cpu_to_le32(EP_MAX_ESIT_PAYLOAD_LO(max_esit_payload) |
+                               EP_AVG_TRB_LENGTH(avg_trb_len));
+
+       if (usb_endpoint_xfer_bulk(pep->endpoint.desc) &&
+           pdev->gadget.speed > USB_SPEED_HIGH) {
+               ret = cdnsp_alloc_streams(pdev, pep);
+               if (ret < 0)
+                       return ret;
+       }
+
+       return 0;
+}
+
+void cdnsp_endpoint_zero(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+       pep->in_ctx->ep_info = 0;
+       pep->in_ctx->ep_info2 = 0;
+       pep->in_ctx->deq = 0;
+       pep->in_ctx->tx_info = 0;
+}
+
+static int cdnsp_alloc_erst(struct cdnsp_device *pdev,
+                           struct cdnsp_ring *evt_ring,
+                           struct cdnsp_erst *erst)
+{
+       struct cdnsp_erst_entry *entry;
+       struct cdnsp_segment *seg;
+       unsigned int val;
+       size_t size;
+
+       size = sizeof(struct cdnsp_erst_entry) * evt_ring->num_segs;
+       erst->entries = dma_alloc_coherent(pdev->dev, size,
+                                          &erst->erst_dma_addr, GFP_KERNEL);
+       if (!erst->entries)
+               return -ENOMEM;
+
+       erst->num_entries = evt_ring->num_segs;
+
+       seg = evt_ring->first_seg;
+       for (val = 0; val < evt_ring->num_segs; val++) {
+               entry = &erst->entries[val];
+               entry->seg_addr = cpu_to_le64(seg->dma);
+               entry->seg_size = cpu_to_le32(TRBS_PER_SEGMENT);
+               entry->rsvd = 0;
+               seg = seg->next;
+       }
+
+       return 0;
+}
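+
+/*
+ * Illustrative ERST layout built by cdnsp_alloc_erst() for a two-segment
+ * event ring (a sketch of the loop above):
+ *
+ *   erst->entries[0] = { .seg_addr = seg0->dma, .seg_size = TRBS_PER_SEGMENT }
+ *   erst->entries[1] = { .seg_addr = seg1->dma, .seg_size = TRBS_PER_SEGMENT }
+ *
+ * The controller walks these entries to locate the event ring segments in
+ * DMA memory.
+ */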
+
+static void cdnsp_free_erst(struct cdnsp_device *pdev, struct cdnsp_erst *erst)
+{
+       size_t size = sizeof(struct cdnsp_erst_entry) * (erst->num_entries);
+       struct device *dev = pdev->dev;
+
+       if (erst->entries)
+               dma_free_coherent(dev, size, erst->entries,
+                                 erst->erst_dma_addr);
+
+       erst->entries = NULL;
+}
+
+void cdnsp_mem_cleanup(struct cdnsp_device *pdev)
+{
+       struct device *dev = pdev->dev;
+
+       cdnsp_free_priv_device(pdev);
+       cdnsp_free_erst(pdev, &pdev->erst);
+
+       if (pdev->event_ring)
+               cdnsp_ring_free(pdev, pdev->event_ring);
+
+       pdev->event_ring = NULL;
+
+       if (pdev->cmd_ring)
+               cdnsp_ring_free(pdev, pdev->cmd_ring);
+
+       pdev->cmd_ring = NULL;
+
+       dma_pool_destroy(pdev->segment_pool);
+       pdev->segment_pool = NULL;
+       dma_pool_destroy(pdev->device_pool);
+       pdev->device_pool = NULL;
+
+       if (pdev->dcbaa)
+               dma_free_coherent(dev, sizeof(*pdev->dcbaa),
+                                 pdev->dcbaa, pdev->dcbaa->dma);
+
+       pdev->dcbaa = NULL;
+
+       pdev->usb2_port.exist = 0;
+       pdev->usb3_port.exist = 0;
+       pdev->usb2_port.port_num = 0;
+       pdev->usb3_port.port_num = 0;
+       pdev->active_port = NULL;
+}
+
+static void cdnsp_set_event_deq(struct cdnsp_device *pdev)
+{
+       dma_addr_t deq;
+       u64 temp;
+
+       deq = cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
+                                   pdev->event_ring->dequeue);
+
+       /* Update controller event ring dequeue pointer */
+       temp = cdnsp_read_64(&pdev->ir_set->erst_dequeue);
+       temp &= ERST_PTR_MASK;
+
+       /*
+        * Don't clear the EHB bit (which is RW1C) because
+        * there might be more events to service.
+        */
+       temp &= ~ERST_EHB;
+
+       cdnsp_write_64(((u64)deq & (u64)~ERST_PTR_MASK) | temp,
+                      &pdev->ir_set->erst_dequeue);
+}
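+
+/*
+ * Note on the write above (illustrative): because EHB is RW1C, clearing it
+ * in the value written back (temp &= ~ERST_EHB) writes 0 to that bit, which
+ * leaves the hardware EHB state untouched; writing 1 would acknowledge it
+ * and could race with events that still need servicing.
+ */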
+
+static void cdnsp_add_in_port(struct cdnsp_device *pdev,
+                             struct cdnsp_port *port,
+                             __le32 __iomem *addr)
+{
+       u32 temp, port_offset, port_count;
+
+       temp = readl(addr);
+       port->maj_rev = CDNSP_EXT_PORT_MAJOR(temp);
+       port->min_rev = CDNSP_EXT_PORT_MINOR(temp);
+
+       /* Port offset and count in the third dword. */
+       temp = readl(addr + 2);
+       port_offset = CDNSP_EXT_PORT_OFF(temp);
+       port_count = CDNSP_EXT_PORT_COUNT(temp);
+
+       trace_cdnsp_port_info(addr, port_offset, port_count, port->maj_rev);
+
+       port->port_num = port_offset;
+       port->exist = 1;
+}
+
+/*
+ * Scan the Extended Capabilities for the "Supported Protocol Capabilities" that
+ * specify what speeds each port is supposed to be.
+ */
+static int cdnsp_setup_port_arrays(struct cdnsp_device *pdev)
+{
+       void __iomem *base;
+       u32 offset;
+       int i;
+
+       base = &pdev->cap_regs->hc_capbase;
+       offset = cdnsp_find_next_ext_cap(base, 0,
+                                        EXT_CAP_CFG_DEV_20PORT_CAP_ID);
+       pdev->port20_regs = base + offset;
+
+       offset = cdnsp_find_next_ext_cap(base, 0, D_XEC_CFG_3XPORT_CAP);
+       pdev->port3x_regs = base + offset;
+
+       offset = 0;
+       base = &pdev->cap_regs->hc_capbase;
+
+       /* Driver expects a max of 2 extended protocol capabilities. */
+       for (i = 0; i < 2; i++) {
+               u32 temp;
+
+               offset = cdnsp_find_next_ext_cap(base, offset,
+                                                EXT_CAPS_PROTOCOL);
+               temp = readl(base + offset);
+
+               if (CDNSP_EXT_PORT_MAJOR(temp) == 0x03 &&
+                   !pdev->usb3_port.port_num)
+                       cdnsp_add_in_port(pdev, &pdev->usb3_port,
+                                         base + offset);
+
+               if (CDNSP_EXT_PORT_MAJOR(temp) == 0x02 &&
+                   !pdev->usb2_port.port_num)
+                       cdnsp_add_in_port(pdev, &pdev->usb2_port,
+                                         base + offset);
+       }
+
+       if (!pdev->usb2_port.exist || !pdev->usb3_port.exist) {
+               dev_err(pdev->dev, "Error: Only one port detected\n");
+               return -ENODEV;
+       }
+
+       trace_cdnsp_init("Found USB 2.0 ports and USB 3.0 ports.");
+
+       pdev->usb2_port.regs = (struct cdnsp_port_regs __iomem *)
+                              (&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
+                               (pdev->usb2_port.port_num - 1));
+
+       pdev->usb3_port.regs = (struct cdnsp_port_regs __iomem *)
+                              (&pdev->op_regs->port_reg_base + NUM_PORT_REGS *
+                               (pdev->usb3_port.port_num - 1));
+
+       return 0;
+}
+
+/*
+ * Initialize memory for CDNSP (one-time init).
+ *
+ * Program the PAGESIZE register, initialize the device context array, create
+ * device contexts, set up a command ring segment, create event
+ * ring (one for now).
+ */
+int cdnsp_mem_init(struct cdnsp_device *pdev)
+{
+       struct device *dev = pdev->dev;
+       int ret = -ENOMEM;
+       unsigned int val;
+       dma_addr_t dma;
+       u32 page_size;
+       u64 val_64;
+
+       /*
+        * Use 4K pages, since that's common and the minimum the
+        * controller supports
+        */
+       page_size = 1 << 12;
+
+       val = readl(&pdev->op_regs->config_reg);
+       val = (val & ~MAX_DEVS) | CDNSP_DEV_MAX_SLOTS | CONFIG_U3E;
+       writel(val, &pdev->op_regs->config_reg);
+
+       /*
+        * The device context base address array must be physically
+        * contiguous and 64-byte (cache line) aligned.
+        */
+       pdev->dcbaa = dma_alloc_coherent(dev, sizeof(*pdev->dcbaa),
+                                        &dma, GFP_KERNEL);
+       if (!pdev->dcbaa)
+               return -ENOMEM;
+
+       memset(pdev->dcbaa, 0, sizeof(*pdev->dcbaa));
+       pdev->dcbaa->dma = dma;
+
+       cdnsp_write_64(dma, &pdev->op_regs->dcbaa_ptr);
+
+       /*
+        * Initialize the ring segment pool.  The ring must be a contiguous
+        * structure comprised of TRBs. The TRBs must be 16 byte aligned,
+        * however, the command ring segment needs 64-byte aligned segments
+        * and our use of dma addresses in the trb_address_map radix tree needs
+        * TRB_SEGMENT_SIZE alignment, so the driver picks the greater
+        * alignment need.
+        */
+       pdev->segment_pool = dma_pool_create("CDNSP ring segments", dev,
+                                            TRB_SEGMENT_SIZE, TRB_SEGMENT_SIZE,
+                                            page_size);
+       if (!pdev->segment_pool)
+               goto release_dcbaa;
+
+       pdev->device_pool = dma_pool_create("CDNSP input/output contexts", dev,
+                                           CDNSP_CTX_SIZE, 64, page_size);
+       if (!pdev->device_pool)
+               goto destroy_segment_pool;
+
+       /* Set up the command ring to have one segment for now. */
+       pdev->cmd_ring = cdnsp_ring_alloc(pdev, 1, TYPE_COMMAND, 0, GFP_KERNEL);
+       if (!pdev->cmd_ring)
+               goto destroy_device_pool;
+
+       /* Set the address in the Command Ring Control register */
+       val_64 = cdnsp_read_64(&pdev->op_regs->cmd_ring);
+       val_64 = (val_64 & (u64)CMD_RING_RSVD_BITS) |
+                (pdev->cmd_ring->first_seg->dma & (u64)~CMD_RING_RSVD_BITS) |
+                pdev->cmd_ring->cycle_state;
+       cdnsp_write_64(val_64, &pdev->op_regs->cmd_ring);
+
+       val = readl(&pdev->cap_regs->db_off);
+       val &= DBOFF_MASK;
+       pdev->dba = (void __iomem *)pdev->cap_regs + val;
+
+       /* Set ir_set to interrupt register set 0 */
+       pdev->ir_set = &pdev->run_regs->ir_set[0];
+
+       /*
+        * Event ring setup: Allocate a normal ring, but also setup
+        * the event ring segment table (ERST).
+        */
+       pdev->event_ring = cdnsp_ring_alloc(pdev, ERST_NUM_SEGS, TYPE_EVENT,
+                                           0, GFP_KERNEL);
+       if (!pdev->event_ring)
+               goto free_cmd_ring;
+
+       ret = cdnsp_alloc_erst(pdev, pdev->event_ring, &pdev->erst);
+       if (ret)
+               goto free_event_ring;
+
+       /* Set ERST count with the number of entries in the segment table. */
+       val = readl(&pdev->ir_set->erst_size);
+       val &= ERST_SIZE_MASK;
+       val |= ERST_NUM_SEGS;
+       writel(val, &pdev->ir_set->erst_size);
+
+       /* Set the segment table base address. */
+       val_64 = cdnsp_read_64(&pdev->ir_set->erst_base);
+       val_64 &= ERST_PTR_MASK;
+       val_64 |= (pdev->erst.erst_dma_addr & (u64)~ERST_PTR_MASK);
+       cdnsp_write_64(val_64, &pdev->ir_set->erst_base);
+
+       /* Set the event ring dequeue address. */
+       cdnsp_set_event_deq(pdev);
+
+       ret = cdnsp_setup_port_arrays(pdev);
+       if (ret)
+               goto free_erst;
+
+       ret = cdnsp_alloc_priv_device(pdev);
+       if (ret) {
+               dev_err(pdev->dev,
+                       "Could not allocate cdnsp_device data structures\n");
+               goto free_erst;
+       }
+
+       return 0;
+
+free_erst:
+       cdnsp_free_erst(pdev, &pdev->erst);
+free_event_ring:
+       cdnsp_ring_free(pdev, pdev->event_ring);
+free_cmd_ring:
+       cdnsp_ring_free(pdev, pdev->cmd_ring);
+destroy_device_pool:
+       dma_pool_destroy(pdev->device_pool);
+destroy_segment_pool:
+       dma_pool_destroy(pdev->segment_pool);
+release_dcbaa:
+       dma_free_coherent(dev, sizeof(*pdev->dcbaa), pdev->dcbaa,
+                         pdev->dcbaa->dma);
+
+       cdnsp_reset(pdev);
+
+       return ret;
+}
diff --git a/drivers/usb/cdns3/cdnsp-pci.c b/drivers/usb/cdns3/cdnsp-pci.c
new file mode 100644 (file)
index 0000000..fe8a114
--- /dev/null
@@ -0,0 +1,254 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence PCI Glue driver.
+ *
+ * Copyright (C) 2019 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+
+#include "core.h"
+#include "gadget-export.h"
+
+#define PCI_BAR_HOST           0
+#define PCI_BAR_OTG            0
+#define PCI_BAR_DEV            2
+
+#define PCI_DEV_FN_HOST_DEVICE 0
+#define PCI_DEV_FN_OTG         1
+
+#define PCI_DRIVER_NAME                "cdns-pci-usbssp"
+#define PLAT_DRIVER_NAME       "cdns-usbssp"
+
+#define CDNS_VENDOR_ID         0x17cd
+#define CDNS_DEVICE_ID         0x0100
+#define CDNS_DRD_IF            (PCI_CLASS_SERIAL_USB << 8 | 0x80)
+
+static struct pci_dev *cdnsp_get_second_fun(struct pci_dev *pdev)
+{
+       struct pci_dev *func;
+
+       /*
+        * Gets the second function.
+        * It's a little tricky, but this platform has two functions.
+        * The first keeps resources for Host/Device while the second
+        * keeps resources for DRD/OTG.
+        */
+       func = pci_get_device(pdev->vendor, pdev->device, NULL);
+       if (!func)
+               return NULL;
+
+       if (func->devfn == pdev->devfn) {
+               func = pci_get_device(pdev->vendor, pdev->device, func);
+               if (!func)
+                       return NULL;
+       }
+
+       return func;
+}
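+
+/*
+ * Pairing example (illustrative): when probed for devfn 0 (Host/Device),
+ * pci_get_device() may first return devfn 0 itself, in which case the
+ * helper searches again from that hit and returns devfn 1 (DRD/OTG), and
+ * vice versa. The reference taken here is dropped with pci_dev_put() in
+ * probe error paths and in remove.
+ */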
+
+static int cdnsp_pci_probe(struct pci_dev *pdev,
+                          const struct pci_device_id *id)
+{
+       struct device *dev = &pdev->dev;
+       struct pci_dev *func;
+       struct resource *res;
+       struct cdns *cdnsp;
+       int ret;
+
+       /*
+        * For the GADGET/HOST PCI function, the (devfn) function number
+        * is 0; for the OTG PCI function, it is 1.
+        */
+       if (!id || (pdev->devfn != PCI_DEV_FN_HOST_DEVICE &&
+                   pdev->devfn != PCI_DEV_FN_OTG))
+               return -EINVAL;
+
+       func = cdnsp_get_second_fun(pdev);
+       if (!func)
+               return -EINVAL;
+
+       if (func->class == PCI_CLASS_SERIAL_USB_XHCI ||
+           pdev->class == PCI_CLASS_SERIAL_USB_XHCI) {
+               ret = -EINVAL;
+               goto put_pci;
+       }
+
+       ret = pcim_enable_device(pdev);
+       if (ret) {
+               dev_err(&pdev->dev, "Enabling PCI device has failed %d\n", ret);
+               goto put_pci;
+       }
+
+       pci_set_master(pdev);
+       if (pci_is_enabled(func)) {
+               cdnsp = pci_get_drvdata(func);
+       } else {
+               cdnsp = kzalloc(sizeof(*cdnsp), GFP_KERNEL);
+               if (!cdnsp) {
+                       ret = -ENOMEM;
+                       goto disable_pci;
+               }
+       }
+
+       /* For the GADGET device the function number is 0. */
+       if (pdev->devfn == 0) {
+               resource_size_t rsrc_start, rsrc_len;
+
+               /* Function 0: host(BAR_0) + device(BAR_1). */
+               dev_dbg(dev, "Initialize resources\n");
+               rsrc_start = pci_resource_start(pdev, PCI_BAR_DEV);
+               rsrc_len = pci_resource_len(pdev, PCI_BAR_DEV);
+               res = devm_request_mem_region(dev, rsrc_start, rsrc_len, "dev");
+               if (!res) {
+                       dev_dbg(dev, "controller already in use\n");
+                       ret = -EBUSY;
+                       goto free_cdnsp;
+               }
+
+               cdnsp->dev_regs = devm_ioremap(dev, rsrc_start, rsrc_len);
+               if (!cdnsp->dev_regs) {
+                       dev_dbg(dev, "error mapping memory\n");
+                       ret = -EFAULT;
+                       goto free_cdnsp;
+               }
+
+               cdnsp->dev_irq = pdev->irq;
+               dev_dbg(dev, "USBSS-DEV physical base addr: %pa\n",
+                       &rsrc_start);
+
+               res = &cdnsp->xhci_res[0];
+               res->start = pci_resource_start(pdev, PCI_BAR_HOST);
+               res->end = pci_resource_end(pdev, PCI_BAR_HOST);
+               res->name = "xhci";
+               res->flags = IORESOURCE_MEM;
+               dev_dbg(dev, "USBSS-XHCI physical base addr: %pa\n",
+                       &res->start);
+
+               /* Interrupt for XHCI. */
+               res = &cdnsp->xhci_res[1];
+               res->start = pdev->irq;
+               res->name = "host";
+               res->flags = IORESOURCE_IRQ;
+       } else {
+               res = &cdnsp->otg_res;
+               res->start = pci_resource_start(pdev, PCI_BAR_OTG);
+               res->end = pci_resource_end(pdev, PCI_BAR_OTG);
+               res->name = "otg";
+               res->flags = IORESOURCE_MEM;
+               dev_dbg(dev, "CDNSP-DRD physical base addr: %pa\n",
+                       &res->start);
+
+               /* Interrupt for OTG/DRD. */
+               cdnsp->otg_irq = pdev->irq;
+       }
+
+       if (pci_is_enabled(func)) {
+               cdnsp->dev = dev;
+               cdnsp->gadget_init = cdnsp_gadget_init;
+
+               ret = cdns_init(cdnsp);
+               if (ret)
+                       goto free_cdnsp;
+       }
+
+       pci_set_drvdata(pdev, cdnsp);
+
+       device_wakeup_enable(&pdev->dev);
+       if (pci_dev_run_wake(pdev))
+               pm_runtime_put_noidle(&pdev->dev);
+
+       return 0;
+
+free_cdnsp:
+       if (!pci_is_enabled(func))
+               kfree(cdnsp);
+
+disable_pci:
+       pci_disable_device(pdev);
+
+put_pci:
+       pci_dev_put(func);
+
+       return ret;
+}
+
+static void cdnsp_pci_remove(struct pci_dev *pdev)
+{
+       struct cdns *cdnsp;
+       struct pci_dev *func;
+
+       func = cdnsp_get_second_fun(pdev);
+       cdnsp = (struct cdns *)pci_get_drvdata(pdev);
+
+       if (pci_dev_run_wake(pdev))
+               pm_runtime_get_noresume(&pdev->dev);
+
+       if (!pci_is_enabled(func)) {
+               kfree(cdnsp);
+               goto pci_put;
+       }
+
+       cdns_remove(cdnsp);
+
+pci_put:
+       pci_dev_put(func);
+}
+
+static int __maybe_unused cdnsp_pci_suspend(struct device *dev)
+{
+       struct cdns *cdns = dev_get_drvdata(dev);
+
+       return cdns_suspend(cdns);
+}
+
+static int __maybe_unused cdnsp_pci_resume(struct device *dev)
+{
+       struct cdns *cdns = dev_get_drvdata(dev);
+       unsigned long flags;
+       int ret;
+
+       spin_lock_irqsave(&cdns->lock, flags);
+       ret = cdns_resume(cdns, 1);
+       spin_unlock_irqrestore(&cdns->lock, flags);
+
+       return ret;
+}
+
+static const struct dev_pm_ops cdnsp_pci_pm_ops = {
+       SET_SYSTEM_SLEEP_PM_OPS(cdnsp_pci_suspend, cdnsp_pci_resume)
+};
+
+static const struct pci_device_id cdnsp_pci_ids[] = {
+       { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+         PCI_CLASS_SERIAL_USB_DEVICE, PCI_ANY_ID },
+       { PCI_VENDOR_ID_CDNS, CDNS_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,
+         CDNS_DRD_IF, PCI_ANY_ID },
+       { 0, }
+};
+
+static struct pci_driver cdnsp_pci_driver = {
+       .name = "cdnsp-pci",
+       .id_table = &cdnsp_pci_ids[0],
+       .probe = cdnsp_pci_probe,
+       .remove = cdnsp_pci_remove,
+       .driver = {
+               .pm = &cdnsp_pci_pm_ops,
+       }
+};
+
+module_pci_driver(cdnsp_pci_driver);
+MODULE_DEVICE_TABLE(pci, cdnsp_pci_ids);
+
+MODULE_ALIAS("pci:cdnsp");
+MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Cadence CDNSP PCI driver");
diff --git a/drivers/usb/cdns3/cdnsp-ring.c b/drivers/usb/cdns3/cdnsp-ring.c
new file mode 100644 (file)
index 0000000..e15e13b
--- /dev/null
@@ -0,0 +1,2439 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ * Code based on Linux XHCI driver.
+ * Origin: Copyright (C) 2008 Intel Corp
+ */
+
+/*
+ * Ring initialization rules:
+ * 1. Each segment is initialized to zero, except for link TRBs.
+ * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
+ *    Consumer Cycle State (CCS), depending on ring function.
+ * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
+ *
+ * Ring behavior rules:
+ * 1. A ring is empty if enqueue == dequeue. This means there will always be at
+ *    least one free TRB in the ring. This is useful if you want to turn that
+ *    into a link TRB and expand the ring.
+ * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
+ *    link TRB, then load the pointer with the address in the link TRB. If the
+ *    link TRB had its toggle bit set, you may need to update the ring cycle
+ *    state (see cycle bit rules). You may have to do this multiple times
+ *    until you reach a non-link TRB.
+ * 3. A ring is full if enqueue++ (for the definition of increment above)
+ *    equals the dequeue pointer.
+ *
+ * Cycle bit rules:
+ * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ * 2. When a producer increments an enqueue pointer and encounters a toggle bit
+ *    in a link TRB, it must toggle the ring cycle state.
+ *
+ * Producer rules:
+ * 1. Check if ring is full before you enqueue.
+ * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
+ *    Update enqueue pointer between each write (which may update the ring
+ *    cycle state).
+ * 3. Notify the consumer. If SW is the producer, it rings the doorbell for
+ *    command and endpoint rings. The controller is the producer for the
+ *    event ring, and it generates an interrupt according to interrupt
+ *    moderation rules.
+ *
+ * Consumer rules:
+ * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
+ *    the TRB is owned by the consumer.
+ * 2. Update dequeue pointer (which may update the ring cycle state) and
+ *    continue processing TRBs until you reach a TRB which is not owned by you.
+ * 3. Notify the producer. SW is the consumer for the event ring, and it
+ *    updates event ring dequeue pointer. Controller is the consumer for the
+ *    command and endpoint rings; it generates events on the event ring
+ *    for these.
+ */
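+
+/*
+ * Worked cycle-bit example (illustrative): suppose the producer cycle state
+ * is currently 1. The producer hands a TRB over by writing it with
+ * cycle = 1; the consumer, whose cycle state is also 1, owns exactly those
+ * TRBs. Once both sides pass the link TRB with its toggle bit set and wrap
+ * around, their cycle states flip to 0, so stale TRBs still carrying
+ * cycle == 1 no longer match and processing stops at the first TRB that has
+ * not been rewritten.
+ */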
+
+#include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#include "cdnsp-trace.h"
+#include "cdnsp-gadget.h"
+
+/*
+ * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
+ * address of the TRB.
+ */
+dma_addr_t cdnsp_trb_virt_to_dma(struct cdnsp_segment *seg,
+                                union cdnsp_trb *trb)
+{
+       unsigned long segment_offset = trb - seg->trbs;
+
+       if (trb < seg->trbs || segment_offset >= TRBS_PER_SEGMENT)
+               return 0;
+
+       return seg->dma + (segment_offset * sizeof(*trb));
+}
+
+static bool cdnsp_trb_is_noop(union cdnsp_trb *trb)
+{
+       return TRB_TYPE_NOOP_LE32(trb->generic.field[3]);
+}
+
+static bool cdnsp_trb_is_link(union cdnsp_trb *trb)
+{
+       return TRB_TYPE_LINK_LE32(trb->link.control);
+}
+
+bool cdnsp_last_trb_on_seg(struct cdnsp_segment *seg, union cdnsp_trb *trb)
+{
+       return trb == &seg->trbs[TRBS_PER_SEGMENT - 1];
+}
+
+bool cdnsp_last_trb_on_ring(struct cdnsp_ring *ring,
+                           struct cdnsp_segment *seg,
+                           union cdnsp_trb *trb)
+{
+       return cdnsp_last_trb_on_seg(seg, trb) && (seg->next == ring->first_seg);
+}
+
+static bool cdnsp_link_trb_toggles_cycle(union cdnsp_trb *trb)
+{
+       return le32_to_cpu(trb->link.control) & LINK_TOGGLE;
+}
+
+static void cdnsp_trb_to_noop(union cdnsp_trb *trb, u32 noop_type)
+{
+       if (cdnsp_trb_is_link(trb)) {
+               /* Unchain chained link TRBs. */
+               trb->link.control &= cpu_to_le32(~TRB_CHAIN);
+       } else {
+               trb->generic.field[0] = 0;
+               trb->generic.field[1] = 0;
+               trb->generic.field[2] = 0;
+               /* Preserve only the cycle bit of this TRB. */
+               trb->generic.field[3] &= cpu_to_le32(TRB_CYCLE);
+               trb->generic.field[3] |= cpu_to_le32(TRB_TYPE(noop_type));
+       }
+}
+
+/*
+ * Updates trb to point to the next TRB in the ring, and updates seg if the next
+ * TRB is in a new segment. This does not skip over link TRBs, and it does not
+ * affect the ring dequeue or enqueue pointers.
+ */
+static void cdnsp_next_trb(struct cdnsp_device *pdev,
+                          struct cdnsp_ring *ring,
+                          struct cdnsp_segment **seg,
+                          union cdnsp_trb **trb)
+{
+       if (cdnsp_trb_is_link(*trb)) {
+               *seg = (*seg)->next;
+               *trb = ((*seg)->trbs);
+       } else {
+               (*trb)++;
+       }
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ */
+void cdnsp_inc_deq(struct cdnsp_device *pdev, struct cdnsp_ring *ring)
+{
+       /* The event ring doesn't have link TRBs; check for the last TRB. */
+       if (ring->type == TYPE_EVENT) {
+               if (!cdnsp_last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
+                       ring->dequeue++;
+                       goto out;
+               }
+
+               if (cdnsp_last_trb_on_ring(ring, ring->deq_seg, ring->dequeue))
+                       ring->cycle_state ^= 1;
+
+               ring->deq_seg = ring->deq_seg->next;
+               ring->dequeue = ring->deq_seg->trbs;
+               goto out;
+       }
+
+       /* All other rings have link TRBs. */
+       if (!cdnsp_trb_is_link(ring->dequeue)) {
+               ring->dequeue++;
+               ring->num_trbs_free++;
+       }
+       while (cdnsp_trb_is_link(ring->dequeue)) {
+               ring->deq_seg = ring->deq_seg->next;
+               ring->dequeue = ring->deq_seg->trbs;
+       }
+out:
+       trace_cdnsp_inc_deq(ring);
+}
+
+/*
+ * See Cycle bit rules. SW is the consumer for the event ring only.
+ * Don't make a ring full of link TRBs. That would be dumb and this would loop.
+ *
+ * If we've just enqueued a TRB that is in the middle of a TD (meaning the
+ * chain bit is set), then set the chain bit in all the following link TRBs.
+ * If we've enqueued the last TRB in a TD, make sure the following link TRBs
+ * have their chain bit cleared (so that each Link TRB is a separate TD).
+ *
+ * @more_trbs_coming: Will more TRBs be enqueued before ringing the doorbell?
+ */
+static void cdnsp_inc_enq(struct cdnsp_device *pdev,
+                         struct cdnsp_ring *ring,
+                         bool more_trbs_coming)
+{
+       union cdnsp_trb *next;
+       u32 chain;
+
+       chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
+
+       /* If this is not the event ring, there is one less usable TRB. */
+       if (!cdnsp_trb_is_link(ring->enqueue))
+               ring->num_trbs_free--;
+       next = ++(ring->enqueue);
+
+       /* Advance the enqueue pointer further if that was a link TRB. */
+       while (cdnsp_trb_is_link(next)) {
+               /*
+                * If the caller doesn't plan on enqueuing more TDs before
+                * ringing the doorbell, then we don't want to give the link TRB
+                * to the hardware just yet. We'll give the link TRB back in
+                * cdnsp_prepare_ring() just before we enqueue the TD at the
+                * top of the ring.
+                */
+               if (!chain && !more_trbs_coming)
+                       break;
+
+               next->link.control &= cpu_to_le32(~TRB_CHAIN);
+               next->link.control |= cpu_to_le32(chain);
+
+               /* Give this link TRB to the hardware */
+               wmb();
+               next->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+               /* Toggle the cycle bit after the last ring segment. */
+               if (cdnsp_link_trb_toggles_cycle(next))
+                       ring->cycle_state ^= 1;
+
+               ring->enq_seg = ring->enq_seg->next;
+               ring->enqueue = ring->enq_seg->trbs;
+               next = ring->enqueue;
+       }
+
+       trace_cdnsp_inc_enq(ring);
+}
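+
+/*
+ * Example of the chain handling above (illustrative): when a TD spans a
+ * segment boundary, the TRB before the link TRB has TRB_CHAIN set, so the
+ * loop copies that chain bit into the link TRB and flips its cycle bit,
+ * letting the controller follow the link mid-TD. For the last TRB of a TD
+ * (chain == 0) with no more TRBs coming, the loop breaks early and the link
+ * TRB is handed over later in cdnsp_prepare_ring().
+ */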
+
+/*
+ * Check to see if there's room to enqueue num_trbs on the ring and make sure
+ * enqueue pointer will not advance into dequeue segment.
+ */
+static bool cdnsp_room_on_ring(struct cdnsp_device *pdev,
+                              struct cdnsp_ring *ring,
+                              unsigned int num_trbs)
+{
+       int num_trbs_in_deq_seg;
+
+       if (ring->num_trbs_free < num_trbs)
+               return false;
+
+       if (ring->type != TYPE_COMMAND && ring->type != TYPE_EVENT) {
+               num_trbs_in_deq_seg = ring->dequeue - ring->deq_seg->trbs;
+
+               if (ring->num_trbs_free < num_trbs + num_trbs_in_deq_seg)
+                       return false;
+       }
+
+       return true;
+}
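+
+/*
+ * Numeric example (illustrative): with num_trbs_free == 10, enqueuing
+ * num_trbs == 8 on a transfer ring whose dequeue pointer sits 5 TRBs into
+ * its segment fails the second check (10 < 8 + 5), because the enqueue
+ * pointer must not advance into the dequeue segment.
+ */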
+
+/*
+ * Workaround for L1: the controller has an issue with resuming from L1
+ * after the doorbell for an endpoint is set during the L1 state. This
+ * function forces a resume signal in such a case.
+ */
+static void cdnsp_force_l0_go(struct cdnsp_device *pdev)
+{
+       if (pdev->active_port == &pdev->usb2_port && pdev->gadget.lpm_capable)
+               cdnsp_set_link_state(pdev, &pdev->active_port->regs->portsc, XDEV_U0);
+}
+
+/* Ring the doorbell after placing a command on the ring. */
+void cdnsp_ring_cmd_db(struct cdnsp_device *pdev)
+{
+       trace_cdnsp_cmd_drbl("Ding Dong");
+       writel(DB_VALUE_CMD, &pdev->dba->cmd_db);
+}
+
+/*
+ * Ring the doorbell after placing a transfer on the ring.
+ * Returns true if doorbell was set, otherwise false.
+ */
+static bool cdnsp_ring_ep_doorbell(struct cdnsp_device *pdev,
+                                  struct cdnsp_ep *pep,
+                                  unsigned int stream_id)
+{
+       __le32 __iomem *reg_addr = &pdev->dba->ep_db;
+       unsigned int ep_state = pep->ep_state;
+       unsigned int db_value;
+
+       /*
+        * Don't ring the doorbell for this endpoint if endpoint is halted or
+        * disabled.
+        */
+       if (ep_state & EP_HALTED || !(ep_state & EP_ENABLED))
+               return false;
+
+       /* For stream-capable endpoints the driver can ring the doorbell only twice. */
+       if (pep->ep_state & EP_HAS_STREAMS) {
+               if (pep->stream_info.drbls_count >= 2)
+                       return false;
+
+               pep->stream_info.drbls_count++;
+       }
+
+       pep->ep_state &= ~EP_STOPPED;
+
+       if (pep->idx == 0 && pdev->ep0_stage == CDNSP_DATA_STAGE &&
+           !pdev->ep0_expect_in)
+               db_value = DB_VALUE_EP0_OUT(pep->idx, stream_id);
+       else
+               db_value = DB_VALUE(pep->idx, stream_id);
+
+       trace_cdnsp_tr_drbl(pep, stream_id);
+
+       writel(db_value, reg_addr);
+
+       cdnsp_force_l0_go(pdev);
+
+       /* Doorbell was set. */
+       return true;
+}
+
+/*
+ * Get the right ring for the given pep and stream_id.
+ * If the endpoint supports streams, boundary check the USB request's stream ID.
+ * If the endpoint doesn't support streams, return the singular endpoint ring.
+ */
+static struct cdnsp_ring *cdnsp_get_transfer_ring(struct cdnsp_device *pdev,
+                                                 struct cdnsp_ep *pep,
+                                                 unsigned int stream_id)
+{
+       if (!(pep->ep_state & EP_HAS_STREAMS))
+               return pep->ring;
+
+       if (stream_id == 0 || stream_id >= pep->stream_info.num_streams) {
+               dev_err(pdev->dev, "ERR: %s ring doesn't exist for SID: %d.\n",
+                       pep->name, stream_id);
+               return NULL;
+       }
+
+       return pep->stream_info.stream_rings[stream_id];
+}
+
+static struct cdnsp_ring *
+       cdnsp_request_to_transfer_ring(struct cdnsp_device *pdev,
+                                      struct cdnsp_request *preq)
+{
+       return cdnsp_get_transfer_ring(pdev, preq->pep,
+                                      preq->request.stream_id);
+}
+
+/* Ring the doorbell for any rings with pending requests. */
+void cdnsp_ring_doorbell_for_active_rings(struct cdnsp_device *pdev,
+                                         struct cdnsp_ep *pep)
+{
+       struct cdnsp_stream_info *stream_info;
+       unsigned int stream_id;
+       int ret;
+
+       if (pep->ep_state & EP_DIS_IN_RROGRESS)
+               return;
+
+       /* A ring has a pending request if its TD list is not empty. */
+       if (!(pep->ep_state & EP_HAS_STREAMS) && pep->number) {
+               if (pep->ring && !list_empty(&pep->ring->td_list))
+                       cdnsp_ring_ep_doorbell(pdev, pep, 0);
+               return;
+       }
+
+       stream_info = &pep->stream_info;
+
+       for (stream_id = 1; stream_id < stream_info->num_streams; stream_id++) {
+               struct cdnsp_td *td, *td_temp;
+               struct cdnsp_ring *ep_ring;
+
+               if (stream_info->drbls_count >= 2)
+                       return;
+
+               ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
+               if (!ep_ring)
+                       continue;
+
+               if (!ep_ring->stream_active || ep_ring->stream_rejected)
+                       continue;
+
+               list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
+                                        td_list) {
+                       if (td->drbl)
+                               continue;
+
+                       ret = cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
+                       if (ret)
+                               td->drbl = 1;
+               }
+       }
+}
+
+/*
+ * Get the hw dequeue pointer controller stopped on, either directly from the
+ * endpoint context, or if streams are in use from the stream context.
+ * The returned hw_dequeue contains the lowest four bits with cycle state
+ * and possible stream context type.
+ */
+static u64 cdnsp_get_hw_deq(struct cdnsp_device *pdev,
+                           unsigned int ep_index,
+                           unsigned int stream_id)
+{
+       struct cdnsp_stream_ctx *st_ctx;
+       struct cdnsp_ep *pep;
+
+       pep = &pdev->eps[ep_index];
+
+       if (pep->ep_state & EP_HAS_STREAMS) {
+               st_ctx = &pep->stream_info.stream_ctx_array[stream_id];
+               return le64_to_cpu(st_ctx->stream_ring);
+       }
+
+       return le64_to_cpu(pep->out_ctx->deq);
+}
+
+/*
+ * Move the controller endpoint ring dequeue pointer past cur_td.
+ * Record the new state of the controller endpoint ring dequeue segment,
+ * dequeue pointer, and new consumer cycle state in state.
+ * Update internal representation of the ring's dequeue pointer.
+ *
+ * We do this in three jumps:
+ *  - First we update our new ring state to be the same as when the
+ *    controller stopped.
+ *  - Then we traverse the ring to find the segment that contains
+ *    the last TRB in the TD. We toggle the controller's new cycle state
+ *    when we pass any link TRBs with the toggle cycle bit set.
+ *  - Finally we move the dequeue state one TRB further, toggling the cycle bit
+ *    if we've moved it past a link TRB with the toggle cycle bit set.
+ */
+static void cdnsp_find_new_dequeue_state(struct cdnsp_device *pdev,
+                                        struct cdnsp_ep *pep,
+                                        unsigned int stream_id,
+                                        struct cdnsp_td *cur_td,
+                                        struct cdnsp_dequeue_state *state)
+{
+       bool td_last_trb_found = false;
+       struct cdnsp_segment *new_seg;
+       struct cdnsp_ring *ep_ring;
+       union cdnsp_trb *new_deq;
+       bool cycle_found = false;
+       u64 hw_dequeue;
+
+       ep_ring = cdnsp_get_transfer_ring(pdev, pep, stream_id);
+       if (!ep_ring)
+               return;
+
+       /*
+        * Dig out the cycle state saved by the controller during the
+        * stop endpoint command.
+        */
+       hw_dequeue = cdnsp_get_hw_deq(pdev, pep->idx, stream_id);
+       new_seg = ep_ring->deq_seg;
+       new_deq = ep_ring->dequeue;
+       state->new_cycle_state = hw_dequeue & 0x1;
+       state->stream_id = stream_id;
+
+       /*
+        * We want to find the pointer, segment and cycle state of the new trb
+        * (the one after current TD's last_trb). We know the cycle state at
+        * hw_dequeue, so walk the ring until both hw_dequeue and last_trb are
+        * found.
+        */
+       do {
+               if (!cycle_found && cdnsp_trb_virt_to_dma(new_seg, new_deq)
+                   == (dma_addr_t)(hw_dequeue & ~0xf)) {
+                       cycle_found = true;
+
+                       if (td_last_trb_found)
+                               break;
+               }
+
+               if (new_deq == cur_td->last_trb)
+                       td_last_trb_found = true;
+
+               if (cycle_found && cdnsp_trb_is_link(new_deq) &&
+                   cdnsp_link_trb_toggles_cycle(new_deq))
+                       state->new_cycle_state ^= 0x1;
+
+               cdnsp_next_trb(pdev, ep_ring, &new_seg, &new_deq);
+
+               /* Search wrapped around, bail out. */
+               if (new_deq == pep->ring->dequeue) {
+                       dev_err(pdev->dev,
+                               "Error: Failed finding new dequeue state\n");
+                       state->new_deq_seg = NULL;
+                       state->new_deq_ptr = NULL;
+                       return;
+               }
+
+       } while (!cycle_found || !td_last_trb_found);
+
+       state->new_deq_seg = new_seg;
+       state->new_deq_ptr = new_deq;
+
+       trace_cdnsp_new_deq_state(state);
+}
+
+/*
+ * flip_cycle means flip the cycle bit of all but the first and last TRB.
+ * (The last TRB actually points to the ring enqueue pointer, which is not part
+ * of this TD.) This is used to remove partially enqueued isoc TDs from a ring.
+ */
+static void cdnsp_td_to_noop(struct cdnsp_device *pdev,
+                            struct cdnsp_ring *ep_ring,
+                            struct cdnsp_td *td,
+                            bool flip_cycle)
+{
+       struct cdnsp_segment *seg = td->start_seg;
+       union cdnsp_trb *trb = td->first_trb;
+
+       while (1) {
+               cdnsp_trb_to_noop(trb, TRB_TR_NOOP);
+
+               /* flip cycle if asked to */
+               if (flip_cycle && trb != td->first_trb && trb != td->last_trb)
+                       trb->generic.field[3] ^= cpu_to_le32(TRB_CYCLE);
+
+               if (trb == td->last_trb)
+                       break;
+
+               cdnsp_next_trb(pdev, ep_ring, &seg, &trb);
+       }
+}
+
+/*
+ * This TD is defined by the TRBs starting at start_trb in start_seg and ending
+ * at end_trb, which may be in another segment. If the suspect DMA address is a
+ * TRB in this TD, this function returns that TRB's segment. Otherwise it
+ * returns 0.
+ */
+static struct cdnsp_segment *cdnsp_trb_in_td(struct cdnsp_device *pdev,
+                                            struct cdnsp_segment *start_seg,
+                                            union cdnsp_trb *start_trb,
+                                            union cdnsp_trb *end_trb,
+                                            dma_addr_t suspect_dma)
+{
+       struct cdnsp_segment *cur_seg;
+       union cdnsp_trb *temp_trb;
+       dma_addr_t end_seg_dma;
+       dma_addr_t end_trb_dma;
+       dma_addr_t start_dma;
+
+       start_dma = cdnsp_trb_virt_to_dma(start_seg, start_trb);
+       cur_seg = start_seg;
+
+       do {
+               if (start_dma == 0)
+                       return NULL;
+
+               temp_trb = &cur_seg->trbs[TRBS_PER_SEGMENT - 1];
+               /* We may get an event for a Link TRB in the middle of a TD */
+               end_seg_dma = cdnsp_trb_virt_to_dma(cur_seg, temp_trb);
+               /* If the end TRB isn't in this segment, this is set to 0 */
+               end_trb_dma = cdnsp_trb_virt_to_dma(cur_seg, end_trb);
+
+               trace_cdnsp_looking_trb_in_td(suspect_dma, start_dma,
+                                             end_trb_dma, cur_seg->dma,
+                                             end_seg_dma);
+
+               if (end_trb_dma > 0) {
+                       /*
+                        * The end TRB is in this segment, so suspect should
+                        * be here
+                        */
+                       if (start_dma <= end_trb_dma) {
+                               if (suspect_dma >= start_dma &&
+                                   suspect_dma <= end_trb_dma) {
+                                       return cur_seg;
+                               }
+                       } else {
+                               /*
+                                * Case for one segment with a
+                                * TD wrapped around to the top
+                                */
+                               if ((suspect_dma >= start_dma &&
+                                    suspect_dma <= end_seg_dma) ||
+                                   (suspect_dma >= cur_seg->dma &&
+                                    suspect_dma <= end_trb_dma)) {
+                                       return cur_seg;
+                               }
+                       }
+
+                       return NULL;
+               }
+
+               /* Might still be somewhere in this segment */
+               if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
+                       return cur_seg;
+
+               cur_seg = cur_seg->next;
+               start_dma = cdnsp_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
+       } while (cur_seg != start_seg);
+
+       return NULL;
+}
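+
+/*
+ * Wrap-around example (illustrative DMA values): for a one-segment TD that
+ * wraps, say the segment spans 0x1000-0x1ff0, start_dma == 0x1400 and
+ * end_trb_dma == 0x1200. A suspect_dma of 0x1500 (between start_dma and the
+ * segment end) or 0x1100 (between the segment base and end_trb_dma) lies in
+ * the TD, which is exactly what the two-range check above accepts.
+ */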
+
+static void cdnsp_unmap_td_bounce_buffer(struct cdnsp_device *pdev,
+                                        struct cdnsp_ring *ring,
+                                        struct cdnsp_td *td)
+{
+       struct cdnsp_segment *seg = td->bounce_seg;
+       struct cdnsp_request *preq;
+       size_t len;
+
+       if (!seg)
+               return;
+
+       preq = td->preq;
+
+       trace_cdnsp_bounce_unmap(td->preq, seg->bounce_len, seg->bounce_offs,
+                                seg->bounce_dma, 0);
+
+       if (!preq->direction) {
+               dma_unmap_single(pdev->dev, seg->bounce_dma,
+                                ring->bounce_buf_len, DMA_TO_DEVICE);
+               return;
+       }
+
+       dma_unmap_single(pdev->dev, seg->bounce_dma, ring->bounce_buf_len,
+                        DMA_FROM_DEVICE);
+
+       /* For IN transfers, copy the data from the bounce buffer to the sg list. */
+       len = sg_pcopy_from_buffer(preq->request.sg, preq->request.num_sgs,
+                                  seg->bounce_buf, seg->bounce_len,
+                                  seg->bounce_offs);
+       if (len != seg->bounce_len)
+               dev_warn(pdev->dev, "WARN Wrong bounce buffer read length: %zu != %d\n",
+                        len, seg->bounce_len);
+
+       seg->bounce_len = 0;
+       seg->bounce_offs = 0;
+}
+
+static int cdnsp_cmd_set_deq(struct cdnsp_device *pdev,
+                            struct cdnsp_ep *pep,
+                            struct cdnsp_dequeue_state *deq_state)
+{
+       struct cdnsp_ring *ep_ring;
+       int ret;
+
+       if (!deq_state->new_deq_ptr || !deq_state->new_deq_seg) {
+               cdnsp_ring_doorbell_for_active_rings(pdev, pep);
+               return 0;
+       }
+
+       cdnsp_queue_new_dequeue_state(pdev, pep, deq_state);
+       cdnsp_ring_cmd_db(pdev);
+       ret = cdnsp_wait_for_cmd_compl(pdev);
+
+       trace_cdnsp_handle_cmd_set_deq(cdnsp_get_slot_ctx(&pdev->out_ctx));
+       trace_cdnsp_handle_cmd_set_deq_ep(pep->out_ctx);
+
+       /*
+        * Update the ring's dequeue segment and dequeue pointer
+        * to reflect the new position.
+        */
+       ep_ring = cdnsp_get_transfer_ring(pdev, pep, deq_state->stream_id);
+
+       if (cdnsp_trb_is_link(ep_ring->dequeue)) {
+               ep_ring->deq_seg = ep_ring->deq_seg->next;
+               ep_ring->dequeue = ep_ring->deq_seg->trbs;
+       }
+
+       while (ep_ring->dequeue != deq_state->new_deq_ptr) {
+               ep_ring->num_trbs_free++;
+               ep_ring->dequeue++;
+
+               if (cdnsp_trb_is_link(ep_ring->dequeue)) {
+                       if (ep_ring->dequeue == deq_state->new_deq_ptr)
+                               break;
+
+                       ep_ring->deq_seg = ep_ring->deq_seg->next;
+                       ep_ring->dequeue = ep_ring->deq_seg->trbs;
+               }
+       }
+
+       /*
+        * A timeout probably occurred while handling the Set Dequeue Pointer
+        * command. It's a critical error and the controller will be stopped.
+        */
+       if (ret)
+               return -ESHUTDOWN;
+
+       /* Restart any rings with pending requests */
+       cdnsp_ring_doorbell_for_active_rings(pdev, pep);
+
+       return 0;
+}
+
+int cdnsp_remove_request(struct cdnsp_device *pdev,
+                        struct cdnsp_request *preq,
+                        struct cdnsp_ep *pep)
+{
+       struct cdnsp_dequeue_state deq_state;
+       struct cdnsp_td *cur_td = NULL;
+       struct cdnsp_ring *ep_ring;
+       struct cdnsp_segment *seg;
+       int status = -ECONNRESET;
+       int ret = 0;
+       u64 hw_deq;
+
+       memset(&deq_state, 0, sizeof(deq_state));
+
+       trace_cdnsp_remove_request(pep->out_ctx);
+       trace_cdnsp_remove_request_td(preq);
+
+       cur_td = &preq->td;
+       ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
+
+       /*
+        * If we stopped on the TD we need to cancel, then we have to
+        * move the controller endpoint ring dequeue pointer past
+        * this TD.
+        */
+       hw_deq = cdnsp_get_hw_deq(pdev, pep->idx, preq->request.stream_id);
+       hw_deq &= ~0xf;
+
+       seg = cdnsp_trb_in_td(pdev, cur_td->start_seg, cur_td->first_trb,
+                             cur_td->last_trb, hw_deq);
+
+       if (seg && (pep->ep_state & EP_ENABLED))
+               cdnsp_find_new_dequeue_state(pdev, pep, preq->request.stream_id,
+                                            cur_td, &deq_state);
+       else
+               cdnsp_td_to_noop(pdev, ep_ring, cur_td, false);
+
+       /*
+        * The event handler won't see a completion for this TD anymore,
+        * so remove it from the endpoint ring's TD list.
+        */
+       list_del_init(&cur_td->td_list);
+       ep_ring->num_tds--;
+       pep->stream_info.td_count--;
+
+       /*
+        * During disconnect all endpoints will be disabled, so we don't
+        * have to worry about updating the dequeue pointer.
+        */
+       if (pdev->cdnsp_state & CDNSP_STATE_DISCONNECT_PENDING) {
+               status = -ESHUTDOWN;
+               ret = cdnsp_cmd_set_deq(pdev, pep, &deq_state);
+       }
+
+       cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, cur_td);
+       cdnsp_gadget_giveback(pep, cur_td->preq, status);
+
+       return ret;
+}
+
+static int cdnsp_update_port_id(struct cdnsp_device *pdev, u32 port_id)
+{
+       struct cdnsp_port *port = pdev->active_port;
+       u8 old_port = 0;
+
+       if (port && port->port_num == port_id)
+               return 0;
+
+       if (port)
+               old_port = port->port_num;
+
+       if (port_id == pdev->usb2_port.port_num) {
+               port = &pdev->usb2_port;
+       } else if (port_id == pdev->usb3_port.port_num) {
+               port = &pdev->usb3_port;
+       } else {
+               dev_err(pdev->dev, "Port event with invalid port ID %d\n",
+                       port_id);
+               return -EINVAL;
+       }
+
+       if (port_id != old_port) {
+               cdnsp_disable_slot(pdev);
+               pdev->active_port = port;
+               cdnsp_enable_slot(pdev);
+       }
+
+       if (port_id == pdev->usb2_port.port_num)
+               cdnsp_set_usb2_hardware_lpm(pdev, NULL, 1);
+       else
+               writel(PORT_U1_TIMEOUT(1) | PORT_U2_TIMEOUT(1),
+                      &pdev->usb3_port.regs->portpmsc);
+
+       return 0;
+}
+
+static void cdnsp_handle_port_status(struct cdnsp_device *pdev,
+                                    union cdnsp_trb *event)
+{
+       struct cdnsp_port_regs __iomem *port_regs;
+       u32 portsc, cmd_regs;
+       bool port2 = false;
+       u32 link_state;
+       u32 port_id;
+
+       /* Port status change events always have a successful completion code */
+       if (GET_COMP_CODE(le32_to_cpu(event->generic.field[2])) != COMP_SUCCESS)
+               dev_err(pdev->dev, "ERR: incorrect PSC event\n");
+
+       port_id = GET_PORT_ID(le32_to_cpu(event->generic.field[0]));
+
+       if (cdnsp_update_port_id(pdev, port_id))
+               goto cleanup;
+
+       port_regs = pdev->active_port->regs;
+
+       if (port_id == pdev->usb2_port.port_num)
+               port2 = true;
+
+new_event:
+       portsc = readl(&port_regs->portsc);
+       writel(cdnsp_port_state_to_neutral(portsc) |
+              (portsc & PORT_CHANGE_BITS), &port_regs->portsc);
+
+       trace_cdnsp_handle_port_status(pdev->active_port->port_num, portsc);
+
+       pdev->gadget.speed = cdnsp_port_speed(portsc);
+       link_state = portsc & PORT_PLS_MASK;
+
+       /* Port Link State change detected. */
+       if ((portsc & PORT_PLC)) {
+               if (!(pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
+                   link_state == XDEV_RESUME) {
+                       cmd_regs = readl(&pdev->op_regs->command);
+                       if (!(cmd_regs & CMD_R_S))
+                               goto cleanup;
+
+                       if (DEV_SUPERSPEED_ANY(portsc)) {
+                               cdnsp_set_link_state(pdev, &port_regs->portsc,
+                                                    XDEV_U0);
+
+                               cdnsp_resume_gadget(pdev);
+                       }
+               }
+
+               if ((pdev->cdnsp_state & CDNSP_WAKEUP_PENDING) &&
+                   link_state == XDEV_U0) {
+                       pdev->cdnsp_state &= ~CDNSP_WAKEUP_PENDING;
+
+                       cdnsp_force_header_wakeup(pdev, 1);
+                       cdnsp_ring_cmd_db(pdev);
+                       cdnsp_wait_for_cmd_compl(pdev);
+               }
+
+               if (link_state == XDEV_U0 && pdev->link_state == XDEV_U3 &&
+                   !DEV_SUPERSPEED_ANY(portsc))
+                       cdnsp_resume_gadget(pdev);
+
+               if (link_state == XDEV_U3 && pdev->link_state != XDEV_U3)
+                       cdnsp_suspend_gadget(pdev);
+
+               pdev->link_state = link_state;
+       }
+
+       if (portsc & PORT_CSC) {
+               /* Detach device. */
+               if (pdev->gadget.connected && !(portsc & PORT_CONNECT))
+                       cdnsp_disconnect_gadget(pdev);
+
+               /* Attach device. */
+               if (portsc & PORT_CONNECT) {
+                       if (!port2)
+                               cdnsp_irq_reset(pdev);
+
+                       usb_gadget_set_state(&pdev->gadget, USB_STATE_ATTACHED);
+               }
+       }
+
+       /* Port reset. */
+       if ((portsc & (PORT_RC | PORT_WRC)) && (portsc & PORT_CONNECT)) {
+               cdnsp_irq_reset(pdev);
+               pdev->u1_allowed = 0;
+               pdev->u2_allowed = 0;
+               pdev->may_wakeup = 0;
+       }
+
+       if (portsc & PORT_OCC)
+               dev_err(pdev->dev, "Port Over Current detected\n");
+
+       if (portsc & PORT_CEC)
+               dev_err(pdev->dev, "Port Configure Error detected\n");
+
+       if (readl(&port_regs->portsc) & PORT_CHANGE_BITS)
+               goto new_event;
+
+cleanup:
+       cdnsp_inc_deq(pdev, pdev->event_ring);
+}
+
+static void cdnsp_td_cleanup(struct cdnsp_device *pdev,
+                            struct cdnsp_td *td,
+                            struct cdnsp_ring *ep_ring,
+                            int *status)
+{
+       struct cdnsp_request *preq = td->preq;
+
+       /* If a bounce buffer was used to align this TD then unmap it. */
+       cdnsp_unmap_td_bounce_buffer(pdev, ep_ring, td);
+
+       /*
+        * If the controller said we transferred more data than the buffer
+        * length, play it safe and say we didn't transfer anything.
+        */
+       if (preq->request.actual > preq->request.length) {
+               preq->request.actual = 0;
+               *status = 0;
+       }
+
+       list_del_init(&td->td_list);
+       ep_ring->num_tds--;
+       preq->pep->stream_info.td_count--;
+
+       cdnsp_gadget_giveback(preq->pep, preq, *status);
+}
+
+static void cdnsp_finish_td(struct cdnsp_device *pdev,
+                           struct cdnsp_td *td,
+                           struct cdnsp_transfer_event *event,
+                           struct cdnsp_ep *ep,
+                           int *status)
+{
+       struct cdnsp_ring *ep_ring;
+       u32 trb_comp_code;
+
+       ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+       trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+
+       if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+           trb_comp_code == COMP_STOPPED ||
+           trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
+               /*
+                * The Endpoint Stop Command completion will take care of any
+                * stopped TDs. A stopped TD may be restarted, so don't update
+                * the ring dequeue pointer or take this TD off any lists yet.
+                */
+               return;
+       }
+
+       /* Update ring dequeue pointer */
+       while (ep_ring->dequeue != td->last_trb)
+               cdnsp_inc_deq(pdev, ep_ring);
+
+       cdnsp_inc_deq(pdev, ep_ring);
+
+       cdnsp_td_cleanup(pdev, td, ep_ring, status);
+}
+
+/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
+static int cdnsp_sum_trb_lengths(struct cdnsp_device *pdev,
+                                struct cdnsp_ring *ring,
+                                union cdnsp_trb *stop_trb)
+{
+       struct cdnsp_segment *seg = ring->deq_seg;
+       union cdnsp_trb *trb = ring->dequeue;
+       u32 sum;
+
+       for (sum = 0; trb != stop_trb; cdnsp_next_trb(pdev, ring, &seg, &trb)) {
+               if (!cdnsp_trb_is_noop(trb) && !cdnsp_trb_is_link(trb))
+                       sum += TRB_LEN(le32_to_cpu(trb->generic.field[2]));
+       }
+       return sum;
+}
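+
+/*
+ * Example (illustrative): for a TD queued as three normal TRBs of 1024
+ * bytes each that stopped on its last TRB, summing from the dequeue pointer
+ * up to (and excluding) the stop TRB yields 2048 bytes; callers then add
+ * whatever part of the stop TRB itself completed.
+ */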
+
+static int cdnsp_giveback_first_trb(struct cdnsp_device *pdev,
+                                   struct cdnsp_ep *pep,
+                                   unsigned int stream_id,
+                                   int start_cycle,
+                                   struct cdnsp_generic_trb *start_trb)
+{
+       /*
+        * Pass all the TRBs to the hardware at once and make sure this write
+        * isn't reordered.
+        */
+       wmb();
+
+       if (start_cycle)
+               start_trb->field[3] |= cpu_to_le32(start_cycle);
+       else
+               start_trb->field[3] &= cpu_to_le32(~TRB_CYCLE);
+
+       if ((pep->ep_state & EP_HAS_STREAMS) &&
+           !pep->stream_info.first_prime_det) {
+               trace_cdnsp_wait_for_prime(pep, stream_id);
+               return 0;
+       }
+
+       return cdnsp_ring_ep_doorbell(pdev, pep, stream_id);
+}
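+
+/*
+ * Note on ordering (illustrative): the first TRB of a TD is queued with an
+ * inverted cycle bit so the controller ignores the TD while its remaining
+ * TRBs are written; the wmb() plus the final cycle-bit write above publish
+ * the whole TD atomically from the controller's point of view.
+ */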
+
+/*
+ * Process control TDs, update the USB request status and actual_length.
+ */
+static void cdnsp_process_ctrl_td(struct cdnsp_device *pdev,
+                                 struct cdnsp_td *td,
+                                 union cdnsp_trb *event_trb,
+                                 struct cdnsp_transfer_event *event,
+                                 struct cdnsp_ep *pep,
+                                 int *status)
+{
+       struct cdnsp_ring *ep_ring;
+       u32 remaining;
+       u32 trb_type;
+
+       trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event_trb->generic.field[3]));
+       ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+       remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+
+       /*
+        * If on the data stage, update the actual_length of the USB
+        * request and flag it as set so it won't be overwritten in the event
+        * for the last TRB.
+        */
+       if (trb_type == TRB_DATA) {
+               td->request_length_set = true;
+               td->preq->request.actual = td->preq->request.length - remaining;
+       }
+
+       /* at status stage */
+       if (!td->request_length_set)
+               td->preq->request.actual = td->preq->request.length;
+
+       if (pdev->ep0_stage == CDNSP_DATA_STAGE && pep->number == 0 &&
+           pdev->three_stage_setup) {
+               td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
+                               td_list);
+               pdev->ep0_stage = CDNSP_STATUS_STAGE;
+
+               cdnsp_giveback_first_trb(pdev, pep, 0, ep_ring->cycle_state,
+                                        &td->last_trb->generic);
+               return;
+       }
+
+       cdnsp_finish_td(pdev, td, event, pep, status);
+}
+
+/*
+ * Process isochronous TDs, update the USB request status and actual_length.
+ */
+static void cdnsp_process_isoc_td(struct cdnsp_device *pdev,
+                                 struct cdnsp_td *td,
+                                 union cdnsp_trb *ep_trb,
+                                 struct cdnsp_transfer_event *event,
+                                 struct cdnsp_ep *pep,
+                                 int status)
+{
+       struct cdnsp_request *preq = td->preq;
+       u32 remaining, requested, ep_trb_len;
+       bool sum_trbs_for_length = false;
+       struct cdnsp_ring *ep_ring;
+       u32 trb_comp_code;
+       u32 td_length;
+
+       ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+       trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+       remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+       ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+
+       requested = preq->request.length;
+
+       /* handle completion code */
+       switch (trb_comp_code) {
+       case COMP_SUCCESS:
+               preq->request.status = 0;
+               break;
+       case COMP_SHORT_PACKET:
+               preq->request.status = 0;
+               sum_trbs_for_length = true;
+               break;
+       case COMP_ISOCH_BUFFER_OVERRUN:
+       case COMP_BABBLE_DETECTED_ERROR:
+               preq->request.status = -EOVERFLOW;
+               break;
+       case COMP_STOPPED:
+               sum_trbs_for_length = true;
+               break;
+       case COMP_STOPPED_SHORT_PACKET:
+               /* field normally containing residue now contains transferred */
+               preq->request.status  = 0;
+               requested = remaining;
+               break;
+       case COMP_STOPPED_LENGTH_INVALID:
+               requested = 0;
+               remaining = 0;
+               break;
+       default:
+               sum_trbs_for_length = true;
+               preq->request.status = -1;
+               break;
+       }
+
+       if (sum_trbs_for_length) {
+               td_length = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb);
+               td_length += ep_trb_len - remaining;
+       } else {
+               td_length = requested;
+       }
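+
+       /*
+        * Illustrative example (values chosen for the sketch): a short
+        * packet on the third of three 1024-byte TRBs with remaining = 200
+        * gives td_length = (1024 + 1024) + (1024 - 200) = 2872 bytes.
+        */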
+
+       td->preq->request.actual += td_length;
+
+       cdnsp_finish_td(pdev, td, event, pep, &status);
+}
+
+static void cdnsp_skip_isoc_td(struct cdnsp_device *pdev,
+                              struct cdnsp_td *td,
+                              struct cdnsp_transfer_event *event,
+                              struct cdnsp_ep *pep,
+                              int status)
+{
+       struct cdnsp_ring *ep_ring;
+
+       ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+       td->preq->request.status = -EXDEV;
+       td->preq->request.actual = 0;
+
+       /* Update ring dequeue pointer */
+       while (ep_ring->dequeue != td->last_trb)
+               cdnsp_inc_deq(pdev, ep_ring);
+
+       cdnsp_inc_deq(pdev, ep_ring);
+
+       cdnsp_td_cleanup(pdev, td, ep_ring, &status);
+}
+
+/*
+ * Process bulk and interrupt TDs, update the USB request status and
+ * actual_length.
+ */
+static void cdnsp_process_bulk_intr_td(struct cdnsp_device *pdev,
+                                      struct cdnsp_td *td,
+                                      union cdnsp_trb *ep_trb,
+                                      struct cdnsp_transfer_event *event,
+                                      struct cdnsp_ep *ep,
+                                      int *status)
+{
+       u32 remaining, requested, ep_trb_len;
+       struct cdnsp_ring *ep_ring;
+       u32 trb_comp_code;
+
+       ep_ring = cdnsp_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
+       trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+       remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
+       ep_trb_len = TRB_LEN(le32_to_cpu(ep_trb->generic.field[2]));
+       requested = td->preq->request.length;
+
+       switch (trb_comp_code) {
+       case COMP_SUCCESS:
+       case COMP_SHORT_PACKET:
+               *status = 0;
+               break;
+       case COMP_STOPPED_SHORT_PACKET:
+               td->preq->request.actual = remaining;
+               goto finish_td;
+       case COMP_STOPPED_LENGTH_INVALID:
+               /* Stopped on ep trb with invalid length, exclude it. */
+               ep_trb_len = 0;
+               remaining = 0;
+               break;
+       }
+
+       if (ep_trb == td->last_trb)
+               ep_trb_len = requested - remaining;
+       else
+               ep_trb_len = cdnsp_sum_trb_lengths(pdev, ep_ring, ep_trb) +
+                                                  ep_trb_len - remaining;
+       td->preq->request.actual = ep_trb_len;
+
+finish_td:
+       ep->stream_info.drbls_count--;
+
+       cdnsp_finish_td(pdev, td, event, ep, status);
+}
+
+static void cdnsp_handle_tx_nrdy(struct cdnsp_device *pdev,
+                                struct cdnsp_transfer_event *event)
+{
+       struct cdnsp_generic_trb *generic;
+       struct cdnsp_ring *ep_ring;
+       struct cdnsp_ep *pep;
+       int cur_stream;
+       int ep_index;
+       int host_sid;
+       int dev_sid;
+
+       generic = (struct cdnsp_generic_trb *)event;
+       ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+       dev_sid = TRB_TO_DEV_STREAM(le32_to_cpu(generic->field[0]));
+       host_sid = TRB_TO_HOST_STREAM(le32_to_cpu(generic->field[2]));
+
+       pep = &pdev->eps[ep_index];
+
+       if (!(pep->ep_state & EP_HAS_STREAMS))
+               return;
+
+       if (host_sid == STREAM_PRIME_ACK) {
+               pep->stream_info.first_prime_det = 1;
+               for (cur_stream = 1; cur_stream < pep->stream_info.num_streams;
+                   cur_stream++) {
+                       ep_ring = pep->stream_info.stream_rings[cur_stream];
+                       ep_ring->stream_active = 1;
+                       ep_ring->stream_rejected = 0;
+               }
+       }
+
+       if (host_sid == STREAM_REJECTED) {
+               struct cdnsp_td *td, *td_temp;
+
+               pep->stream_info.drbls_count--;
+               ep_ring = pep->stream_info.stream_rings[dev_sid];
+               ep_ring->stream_active = 0;
+               ep_ring->stream_rejected = 1;
+
+               list_for_each_entry_safe(td, td_temp, &ep_ring->td_list,
+                                        td_list) {
+                       td->drbl = 0;
+               }
+       }
+
+       cdnsp_ring_doorbell_for_active_rings(pdev, pep);
+}
+
+/*
+ * If this function returns an error condition, it means it got a Transfer
+ * event with a corrupted TRB DMA address, or the endpoint is disabled.
+ */
+static int cdnsp_handle_tx_event(struct cdnsp_device *pdev,
+                                struct cdnsp_transfer_event *event)
+{
+       const struct usb_endpoint_descriptor *desc;
+       bool handling_skipped_tds = false;
+       struct cdnsp_segment *ep_seg;
+       struct cdnsp_ring *ep_ring;
+       int status = -EINPROGRESS;
+       union cdnsp_trb *ep_trb;
+       dma_addr_t ep_trb_dma;
+       struct cdnsp_ep *pep;
+       struct cdnsp_td *td;
+       u32 trb_comp_code;
+       int invalidate;
+       int ep_index;
+
+       invalidate = le32_to_cpu(event->flags) & TRB_EVENT_INVALIDATE;
+       ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+       trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
+       ep_trb_dma = le64_to_cpu(event->buffer);
+
+       pep = &pdev->eps[ep_index];
+       ep_ring = cdnsp_dma_to_transfer_ring(pep, le64_to_cpu(event->buffer));
+
+       /*
+        * If the device is disconnected, all requests will be dequeued
+        * by the upper layers as part of the disconnect sequence.
+        * Don't handle such an event here, to avoid racing with them.
+        */
+       if (invalidate || !pdev->gadget.connected)
+               goto cleanup;
+
+       if (GET_EP_CTX_STATE(pep->out_ctx) == EP_STATE_DISABLED) {
+               trace_cdnsp_ep_disabled(pep->out_ctx);
+               goto err_out;
+       }
+
+       /* Some transfer events don't always point to a TRB. */
+       if (!ep_ring) {
+               switch (trb_comp_code) {
+               case COMP_INVALID_STREAM_TYPE_ERROR:
+               case COMP_INVALID_STREAM_ID_ERROR:
+               case COMP_RING_UNDERRUN:
+               case COMP_RING_OVERRUN:
+                       goto cleanup;
+               default:
+                       dev_err(pdev->dev, "ERROR: %s event for unknown ring\n",
+                               pep->name);
+                       goto err_out;
+               }
+       }
+
+       /* Look for some error cases that need special treatment. */
+       switch (trb_comp_code) {
+       case COMP_BABBLE_DETECTED_ERROR:
+               status = -EOVERFLOW;
+               break;
+       case COMP_RING_UNDERRUN:
+       case COMP_RING_OVERRUN:
+               /*
+                * When the Isoch ring is empty, the controller will generate
+                * a Ring Overrun Event for an IN Isoch endpoint or a Ring
+                * Underrun Event for an OUT Isoch endpoint.
+                */
+               goto cleanup;
+       case COMP_MISSED_SERVICE_ERROR:
+               /*
+                * When a missed service error is encountered, one or more
+                * isoc TDs may have been missed by the controller.
+                * Set the skip flag of the ep_ring; the missed TDs will be
+                * completed as short transfers the next time the ep_ring is
+                * processed.
+                */
+               pep->skip = true;
+               break;
+       }
+
+       do {
+               /*
+                * This TRB should be in the TD at the head of this ring's TD
+                * list.
+                */
+               if (list_empty(&ep_ring->td_list)) {
+                       /*
+                        * Don't print warnings if it's due to a stopped
+                        * endpoint generating an extra completion event, or
+                        * an event for the last TRB of a short TD for which
+                        * we already got a short event.
+                        * The short TD is already removed from the TD list.
+                        */
+                       if (!(trb_comp_code == COMP_STOPPED ||
+                             trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
+                             ep_ring->last_td_was_short))
+                               trace_cdnsp_trb_without_td(ep_ring,
+                                       (struct cdnsp_generic_trb *)event);
+
+                       if (pep->skip) {
+                               pep->skip = false;
+                               trace_cdnsp_ep_list_empty_with_skip(pep, 0);
+                       }
+
+                       goto cleanup;
+               }
+
+               td = list_entry(ep_ring->td_list.next, struct cdnsp_td,
+                               td_list);
+
+               /* Is this a TRB in the currently executing TD? */
+               ep_seg = cdnsp_trb_in_td(pdev, ep_ring->deq_seg,
+                                        ep_ring->dequeue, td->last_trb,
+                                        ep_trb_dma);
+
+               /*
+                * Skip the Force Stopped Event. The event_trb(ep_trb_dma)
+                * of the FSE is not in the current TD pointed to by
+                * ep_ring->dequeue, because the hardware dequeue pointer is
+                * still at the previous TRB of the current TD. That previous
+                * TRB may be a Link TRB or the last TRB of the previous TD.
+                * The command completion handler will take care of the rest.
+                */
+               if (!ep_seg && (trb_comp_code == COMP_STOPPED ||
+                               trb_comp_code == COMP_STOPPED_LENGTH_INVALID)) {
+                       pep->skip = false;
+                       goto cleanup;
+               }
+
+               desc = td->preq->pep->endpoint.desc;
+               if (!ep_seg) {
+                       if (!pep->skip || !usb_endpoint_xfer_isoc(desc)) {
+                               /* Something is busted, give up! */
+                               dev_err(pdev->dev,
+                                       "ERROR Transfer event TRB DMA ptr not part of current TD ep_index %d comp_code %u\n",
+                                       ep_index, trb_comp_code);
+                               return -EINVAL;
+                       }
+
+                       cdnsp_skip_isoc_td(pdev, td, event, pep, status);
+                       goto cleanup;
+               }
+
+               ep_ring->last_td_was_short =
+                               (trb_comp_code == COMP_SHORT_PACKET);
+
+               if (pep->skip) {
+                       pep->skip = false;
+                       cdnsp_skip_isoc_td(pdev, td, event, pep, status);
+                       goto cleanup;
+               }
+
+               ep_trb = &ep_seg->trbs[(ep_trb_dma - ep_seg->dma)
+                                      / sizeof(*ep_trb)];
+
+               trace_cdnsp_handle_transfer(ep_ring,
+                                           (struct cdnsp_generic_trb *)ep_trb);
+
+               if (cdnsp_trb_is_noop(ep_trb))
+                       goto cleanup;
+
+               if (usb_endpoint_xfer_control(desc))
+                       cdnsp_process_ctrl_td(pdev, td, ep_trb, event, pep,
+                                             &status);
+               else if (usb_endpoint_xfer_isoc(desc))
+                       cdnsp_process_isoc_td(pdev, td, ep_trb, event, pep,
+                                             status);
+               else
+                       cdnsp_process_bulk_intr_td(pdev, td, ep_trb, event, pep,
+                                                  &status);
+cleanup:
+               handling_skipped_tds = pep->skip;
+
+               /*
+                * Do not update event ring dequeue pointer if we're in a loop
+                * processing missed tds.
+                */
+               if (!handling_skipped_tds)
+                       cdnsp_inc_deq(pdev, pdev->event_ring);
+
+       /*
+        * If pep->skip is set, it means there are missed TDs on the
+        * endpoint ring that need to be taken care of.
+        * Process them as short transfers until we reach the TD pointed
+        * to by the event.
+        */
+       } while (handling_skipped_tds);
+       return 0;
+
+err_out:
+       dev_err(pdev->dev, "@%016llx %08x %08x %08x %08x\n",
+               (unsigned long long)
+               cdnsp_trb_virt_to_dma(pdev->event_ring->deq_seg,
+                                     pdev->event_ring->dequeue),
+                lower_32_bits(le64_to_cpu(event->buffer)),
+                upper_32_bits(le64_to_cpu(event->buffer)),
+                le32_to_cpu(event->transfer_len),
+                le32_to_cpu(event->flags));
+       return -EINVAL;
+}
+
+/*
+ * This function handles one event on the event ring.
+ * Returns true for "possibly more events to process" (caller should call
+ * again), otherwise false if done.
+ */
+static bool cdnsp_handle_event(struct cdnsp_device *pdev)
+{
+       unsigned int comp_code;
+       union cdnsp_trb *event;
+       bool update_ptrs = true;
+       u32 cycle_bit;
+       int ret = 0;
+       u32 flags;
+
+       event = pdev->event_ring->dequeue;
+       flags = le32_to_cpu(event->event_cmd.flags);
+       cycle_bit = (flags & TRB_CYCLE);
+
+       /* Does the controller or driver own the TRB? */
+       if (cycle_bit != pdev->event_ring->cycle_state)
+               return false;
+
+       trace_cdnsp_handle_event(pdev->event_ring, &event->generic);
+
+       /*
+        * Barrier between reading the TRB_CYCLE (valid) flag above and any
+        * reads of the event's flags/data below.
+        */
+       rmb();
+
+       switch (flags & TRB_TYPE_BITMASK) {
+       case TRB_TYPE(TRB_COMPLETION):
+               /*
+                * Command can't be handled in interrupt context so just
+                * increment command ring dequeue pointer.
+                */
+               cdnsp_inc_deq(pdev, pdev->cmd_ring);
+               break;
+       case TRB_TYPE(TRB_PORT_STATUS):
+               cdnsp_handle_port_status(pdev, event);
+               update_ptrs = false;
+               break;
+       case TRB_TYPE(TRB_TRANSFER):
+               ret = cdnsp_handle_tx_event(pdev, &event->trans_event);
+               if (ret >= 0)
+                       update_ptrs = false;
+               break;
+       case TRB_TYPE(TRB_SETUP):
+               pdev->ep0_stage = CDNSP_SETUP_STAGE;
+               pdev->setup_id = TRB_SETUPID_TO_TYPE(flags);
+               pdev->setup_speed = TRB_SETUP_SPEEDID(flags);
+               pdev->setup = *((struct usb_ctrlrequest *)
+                               &event->trans_event.buffer);
+
+               cdnsp_setup_analyze(pdev);
+               break;
+       case TRB_TYPE(TRB_ENDPOINT_NRDY):
+               cdnsp_handle_tx_nrdy(pdev, &event->trans_event);
+               break;
+       case TRB_TYPE(TRB_HC_EVENT): {
+               comp_code = GET_COMP_CODE(le32_to_cpu(event->generic.field[2]));
+
+               switch (comp_code) {
+               case COMP_EVENT_RING_FULL_ERROR:
+                       dev_err(pdev->dev, "Event Ring Full\n");
+                       break;
+               default:
+                       dev_err(pdev->dev, "Controller error code 0x%02x\n",
+                               comp_code);
+               }
+
+               break;
+       }
+       case TRB_TYPE(TRB_MFINDEX_WRAP):
+       case TRB_TYPE(TRB_DRB_OVERFLOW):
+               break;
+       default:
+               dev_warn(pdev->dev, "ERROR unknown event type %ld\n",
+                        TRB_FIELD_TO_TYPE(flags));
+       }
+
+       if (update_ptrs)
+               /* Update SW event ring dequeue pointer. */
+               cdnsp_inc_deq(pdev, pdev->event_ring);
+
+       /*
+        * Caller will call us again to check if there are more items
+        * on the event ring.
+        */
+       return true;
+}
+
+irqreturn_t cdnsp_thread_irq_handler(int irq, void *data)
+{
+       struct cdnsp_device *pdev = (struct cdnsp_device *)data;
+       union cdnsp_trb *event_ring_deq;
+       int counter = 0;
+
+       spin_lock(&pdev->lock);
+
+       if (pdev->cdnsp_state & (CDNSP_STATE_HALTED | CDNSP_STATE_DYING)) {
+               cdnsp_died(pdev);
+               spin_unlock(&pdev->lock);
+               return IRQ_HANDLED;
+       }
+
+       event_ring_deq = pdev->event_ring->dequeue;
+
+       while (cdnsp_handle_event(pdev)) {
+               if (++counter >= TRBS_PER_EV_DEQ_UPDATE) {
+                       cdnsp_update_erst_dequeue(pdev, event_ring_deq, 0);
+                       event_ring_deq = pdev->event_ring->dequeue;
+                       counter = 0;
+               }
+       }
+
+       cdnsp_update_erst_dequeue(pdev, event_ring_deq, 1);
+
+       spin_unlock(&pdev->lock);
+
+       return IRQ_HANDLED;
+}
+
+irqreturn_t cdnsp_irq_handler(int irq, void *priv)
+{
+       struct cdnsp_device *pdev = (struct cdnsp_device *)priv;
+       u32 irq_pending;
+       u32 status;
+
+       status = readl(&pdev->op_regs->status);
+
+       if (status == ~(u32)0) {
+               cdnsp_died(pdev);
+               return IRQ_HANDLED;
+       }
+
+       if (!(status & STS_EINT))
+               return IRQ_NONE;
+
+       writel(status | STS_EINT, &pdev->op_regs->status);
+       irq_pending = readl(&pdev->ir_set->irq_pending);
+       irq_pending |= IMAN_IP;
+       writel(irq_pending, &pdev->ir_set->irq_pending);
+
+       if (status & STS_FATAL) {
+               cdnsp_died(pdev);
+               return IRQ_HANDLED;
+       }
+
+       return IRQ_WAKE_THREAD;
+}
+
+/*
+ * Generic function for queuing a TRB on a ring.
+ * The caller must have checked to make sure there's room on the ring.
+ *
+ * @more_trbs_coming:  Will you enqueue more TRBs before ringing the doorbell?
+ */
+static void cdnsp_queue_trb(struct cdnsp_device *pdev, struct cdnsp_ring *ring,
+                           bool more_trbs_coming, u32 field1, u32 field2,
+                           u32 field3, u32 field4)
+{
+       struct cdnsp_generic_trb *trb;
+
+       trb = &ring->enqueue->generic;
+
+       trb->field[0] = cpu_to_le32(field1);
+       trb->field[1] = cpu_to_le32(field2);
+       trb->field[2] = cpu_to_le32(field3);
+       trb->field[3] = cpu_to_le32(field4);
+
+       trace_cdnsp_queue_trb(ring, trb);
+       cdnsp_inc_enq(pdev, ring, more_trbs_coming);
+}
+
+/*
+ * Does various checks on the endpoint ring, and makes it ready to
+ * queue num_trbs.
+ */
+static int cdnsp_prepare_ring(struct cdnsp_device *pdev,
+                             struct cdnsp_ring *ep_ring,
+                             u32 ep_state,
+                             unsigned int num_trbs,
+                             gfp_t mem_flags)
+{
+       unsigned int num_trbs_needed;
+
+       /* Make sure the endpoint has been added to the controller's schedule. */
+       switch (ep_state) {
+       case EP_STATE_STOPPED:
+       case EP_STATE_RUNNING:
+       case EP_STATE_HALTED:
+               break;
+       default:
+               dev_err(pdev->dev, "ERROR: incorrect endpoint state\n");
+               return -EINVAL;
+       }
+
+       while (1) {
+               if (cdnsp_room_on_ring(pdev, ep_ring, num_trbs))
+                       break;
+
+               trace_cdnsp_no_room_on_ring("try ring expansion");
+
+               num_trbs_needed = num_trbs - ep_ring->num_trbs_free;
+               if (cdnsp_ring_expansion(pdev, ep_ring, num_trbs_needed,
+                                        mem_flags)) {
+                       dev_err(pdev->dev, "Ring expansion failed\n");
+                       return -ENOMEM;
+               }
+       }
+
+       while (cdnsp_trb_is_link(ep_ring->enqueue)) {
+               ep_ring->enqueue->link.control |= cpu_to_le32(TRB_CHAIN);
+               /* The cycle bit must be set as the last operation. */
+               wmb();
+               ep_ring->enqueue->link.control ^= cpu_to_le32(TRB_CYCLE);
+
+               /* Toggle the cycle bit after the last ring segment. */
+               if (cdnsp_link_trb_toggles_cycle(ep_ring->enqueue))
+                       ep_ring->cycle_state ^= 1;
+               ep_ring->enq_seg = ep_ring->enq_seg->next;
+               ep_ring->enqueue = ep_ring->enq_seg->trbs;
+       }
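+
+       /*
+        * Note on the loop above: the toggle applies only to the link TRB
+        * of the last segment, which points back to the start of the ring;
+        * flipping cycle_state there keeps the producer cycle bit in step
+        * as the enqueue pointer wraps around.
+        */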
+       return 0;
+}
+
+static int cdnsp_prepare_transfer(struct cdnsp_device *pdev,
+                                 struct cdnsp_request *preq,
+                                 unsigned int num_trbs)
+{
+       struct cdnsp_ring *ep_ring;
+       int ret;
+
+       ep_ring = cdnsp_get_transfer_ring(pdev, preq->pep,
+                                         preq->request.stream_id);
+       if (!ep_ring)
+               return -EINVAL;
+
+       ret = cdnsp_prepare_ring(pdev, ep_ring,
+                                GET_EP_CTX_STATE(preq->pep->out_ctx),
+                                num_trbs, GFP_ATOMIC);
+       if (ret)
+               return ret;
+
+       INIT_LIST_HEAD(&preq->td.td_list);
+       preq->td.preq = preq;
+
+       /* Add this TD to the tail of the endpoint ring's TD list. */
+       list_add_tail(&preq->td.td_list, &ep_ring->td_list);
+       ep_ring->num_tds++;
+       preq->pep->stream_info.td_count++;
+
+       preq->td.start_seg = ep_ring->enq_seg;
+       preq->td.first_trb = ep_ring->enqueue;
+
+       return 0;
+}
+
+static unsigned int cdnsp_count_trbs(u64 addr, u64 len)
+{
+       unsigned int num_trbs;
+
+       num_trbs = DIV_ROUND_UP(len + (addr & (TRB_MAX_BUFF_SIZE - 1)),
+                               TRB_MAX_BUFF_SIZE);
+       if (num_trbs == 0)
+               num_trbs++;
+
+       return num_trbs;
+}
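+
+/*
+ * Worked example with illustrative values, assuming the xHCI-style 64 KiB
+ * TRB_MAX_BUFF_SIZE: for addr = 0x1000F000 (offset 0xF000 into a 64 KiB
+ * block) and len = 0x12000,
+ * num_trbs = DIV_ROUND_UP(0x12000 + 0xF000, 0x10000) = 3.
+ */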
+
+static unsigned int count_trbs_needed(struct cdnsp_request *preq)
+{
+       return cdnsp_count_trbs(preq->request.dma, preq->request.length);
+}
+
+static unsigned int count_sg_trbs_needed(struct cdnsp_request *preq)
+{
+       unsigned int i, len, full_len, num_trbs = 0;
+       struct scatterlist *sg;
+
+       full_len = preq->request.length;
+
+       for_each_sg(preq->request.sg, sg, preq->request.num_sgs, i) {
+               len = sg_dma_len(sg);
+               num_trbs += cdnsp_count_trbs(sg_dma_address(sg), len);
+               len = min(len, full_len);
+               full_len -= len;
+               if (full_len == 0)
+                       break;
+       }
+
+       return num_trbs;
+}
+
+static unsigned int count_isoc_trbs_needed(struct cdnsp_request *preq)
+{
+       return cdnsp_count_trbs(preq->request.dma, preq->request.length);
+}
+
+static void cdnsp_check_trb_math(struct cdnsp_request *preq, int running_total)
+{
+       if (running_total != preq->request.length)
+               dev_err(preq->pep->pdev->dev,
+                       "%s - Miscalculated tx length, queued %#x, asked for %#x (%d)\n",
+                       preq->pep->name, running_total,
+                       preq->request.length, preq->request.actual);
+}
+
+/*
+ * TD size is the number of max packet sized packets remaining in the TD
+ * (*not* including this TRB).
+ *
+ * Total TD packet count = total_packet_count =
+ *     DIV_ROUND_UP(TD size in bytes, wMaxPacketSize)
+ *
+ * Packets transferred up to and including this TRB = packets_transferred =
+ *     rounddown(total bytes transferred including this TRB, wMaxPacketSize)
+ *
+ * TD size = total_packet_count - packets_transferred
+ *
+ * It must fit in bits 21:17, so it can't be bigger than 31.
+ * This is taken care of in the TRB_TD_SIZE() macro.
+ *
+ * The last TRB in a TD must have the TD size set to zero.
+ */
+static u32 cdnsp_td_remainder(struct cdnsp_device *pdev,
+                             int transferred,
+                             int trb_buff_len,
+                             unsigned int td_total_len,
+                             struct cdnsp_request *preq,
+                             bool more_trbs_coming)
+{
+       u32 maxp, total_packet_count;
+
+       /* One TRB with a zero-length data packet. */
+       if (!more_trbs_coming || (transferred == 0 && trb_buff_len == 0) ||
+           trb_buff_len == td_total_len)
+               return 0;
+
+       maxp = usb_endpoint_maxp(preq->pep->endpoint.desc);
+       total_packet_count = DIV_ROUND_UP(td_total_len, maxp);
+
+       /* Queuing functions don't count the current TRB into transferred. */
+       return (total_packet_count - ((transferred + trb_buff_len) / maxp));
+}
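+
+/*
+ * Worked example with illustrative values: for td_total_len = 6000 and
+ * wMaxPacketSize = 512, total_packet_count = DIV_ROUND_UP(6000, 512) = 12.
+ * For a mid-TD TRB with transferred = 2048 and trb_buff_len = 1024, the
+ * remainder written into the TRB is 12 - ((2048 + 1024) / 512) = 6.
+ */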
+
+static int cdnsp_align_td(struct cdnsp_device *pdev,
+                         struct cdnsp_request *preq, u32 enqd_len,
+                         u32 *trb_buff_len, struct cdnsp_segment *seg)
+{
+       struct device *dev = pdev->dev;
+       unsigned int unalign;
+       unsigned int max_pkt;
+       u32 new_buff_len;
+
+       max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
+       unalign = (enqd_len + *trb_buff_len) % max_pkt;
+
+       /* We got lucky, last normal TRB data on segment is packet aligned. */
+       if (unalign == 0)
+               return 0;
+
+       /* Can the last normal TRB be aligned by splitting it? */
+       if (*trb_buff_len > unalign) {
+               *trb_buff_len -= unalign;
+               trace_cdnsp_bounce_align_td_split(preq, *trb_buff_len,
+                                                 enqd_len, 0, unalign);
+               return 0;
+       }
+
+       /*
+        * We want enqd_len + trb_buff_len to sum up to a number which is
+        * divisible by the endpoint's wMaxPacketSize. IOW:
+        * (size of currently enqueued TRBs + remainder) % wMaxPacketSize == 0.
+        */
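+       /*
+        * Illustrative example: enqd_len = 700 and max_pkt = 512 give
+        * new_buff_len = 512 - (700 % 512) = 324, so the bytes already
+        * enqueued plus this TRB end exactly on a packet boundary.
+        */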
+       new_buff_len = max_pkt - (enqd_len % max_pkt);
+
+       if (new_buff_len > (preq->request.length - enqd_len))
+               new_buff_len = (preq->request.length - enqd_len);
+
+       /*
+        * Create a bounce buffer of at most max_pkt bytes, pointed to by
+        * the last TRB.
+        */
+       if (preq->direction) {
+               sg_pcopy_to_buffer(preq->request.sg,
+                                  preq->request.num_mapped_sgs,
+                                  seg->bounce_buf, new_buff_len, enqd_len);
+               seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+                                                max_pkt, DMA_TO_DEVICE);
+       } else {
+               seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
+                                                max_pkt, DMA_FROM_DEVICE);
+       }
+
+       if (dma_mapping_error(dev, seg->bounce_dma)) {
+               /* Try without aligning. */
+               dev_warn(pdev->dev,
+                        "Failed mapping bounce buffer, not aligning\n");
+               return 0;
+       }
+
+       *trb_buff_len = new_buff_len;
+       seg->bounce_len = new_buff_len;
+       seg->bounce_offs = enqd_len;
+
+       trace_cdnsp_bounce_map(preq, new_buff_len, enqd_len, seg->bounce_dma,
+                              unalign);
+
+       /*
+        * The bounce buffer was successfully aligned and seg->bounce_dma
+        * will be used in the transfer TRB as the new transfer buffer
+        * address.
+        */
+       return 1;
+}
+
+int cdnsp_queue_bulk_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+{
+       unsigned int enqd_len, block_len, trb_buff_len, full_len;
+       unsigned int start_cycle, num_sgs = 0;
+       struct cdnsp_generic_trb *start_trb;
+       u32 field, length_field, remainder;
+       struct scatterlist *sg = NULL;
+       bool more_trbs_coming = true;
+       bool need_zero_pkt = false;
+       bool zero_len_trb = false;
+       struct cdnsp_ring *ring;
+       bool first_trb = true;
+       unsigned int num_trbs;
+       struct cdnsp_ep *pep;
+       u64 addr, send_addr;
+       int sent_len, ret;
+
+       ring = cdnsp_request_to_transfer_ring(pdev, preq);
+       if (!ring)
+               return -EINVAL;
+
+       full_len = preq->request.length;
+
+       if (preq->request.num_sgs) {
+               num_sgs = preq->request.num_sgs;
+               sg = preq->request.sg;
+               addr = (u64)sg_dma_address(sg);
+               block_len = sg_dma_len(sg);
+               num_trbs = count_sg_trbs_needed(preq);
+       } else {
+               num_trbs = count_trbs_needed(preq);
+               addr = (u64)preq->request.dma;
+               block_len = full_len;
+       }
+
+       pep = preq->pep;
+
+       /* Deal with request.zero - need one more td/trb. */
+       if (preq->request.zero && preq->request.length &&
+           IS_ALIGNED(full_len, usb_endpoint_maxp(pep->endpoint.desc))) {
+               need_zero_pkt = true;
+               num_trbs++;
+       }
+
+       ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
+       if (ret)
+               return ret;
+
+       /*
+        * Don't give the first TRB to the hardware (by toggling the cycle bit)
+        * until we've finished creating all the other TRBs. The ring's cycle
+        * state may change as we enqueue the other TRBs, so save it too.
+        */
+       start_trb = &ring->enqueue->generic;
+       start_cycle = ring->cycle_state;
+       send_addr = addr;
+
+       /* Queue the TRBs, even if they are zero-length */
+       for (enqd_len = 0; zero_len_trb || first_trb || enqd_len < full_len;
+            enqd_len += trb_buff_len) {
+               field = TRB_TYPE(TRB_NORMAL);
+
+               /* TRB buffer should not cross 64KB boundaries */
+               trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+               trb_buff_len = min(trb_buff_len, block_len);
+               if (enqd_len + trb_buff_len > full_len)
+                       trb_buff_len = full_len - enqd_len;
+
+               /* Don't change the cycle bit of the first TRB until later */
+               if (first_trb) {
+                       first_trb = false;
+                       if (start_cycle == 0)
+                               field |= TRB_CYCLE;
+               } else {
+                       field |= ring->cycle_state;
+               }
+
+               /*
+                * Chain all the TRBs together; clear the chain bit in the last
+                * TRB to indicate it's the last TRB in the chain.
+                */
+               if (enqd_len + trb_buff_len < full_len || need_zero_pkt) {
+                       field |= TRB_CHAIN;
+                       if (cdnsp_trb_is_link(ring->enqueue + 1)) {
+                               if (cdnsp_align_td(pdev, preq, enqd_len,
+                                                  &trb_buff_len,
+                                                  ring->enq_seg)) {
+                                       send_addr = ring->enq_seg->bounce_dma;
+                                       /* Assuming TD won't span 2 segs */
+                                       preq->td.bounce_seg = ring->enq_seg;
+                               }
+                       }
+               }
+
+               if (enqd_len + trb_buff_len >= full_len) {
+                       if (need_zero_pkt && !zero_len_trb) {
+                               /* Loop once more to queue the zero-length TRB. */
+                               zero_len_trb = true;
+                       } else {
+                               zero_len_trb = false;
+                               field &= ~TRB_CHAIN;
+                               field |= TRB_IOC;
+                               more_trbs_coming = false;
+                               need_zero_pkt = false;
+                               preq->td.last_trb = ring->enqueue;
+                       }
+               }
+
+               /* Only set interrupt on short packet for OUT endpoints. */
+               if (!preq->direction)
+                       field |= TRB_ISP;
+
+               /* Set the TRB length, TD size, and interrupter fields. */
+               remainder = cdnsp_td_remainder(pdev, enqd_len, trb_buff_len,
+                                              full_len, preq,
+                                              more_trbs_coming);
+
+               length_field = TRB_LEN(trb_buff_len) | TRB_TD_SIZE(remainder) |
+                       TRB_INTR_TARGET(0);
+
+               cdnsp_queue_trb(pdev, ring, more_trbs_coming | need_zero_pkt,
+                               lower_32_bits(send_addr),
+                               upper_32_bits(send_addr),
+                               length_field,
+                               field);
+
+               addr += trb_buff_len;
+               sent_len = trb_buff_len;
+               while (sg && sent_len >= block_len) {
+                       /* New sg entry */
+                       --num_sgs;
+                       sent_len -= block_len;
+                       if (num_sgs != 0) {
+                               sg = sg_next(sg);
+                               block_len = sg_dma_len(sg);
+                               addr = (u64)sg_dma_address(sg);
+                               addr += sent_len;
+                       }
+               }
+               block_len -= sent_len;
+               send_addr = addr;
+       }
+
+       cdnsp_check_trb_math(preq, enqd_len);
+       ret = cdnsp_giveback_first_trb(pdev, pep, preq->request.stream_id,
+                                      start_cycle, start_trb);
+
+       if (ret)
+               preq->td.drbl = 1;
+
+       return 0;
+}
+
+int cdnsp_queue_ctrl_tx(struct cdnsp_device *pdev, struct cdnsp_request *preq)
+{
+       u32 field, length_field, remainder;
+       struct cdnsp_ep *pep = preq->pep;
+       struct cdnsp_ring *ep_ring;
+       int num_trbs;
+       int ret;
+
+       ep_ring = cdnsp_request_to_transfer_ring(pdev, preq);
+       if (!ep_ring)
+               return -EINVAL;
+
+       /* 1 TRB for data, 1 for status */
+       num_trbs = (pdev->three_stage_setup) ? 2 : 1;
+
+       ret = cdnsp_prepare_transfer(pdev, preq, num_trbs);
+       if (ret)
+               return ret;
+
+       /* If there's data, queue data TRBs */
+       if (pdev->ep0_expect_in)
+               field = TRB_TYPE(TRB_DATA) | TRB_IOC;
+       else
+               field = TRB_ISP | TRB_TYPE(TRB_DATA) | TRB_IOC;
+
+       if (preq->request.length > 0) {
+               remainder = cdnsp_td_remainder(pdev, 0, preq->request.length,
+                                              preq->request.length, preq, 1);
+
+               length_field = TRB_LEN(preq->request.length) |
+                               TRB_TD_SIZE(remainder) | TRB_INTR_TARGET(0);
+
+               if (pdev->ep0_expect_in)
+                       field |= TRB_DIR_IN;
+
+               cdnsp_queue_trb(pdev, ep_ring, true,
+                               lower_32_bits(preq->request.dma),
+                               upper_32_bits(preq->request.dma), length_field,
+                               field | ep_ring->cycle_state |
+                               TRB_SETUPID(pdev->setup_id) |
+                               pdev->setup_speed);
+
+               pdev->ep0_stage = CDNSP_DATA_STAGE;
+       }
+
+       /* Save the pointer to the last TRB in the TD. */
+       preq->td.last_trb = ep_ring->enqueue;
+
+       /* Queue status TRB. */
+       if (preq->request.length == 0)
+               field = ep_ring->cycle_state;
+       else
+               field = (ep_ring->cycle_state ^ 1);
+
+       if (preq->request.length > 0 && pdev->ep0_expect_in)
+               field |= TRB_DIR_IN;
+
+       if (pep->ep_state & EP0_HALTED_STATUS) {
+               pep->ep_state &= ~EP0_HALTED_STATUS;
+               field |= TRB_SETUPSTAT(TRB_SETUPSTAT_STALL);
+       } else {
+               field |= TRB_SETUPSTAT(TRB_SETUPSTAT_ACK);
+       }
+
+       cdnsp_queue_trb(pdev, ep_ring, false, 0, 0, TRB_INTR_TARGET(0),
+                       field | TRB_IOC | TRB_SETUPID(pdev->setup_id) |
+                       TRB_TYPE(TRB_STATUS) | pdev->setup_speed);
+
+       cdnsp_ring_ep_doorbell(pdev, pep, preq->request.stream_id);
+
+       return 0;
+}
+
+int cdnsp_cmd_stop_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+       u32 ep_state = GET_EP_CTX_STATE(pep->out_ctx);
+       int ret = 0;
+
+       if (ep_state == EP_STATE_STOPPED || ep_state == EP_STATE_DISABLED) {
+               trace_cdnsp_ep_stopped_or_disabled(pep->out_ctx);
+               goto ep_stopped;
+       }
+
+       cdnsp_queue_stop_endpoint(pdev, pep->idx);
+       cdnsp_ring_cmd_db(pdev);
+       ret = cdnsp_wait_for_cmd_compl(pdev);
+
+       trace_cdnsp_handle_cmd_stop_ep(pep->out_ctx);
+
+ep_stopped:
+       pep->ep_state |= EP_STOPPED;
+       return ret;
+}
+
+int cdnsp_cmd_flush_ep(struct cdnsp_device *pdev, struct cdnsp_ep *pep)
+{
+       int ret;
+
+       cdnsp_queue_flush_endpoint(pdev, pep->idx);
+       cdnsp_ring_cmd_db(pdev);
+       ret = cdnsp_wait_for_cmd_compl(pdev);
+
+       trace_cdnsp_handle_cmd_flush_ep(pep->out_ctx);
+
+       return ret;
+}
+
+/*
+ * The transfer burst count field of the isochronous TRB defines the number of
+ * bursts that are required to move all packets in this TD. Only SuperSpeed
+ * devices can burst up to bMaxBurst number of packets per service interval.
+ * This field is zero based, meaning a value of zero in the field means one
+ * burst. Basically, for everything but SuperSpeed devices, this field will be
+ * zero.
+ */
+static unsigned int cdnsp_get_burst_count(struct cdnsp_device *pdev,
+                                         struct cdnsp_request *preq,
+                                         unsigned int total_packet_count)
+{
+       unsigned int max_burst;
+
+       if (pdev->gadget.speed < USB_SPEED_SUPER)
+               return 0;
+
+       max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
+       return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
+}
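+
+/*
+ * Illustrative example: 11 packets with bMaxBurst = 3 (bursts of up to
+ * four packets) need DIV_ROUND_UP(11, 4) = 3 bursts, so the zero-based
+ * burst count written into the TRB is 3 - 1 = 2.
+ */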
+
+/*
+ * Returns the number of packets in the last "burst" of packets. This field is
+ * valid for all speeds of devices. USB 2.0 devices can only do one "burst", so
+ * the last burst packet count is equal to the total number of packets in the
+ * TD. SuperSpeed endpoints can have up to 3 bursts. All but the last burst
+ * must contain (bMaxBurst + 1) number of packets, but the last burst can
+ * contain 1 to (bMaxBurst + 1) packets.
+ */
+static unsigned int
+cdnsp_get_last_burst_packet_count(struct cdnsp_device *pdev,
+                                 struct cdnsp_request *preq,
+                                 unsigned int total_packet_count)
+{
+       unsigned int max_burst;
+       unsigned int residue;
+
+       if (pdev->gadget.speed >= USB_SPEED_SUPER) {
+               /* bMaxBurst is zero based: 0 means 1 packet per burst. */
+               max_burst = preq->pep->endpoint.comp_desc->bMaxBurst;
+               residue = total_packet_count % (max_burst + 1);
+
+               /*
+                * If residue is zero, the last burst contains (max_burst + 1)
+                * number of packets, but the TLBPC field is zero-based.
+                */
+               if (residue == 0)
+                       return max_burst;
+
+               return residue - 1;
+       }
+       if (total_packet_count == 0)
+               return 0;
+
+       return total_packet_count - 1;
+}
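+
+/*
+ * Illustrative example, continuing the one above: 11 packets with
+ * bMaxBurst = 3 leave residue = 11 % 4 = 3 packets in the last burst, so
+ * the zero-based TLBPC value is 3 - 1 = 2.
+ */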
+
+/* Queue an isoc transfer. */
+static int cdnsp_queue_isoc_tx(struct cdnsp_device *pdev,
+                              struct cdnsp_request *preq)
+{
+       int trb_buff_len, td_len, td_remain_len, ret;
+       unsigned int burst_count, last_burst_pkt;
+       unsigned int total_pkt_count, max_pkt;
+       struct cdnsp_generic_trb *start_trb;
+       bool more_trbs_coming = true;
+       struct cdnsp_ring *ep_ring;
+       int running_total = 0;
+       u32 field, length_field;
+       int start_cycle;
+       int trbs_per_td;
+       u64 addr;
+       int i;
+
+       ep_ring = preq->pep->ring;
+       start_trb = &ep_ring->enqueue->generic;
+       start_cycle = ep_ring->cycle_state;
+       td_len = preq->request.length;
+       addr = (u64)preq->request.dma;
+       td_remain_len = td_len;
+
+       max_pkt = usb_endpoint_maxp(preq->pep->endpoint.desc);
+       total_pkt_count = DIV_ROUND_UP(td_len, max_pkt);
+
+       /* A zero-length transfer still involves at least one packet. */
+       if (total_pkt_count == 0)
+               total_pkt_count++;
+
+       burst_count = cdnsp_get_burst_count(pdev, preq, total_pkt_count);
+       last_burst_pkt = cdnsp_get_last_burst_packet_count(pdev, preq,
+                                                          total_pkt_count);
+       trbs_per_td = count_isoc_trbs_needed(preq);
+
+       ret = cdnsp_prepare_transfer(pdev, preq, trbs_per_td);
+       if (ret)
+               goto cleanup;
+
+       /*
+        * Set isoc specific data for the first TRB in a TD.
+        * Prevent HW from getting the TRBs by keeping the cycle state
+        * inverted in the first TD's isoc TRB.
+        */
+       field = TRB_TYPE(TRB_ISOC) | TRB_TLBPC(last_burst_pkt) |
+               (start_cycle ? 0 : 1) | TRB_SIA | TRB_TBC(burst_count);
+
+       /* Fill the rest of the TRB fields, and remaining normal TRBs. */
+       for (i = 0; i < trbs_per_td; i++) {
+               u32 remainder;
+
+               /* Calculate TRB length. */
+               trb_buff_len = TRB_BUFF_LEN_UP_TO_BOUNDARY(addr);
+               if (trb_buff_len > td_remain_len)
+                       trb_buff_len = td_remain_len;
+
+               /* Set the TRB length, TD size, & interrupter fields. */
+               remainder = cdnsp_td_remainder(pdev, running_total,
+                                              trb_buff_len, td_len, preq,
+                                              more_trbs_coming);
+
+               length_field = TRB_LEN(trb_buff_len) | TRB_INTR_TARGET(0);
+
+               /* Only first TRB is isoc, overwrite otherwise. */
+               if (i) {
+                       field = TRB_TYPE(TRB_NORMAL) | ep_ring->cycle_state;
+                       length_field |= TRB_TD_SIZE(remainder);
+               } else {
+                       length_field |= TRB_TD_SIZE_TBC(burst_count);
+               }
+
+               /* Only set interrupt on short packet for OUT EPs. */
+               if (usb_endpoint_dir_out(preq->pep->endpoint.desc))
+                       field |= TRB_ISP;
+
+               /* Set the chain bit for all except the last TRB. */
+               if (i < trbs_per_td - 1) {
+                       more_trbs_coming = true;
+                       field |= TRB_CHAIN;
+               } else {
+                       more_trbs_coming = false;
+                       preq->td.last_trb = ep_ring->enqueue;
+                       field |= TRB_IOC;
+               }
+
+               cdnsp_queue_trb(pdev, ep_ring, more_trbs_coming,
+                               lower_32_bits(addr), upper_32_bits(addr),
+                               length_field, field);
+
+               running_total += trb_buff_len;
+               addr += trb_buff_len;
+               td_remain_len -= trb_buff_len;
+       }
+
+       /* Check TD length */
+       if (running_total != td_len) {
+               dev_err(pdev->dev, "ISOC TD length unmatch\n");
+               ret = -EINVAL;
+               goto cleanup;
+       }
+
+       cdnsp_giveback_first_trb(pdev, preq->pep, preq->request.stream_id,
+                                start_cycle, start_trb);
+
+       return 0;
+
+cleanup:
+       /* Clean up a partially enqueued isoc transfer. */
+       list_del_init(&preq->td.td_list);
+       ep_ring->num_tds--;
+
+       /*
+        * Use the first TD as a temporary variable to turn the TDs we've
+        * queued into No-ops with a software-owned cycle bit.
+        * That way the hardware won't accidentally start executing bogus TDs
+        * when we partially overwrite them.
+        * td->first_trb and td->start_seg are already set.
+        */
+       preq->td.last_trb = ep_ring->enqueue;
+       /* Every TRB except the first & last will have its cycle bit flipped. */
+       cdnsp_td_to_noop(pdev, ep_ring, &preq->td, true);
+
+       /* Reset the ring enqueue back to the first TRB and its cycle bit. */
+       ep_ring->enqueue = preq->td.first_trb;
+       ep_ring->enq_seg = preq->td.start_seg;
+       ep_ring->cycle_state = start_cycle;
+       return ret;
+}
+
+int cdnsp_queue_isoc_tx_prepare(struct cdnsp_device *pdev,
+                               struct cdnsp_request *preq)
+{
+       struct cdnsp_ring *ep_ring;
+       u32 ep_state;
+       int num_trbs;
+       int ret;
+
+       ep_ring = preq->pep->ring;
+       ep_state = GET_EP_CTX_STATE(preq->pep->out_ctx);
+       num_trbs = count_isoc_trbs_needed(preq);
+
+       /*
+        * Check the ring to guarantee there is enough room for the whole
+        * request. Do not insert any TD of the USB request into the ring
+        * if the check fails.
+        */
+       ret = cdnsp_prepare_ring(pdev, ep_ring, ep_state, num_trbs, GFP_ATOMIC);
+       if (ret)
+               return ret;
+
+       return cdnsp_queue_isoc_tx(pdev, preq);
+}
+
+/****          Command Ring Operations         ****/
+/*
+ * Generic function for queuing a command TRB on the command ring.
+ * The driver queues only one command on the ring at a time.
+ */
+static void cdnsp_queue_command(struct cdnsp_device *pdev,
+                               u32 field1,
+                               u32 field2,
+                               u32 field3,
+                               u32 field4)
+{
+       cdnsp_prepare_ring(pdev, pdev->cmd_ring, EP_STATE_RUNNING, 1,
+                          GFP_ATOMIC);
+
+       pdev->cmd.command_trb = pdev->cmd_ring->enqueue;
+
+       cdnsp_queue_trb(pdev, pdev->cmd_ring, false, field1, field2,
+                       field3, field4 | pdev->cmd_ring->cycle_state);
+}
+
+/* Queue a slot enable or disable request on the command ring */
+void cdnsp_queue_slot_control(struct cdnsp_device *pdev, u32 trb_type)
+{
+       cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(trb_type) |
+                           SLOT_ID_FOR_TRB(pdev->slot_id));
+}
+
+/* Queue an address device command TRB */
+void cdnsp_queue_address_device(struct cdnsp_device *pdev,
+                               dma_addr_t in_ctx_ptr,
+                               enum cdnsp_setup_dev setup)
+{
+       cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
+                           upper_32_bits(in_ctx_ptr), 0,
+                           TRB_TYPE(TRB_ADDR_DEV) |
+                           SLOT_ID_FOR_TRB(pdev->slot_id) |
+                           (setup == SETUP_CONTEXT_ONLY ? TRB_BSR : 0));
+}
+
+/* Queue a reset device command TRB */
+void cdnsp_queue_reset_device(struct cdnsp_device *pdev)
+{
+       cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_RESET_DEV) |
+                           SLOT_ID_FOR_TRB(pdev->slot_id));
+}
+
+/* Queue a configure endpoint command TRB */
+void cdnsp_queue_configure_endpoint(struct cdnsp_device *pdev,
+                                   dma_addr_t in_ctx_ptr)
+{
+       cdnsp_queue_command(pdev, lower_32_bits(in_ctx_ptr),
+                           upper_32_bits(in_ctx_ptr), 0,
+                           TRB_TYPE(TRB_CONFIG_EP) |
+                           SLOT_ID_FOR_TRB(pdev->slot_id));
+}
+
+/*
+ * Queue a "Stop Endpoint Command" to stop activity on an endpoint, for
+ * example one that is about to be suspended.
+ */
+void cdnsp_queue_stop_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
+{
+       cdnsp_queue_command(pdev, 0, 0, 0, SLOT_ID_FOR_TRB(pdev->slot_id) |
+                           EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_STOP_RING));
+}
+
+/* Set Transfer Ring Dequeue Pointer command. */
+void cdnsp_queue_new_dequeue_state(struct cdnsp_device *pdev,
+                                  struct cdnsp_ep *pep,
+                                  struct cdnsp_dequeue_state *deq_state)
+{
+       u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
+       u32 trb_slot_id = SLOT_ID_FOR_TRB(pdev->slot_id);
+       u32 type = TRB_TYPE(TRB_SET_DEQ);
+       u32 trb_sct = 0;
+       dma_addr_t addr;
+
+       addr = cdnsp_trb_virt_to_dma(deq_state->new_deq_seg,
+                                    deq_state->new_deq_ptr);
+
+       if (deq_state->stream_id)
+               trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
+
+       cdnsp_queue_command(pdev, lower_32_bits(addr) | trb_sct |
+                           deq_state->new_cycle_state, upper_32_bits(addr),
+                           trb_stream_id, trb_slot_id |
+                           EP_ID_FOR_TRB(pep->idx) | type);
+}
+
+void cdnsp_queue_reset_ep(struct cdnsp_device *pdev, unsigned int ep_index)
+{
+       cdnsp_queue_command(pdev, 0, 0, 0,
+                           SLOT_ID_FOR_TRB(pdev->slot_id) |
+                           EP_ID_FOR_TRB(ep_index) |
+                           TRB_TYPE(TRB_RESET_EP));
+}
+
+/*
+ * Queue a halt endpoint request on the command ring.
+ */
+void cdnsp_queue_halt_endpoint(struct cdnsp_device *pdev, unsigned int ep_index)
+{
+       cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_HALT_ENDPOINT) |
+                           SLOT_ID_FOR_TRB(pdev->slot_id) |
+                           EP_ID_FOR_TRB(ep_index));
+}
+
+/*
+ * Queue a flush endpoint request on the command ring.
+ */
+void cdnsp_queue_flush_endpoint(struct cdnsp_device *pdev,
+                               unsigned int ep_index)
+{
+       cdnsp_queue_command(pdev, 0, 0, 0, TRB_TYPE(TRB_FLUSH_ENDPOINT) |
+                           SLOT_ID_FOR_TRB(pdev->slot_id) |
+                           EP_ID_FOR_TRB(ep_index));
+}
+
+void cdnsp_force_header_wakeup(struct cdnsp_device *pdev, int intf_num)
+{
+       u32 lo, mid;
+
+       lo = TRB_FH_TO_PACKET_TYPE(TRB_FH_TR_PACKET) |
+            TRB_FH_TO_DEVICE_ADDRESS(pdev->device_address);
+       mid = TRB_FH_TR_PACKET_DEV_NOT |
+             TRB_FH_TO_NOT_TYPE(TRB_FH_TR_PACKET_FUNCTION_WAKE) |
+             TRB_FH_TO_INTERFACE(intf_num);
+
+       cdnsp_queue_command(pdev, lo, mid, 0,
+                           TRB_TYPE(TRB_FORCE_HEADER) | SET_PORT_ID(2));
+}
diff --git a/drivers/usb/cdns3/cdnsp-trace.c b/drivers/usb/cdns3/cdnsp-trace.c
new file mode 100644 (file)
index 0000000..e50ab79
--- /dev/null
@@ -0,0 +1,12 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Cadence CDNSP DRD Driver.
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#define CREATE_TRACE_POINTS
+#include "cdnsp-trace.h"
diff --git a/drivers/usb/cdns3/cdnsp-trace.h b/drivers/usb/cdns3/cdnsp-trace.h
new file mode 100644 (file)
index 0000000..a9de1da
--- /dev/null
@@ -0,0 +1,840 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Cadence CDNSP DRD Driver.
+ * Trace support header file
+ *
+ * Copyright (C) 2020 Cadence.
+ *
+ * Author: Pawel Laszczak <pawell@cadence.com>
+ *
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cdnsp-dev
+
+/*
+ * The TRACE_SYSTEM_VAR defaults to TRACE_SYSTEM, but must be a
+ * legitimate C variable. It is not exported to user space.
+ */
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR cdnsp_dev
+
+#if !defined(__CDNSP_DEV_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __CDNSP_DEV_TRACE_H
+
+#include <linux/tracepoint.h>
+#include "cdnsp-gadget.h"
+#include "cdnsp-debug.h"
+
+/*
+ * There is a limitation on the single buffer size in the TRACEPOINT
+ * subsystem. By default TRACE_BUF_SIZE is 1024, so not all data will be
+ * logged. To show more data, this must be increased. In most cases the default
+ * value is sufficient.
+ */
+#define CDNSP_MSG_MAX 500
+
+DECLARE_EVENT_CLASS(cdnsp_log_ep,
+       TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+       TP_ARGS(pep, stream_id),
+       TP_STRUCT__entry(
+               __string(name, pep->name)
+               __field(unsigned int, state)
+               __field(u32, stream_id)
+               __field(u8, enabled)
+               __field(unsigned int, num_streams)
+               __field(int, td_count)
+               __field(u8, first_prime_det)
+               __field(u8, drbls_count)
+       ),
+       TP_fast_assign(
+               __assign_str(name, pep->name);
+               __entry->state = pep->ep_state;
+               __entry->stream_id = stream_id;
+               __entry->enabled = pep->ep_state & EP_HAS_STREAMS;
+               __entry->num_streams = pep->stream_info.num_streams;
+               __entry->td_count = pep->stream_info.td_count;
+               __entry->first_prime_det = pep->stream_info.first_prime_det;
+               __entry->drbls_count = pep->stream_info.drbls_count;
+       ),
+       TP_printk("%s: SID: %08x ep state: %x stream: enabled: %d num  %d "
+                 "tds %d, first prime: %d drbls %d",
+                 __get_str(name), __entry->state, __entry->stream_id,
+                 __entry->enabled, __entry->num_streams, __entry->td_count,
+                 __entry->first_prime_det, __entry->drbls_count)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_tr_drbl,
+       TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+       TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_wait_for_prime,
+       TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+       TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_list_empty_with_skip,
+       TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+       TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_enable_end,
+       TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+       TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_disable_end,
+       TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+       TP_ARGS(pep, stream_id)
+);
+
+DEFINE_EVENT(cdnsp_log_ep, cdnsp_ep_busy_try_halt_again,
+       TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
+       TP_ARGS(pep, stream_id)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_enable_disable,
+       TP_PROTO(int set),
+       TP_ARGS(set),
+       TP_STRUCT__entry(
+               __field(int, set)
+       ),
+       TP_fast_assign(
+               __entry->set = set;
+       ),
+       TP_printk("%s", __entry->set ? "enabled" : "disabled")
+);
+
+DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_pullup,
+       TP_PROTO(int set),
+       TP_ARGS(set)
+);
+
+DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_u1,
+       TP_PROTO(int set),
+       TP_ARGS(set)
+);
+
+DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_u2,
+       TP_PROTO(int set),
+       TP_ARGS(set)
+);
+
+DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_lpm,
+       TP_PROTO(int set),
+       TP_ARGS(set)
+);
+
+DEFINE_EVENT(cdnsp_log_enable_disable, cdnsp_may_wakeup,
+       TP_PROTO(int set),
+       TP_ARGS(set)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_simple,
+       TP_PROTO(char *msg),
+       TP_ARGS(msg),
+       TP_STRUCT__entry(
+               __string(text, msg)
+       ),
+       TP_fast_assign(
+               __assign_str(text, msg)
+       ),
+       TP_printk("%s", __get_str(text))
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_exit,
+       TP_PROTO(char *msg),
+       TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_init,
+       TP_PROTO(char *msg),
+       TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_slot_id,
+       TP_PROTO(char *msg),
+       TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_cmd_drbl,
+       TP_PROTO(char *msg),
+       TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_no_room_on_ring,
+       TP_PROTO(char *msg),
+       TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_status_stage,
+       TP_PROTO(char *msg),
+       TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_data_stage,
+       TP_PROTO(char *msg),
+       TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_request,
+       TP_PROTO(char *msg),
+       TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_set_config,
+       TP_PROTO(char *msg),
+       TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep0_halted,
+       TP_PROTO(char *msg),
+       TP_ARGS(msg)
+);
+
+DEFINE_EVENT(cdnsp_log_simple, cdnsp_ep_halt,
+       TP_PROTO(char *msg),
+       TP_ARGS(msg)
+);
+
+TRACE_EVENT(cdnsp_looking_trb_in_td,
+       TP_PROTO(dma_addr_t suspect, dma_addr_t trb_start, dma_addr_t trb_end,
+                dma_addr_t curr_seg, dma_addr_t end_seg),
+       TP_ARGS(suspect, trb_start, trb_end, curr_seg, end_seg),
+       TP_STRUCT__entry(
+               __field(dma_addr_t, suspect)
+               __field(dma_addr_t, trb_start)
+               __field(dma_addr_t, trb_end)
+               __field(dma_addr_t, curr_seg)
+               __field(dma_addr_t, end_seg)
+       ),
+       TP_fast_assign(
+               __entry->suspect = suspect;
+               __entry->trb_start = trb_start;
+               __entry->trb_end = trb_end;
+               __entry->curr_seg = curr_seg;
+               __entry->end_seg = end_seg;
+       ),
+       TP_printk("DMA: suspect event: %pad, trb-start: %pad, trb-end %pad, "
+                 "seg-start %pad, seg-end %pad",
+                 &__entry->suspect, &__entry->trb_start, &__entry->trb_end,
+                 &__entry->curr_seg, &__entry->end_seg)
+);
+
+TRACE_EVENT(cdnsp_port_info,
+       TP_PROTO(__le32 __iomem *addr, u32 offset, u32 count, u32 rev),
+       TP_ARGS(addr, offset, count, rev),
+       TP_STRUCT__entry(
+               __field(__le32 __iomem *, addr)
+               __field(u32, offset)
+               __field(u32, count)
+               __field(u32, rev)
+       ),
+       TP_fast_assign(
+               __entry->addr = addr;
+               __entry->offset = offset;
+               __entry->count = count;
+               __entry->rev = rev;
+       ),
+       TP_printk("Ext Cap %p, port offset = %u, count = %u, rev = 0x%x",
+                 __entry->addr, __entry->offset, __entry->count, __entry->rev)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_deq_state,
+       TP_PROTO(struct cdnsp_dequeue_state *state),
+       TP_ARGS(state),
+       TP_STRUCT__entry(
+               __field(int, new_cycle_state)
+               __field(struct cdnsp_segment *, new_deq_seg)
+               __field(dma_addr_t, deq_seg_dma)
+               __field(union cdnsp_trb *, new_deq_ptr)
+               __field(dma_addr_t, deq_ptr_dma)
+       ),
+       TP_fast_assign(
+               __entry->new_cycle_state = state->new_cycle_state;
+               __entry->new_deq_seg = state->new_deq_seg;
+               __entry->deq_seg_dma = state->new_deq_seg->dma;
+               __entry->new_deq_ptr = state->new_deq_ptr;
+               __entry->deq_ptr_dma = cdnsp_trb_virt_to_dma(state->new_deq_seg,
+                                                            state->new_deq_ptr);
+       ),
+       TP_printk("New cycle state = 0x%x, New dequeue segment = %p (0x%pad dma), "
+                 "New dequeue pointer = %p (0x%pad dma)",
+                 __entry->new_cycle_state, __entry->new_deq_seg,
+                 &__entry->deq_seg_dma, __entry->new_deq_ptr,
+                 &__entry->deq_ptr_dma
+       )
+);
+
+DEFINE_EVENT(cdnsp_log_deq_state, cdnsp_new_deq_state,
+       TP_PROTO(struct cdnsp_dequeue_state *state),
+       TP_ARGS(state)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_ctrl,
+       TP_PROTO(struct usb_ctrlrequest *ctrl),
+       TP_ARGS(ctrl),
+       TP_STRUCT__entry(
+               __field(u8, bRequestType)
+               __field(u8, bRequest)
+               __field(u16, wValue)
+               __field(u16, wIndex)
+               __field(u16, wLength)
+               __dynamic_array(char, str, CDNSP_MSG_MAX)
+       ),
+       TP_fast_assign(
+               __entry->bRequestType = ctrl->bRequestType;
+               __entry->bRequest = ctrl->bRequest;
+               __entry->wValue = le16_to_cpu(ctrl->wValue);
+               __entry->wIndex = le16_to_cpu(ctrl->wIndex);
+               __entry->wLength = le16_to_cpu(ctrl->wLength);
+       ),
+       TP_printk("%s", usb_decode_ctrl(__get_str(str), CDNSP_MSG_MAX,
+                                       __entry->bRequestType,
+                                       __entry->bRequest, __entry->wValue,
+                                       __entry->wIndex, __entry->wLength)
+       )
+);
+
+DEFINE_EVENT(cdnsp_log_ctrl, cdnsp_ctrl_req,
+       TP_PROTO(struct usb_ctrlrequest *ctrl),
+       TP_ARGS(ctrl)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_bounce,
+       TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset,
+                dma_addr_t dma, unsigned int unalign),
+       TP_ARGS(preq, new_buf_len, offset, dma, unalign),
+       TP_STRUCT__entry(
+               __string(name, preq->pep->name)
+               __field(u32, new_buf_len)
+               __field(u32, offset)
+               __field(dma_addr_t, dma)
+               __field(unsigned int, unalign)
+       ),
+       TP_fast_assign(
+               __assign_str(name, preq->pep->name);
+               __entry->new_buf_len = new_buf_len;
+               __entry->offset = offset;
+               __entry->dma = dma;
+               __entry->unalign = unalign;
+       ),
+       TP_printk("%s buf len %d, offset %d, dma %pad, unalign %d",
+                 __get_str(name), __entry->new_buf_len,
+                 __entry->offset, &__entry->dma, __entry->unalign
+       )
+);
+
+DEFINE_EVENT(cdnsp_log_bounce, cdnsp_bounce_align_td_split,
+       TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset,
+                dma_addr_t dma, unsigned int unalign),
+       TP_ARGS(preq, new_buf_len, offset, dma, unalign)
+);
+
+DEFINE_EVENT(cdnsp_log_bounce, cdnsp_bounce_map,
+       TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset,
+                dma_addr_t dma, unsigned int unalign),
+       TP_ARGS(preq, new_buf_len, offset, dma, unalign)
+);
+
+DEFINE_EVENT(cdnsp_log_bounce, cdnsp_bounce_unmap,
+       TP_PROTO(struct cdnsp_request *preq, u32 new_buf_len, u32 offset,
+                dma_addr_t dma, unsigned int unalign),
+       TP_ARGS(preq, new_buf_len, offset, dma, unalign)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_trb,
+       TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+       TP_ARGS(ring, trb),
+       TP_STRUCT__entry(
+               __field(u32, type)
+               __field(u32, field0)
+               __field(u32, field1)
+               __field(u32, field2)
+               __field(u32, field3)
+               __field(union cdnsp_trb *, trb)
+               __field(dma_addr_t, trb_dma)
+               __dynamic_array(char, str, CDNSP_MSG_MAX)
+       ),
+       TP_fast_assign(
+               __entry->type = ring->type;
+               __entry->field0 = le32_to_cpu(trb->field[0]);
+               __entry->field1 = le32_to_cpu(trb->field[1]);
+               __entry->field2 = le32_to_cpu(trb->field[2]);
+               __entry->field3 = le32_to_cpu(trb->field[3]);
+               __entry->trb = (union cdnsp_trb *)trb;
+               __entry->trb_dma = cdnsp_trb_virt_to_dma(ring->deq_seg,
+                                                        (union cdnsp_trb *)trb);
+
+       ),
+       TP_printk("%s: %s trb: %p(%pad)", cdnsp_ring_type_string(__entry->type),
+                 cdnsp_decode_trb(__get_str(str), CDNSP_MSG_MAX,
+                                  __entry->field0, __entry->field1,
+                                  __entry->field2, __entry->field3),
+                                  __entry->trb, &__entry->trb_dma
+       )
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_handle_event,
+       TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+       TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_trb_without_td,
+       TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+       TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_handle_command,
+       TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+       TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_handle_transfer,
+       TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+       TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_queue_trb,
+       TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+       TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_cmd_wait_for_compl,
+       TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+       TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_cmd_timeout,
+       TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+       TP_ARGS(ring, trb)
+);
+
+DEFINE_EVENT(cdnsp_log_trb, cdnsp_defered_event,
+       TP_PROTO(struct cdnsp_ring *ring, struct cdnsp_generic_trb *trb),
+       TP_ARGS(ring, trb)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_pdev,
+       TP_PROTO(struct cdnsp_device *pdev),
+       TP_ARGS(pdev),
+       TP_STRUCT__entry(
+               __field(struct cdnsp_device *, pdev)
+               __field(struct usb_gadget *, gadget)
+               __field(dma_addr_t, out_ctx)
+               __field(dma_addr_t, in_ctx)
+               __field(u8, port_num)
+       ),
+       TP_fast_assign(
+               __entry->pdev = pdev;
+               __entry->gadget = &pdev->gadget;
+               __entry->in_ctx = pdev->in_ctx.dma;
+               __entry->out_ctx = pdev->out_ctx.dma;
+               __entry->port_num = pdev->active_port ?
+                               pdev->active_port->port_num : 0xFF;
+       ),
+       TP_printk("pdev %p gadget %p ctx %pad | %pad, port %d ",
+                 __entry->pdev, __entry->gadget, &__entry->in_ctx,
+                 &__entry->out_ctx, __entry->port_num
+       )
+);
+
+DEFINE_EVENT(cdnsp_log_pdev, cdnsp_alloc_priv_device,
+       TP_PROTO(struct cdnsp_device *vdev),
+       TP_ARGS(vdev)
+);
+
+DEFINE_EVENT(cdnsp_log_pdev, cdnsp_free_priv_device,
+       TP_PROTO(struct cdnsp_device *vdev),
+       TP_ARGS(vdev)
+);
+
+DEFINE_EVENT(cdnsp_log_pdev, cdnsp_setup_device,
+       TP_PROTO(struct cdnsp_device *vdev),
+       TP_ARGS(vdev)
+);
+
+DEFINE_EVENT(cdnsp_log_pdev, cdnsp_setup_addressable_priv_device,
+       TP_PROTO(struct cdnsp_device *vdev),
+       TP_ARGS(vdev)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_request,
+       TP_PROTO(struct cdnsp_request *req),
+       TP_ARGS(req),
+       TP_STRUCT__entry(
+               __string(name, req->pep->name)
+               __field(struct usb_request *, request)
+               __field(struct cdnsp_request *, preq)
+               __field(void *, buf)
+               __field(unsigned int, actual)
+               __field(unsigned int, length)
+               __field(int, status)
+               __field(dma_addr_t, dma)
+               __field(unsigned int, stream_id)
+               __field(unsigned int, zero)
+               __field(unsigned int, short_not_ok)
+               __field(unsigned int, no_interrupt)
+               __field(struct scatterlist*, sg)
+               __field(unsigned int, num_sgs)
+               __field(unsigned int, num_mapped_sgs)
+
+       ),
+       TP_fast_assign(
+               __assign_str(name, req->pep->name);
+               __entry->request = &req->request;
+               __entry->preq = req;
+               __entry->buf = req->request.buf;
+               __entry->actual = req->request.actual;
+               __entry->length = req->request.length;
+               __entry->status = req->request.status;
+               __entry->dma = req->request.dma;
+               __entry->stream_id = req->request.stream_id;
+               __entry->zero = req->request.zero;
+               __entry->short_not_ok = req->request.short_not_ok;
+               __entry->no_interrupt = req->request.no_interrupt;
+               __entry->sg = req->request.sg;
+               __entry->num_sgs = req->request.num_sgs;
+               __entry->num_mapped_sgs = req->request.num_mapped_sgs;
+       ),
+       TP_printk("%s; req U:%p/P:%p, req buf %p, length %u/%u, status %d, "
+                 "buf dma (%pad), SID %u, %s%s%s, sg %p, num_sg %d,"
+                 " num_m_sg %d",
+                 __get_str(name), __entry->request, __entry->preq,
+                 __entry->buf, __entry->actual, __entry->length,
+                 __entry->status, &__entry->dma,
+                 __entry->stream_id, __entry->zero ? "Z" : "z",
+                 __entry->short_not_ok ? "S" : "s",
+                 __entry->no_interrupt ? "I" : "i",
+                 __entry->sg, __entry->num_sgs, __entry->num_mapped_sgs
+               )
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_request_enqueue,
+       TP_PROTO(struct cdnsp_request *req),
+       TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_request_enqueue_busy,
+       TP_PROTO(struct cdnsp_request *req),
+       TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_request_enqueue_error,
+       TP_PROTO(struct cdnsp_request *req),
+       TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_request_dequeue,
+       TP_PROTO(struct cdnsp_request *req),
+       TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_request_giveback,
+       TP_PROTO(struct cdnsp_request *req),
+       TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_alloc_request,
+       TP_PROTO(struct cdnsp_request *req),
+       TP_ARGS(req)
+);
+
+DEFINE_EVENT(cdnsp_log_request, cdnsp_free_request,
+       TP_PROTO(struct cdnsp_request *req),
+       TP_ARGS(req)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_ep_ctx,
+       TP_PROTO(struct cdnsp_ep_ctx *ctx),
+       TP_ARGS(ctx),
+       TP_STRUCT__entry(
+               __field(u32, info)
+               __field(u32, info2)
+               __field(u64, deq)
+               __field(u32, tx_info)
+               __dynamic_array(char, str, CDNSP_MSG_MAX)
+       ),
+       TP_fast_assign(
+               __entry->info = le32_to_cpu(ctx->ep_info);
+               __entry->info2 = le32_to_cpu(ctx->ep_info2);
+               __entry->deq = le64_to_cpu(ctx->deq);
+               __entry->tx_info = le32_to_cpu(ctx->tx_info);
+       ),
+       TP_printk("%s", cdnsp_decode_ep_context(__get_str(str), CDNSP_MSG_MAX,
+                                               __entry->info, __entry->info2,
+                                               __entry->deq, __entry->tx_info)
+       )
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_ep_disabled,
+       TP_PROTO(struct cdnsp_ep_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_ep_stopped_or_disabled,
+       TP_PROTO(struct cdnsp_ep_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_remove_request,
+       TP_PROTO(struct cdnsp_ep_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_stop_ep,
+       TP_PROTO(struct cdnsp_ep_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_flush_ep,
+       TP_PROTO(struct cdnsp_ep_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_set_deq_ep,
+       TP_PROTO(struct cdnsp_ep_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_reset_ep,
+       TP_PROTO(struct cdnsp_ep_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_ep_ctx, cdnsp_handle_cmd_config_ep,
+       TP_PROTO(struct cdnsp_ep_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_slot_ctx,
+       TP_PROTO(struct cdnsp_slot_ctx *ctx),
+       TP_ARGS(ctx),
+       TP_STRUCT__entry(
+               __field(u32, info)
+               __field(u32, info2)
+               __field(u32, int_target)
+               __field(u32, state)
+       ),
+       TP_fast_assign(
+               __entry->info = le32_to_cpu(ctx->dev_info);
+               __entry->info2 = le32_to_cpu(ctx->dev_port);
+               __entry->int_target = le32_to_cpu(ctx->int_target);
+               __entry->state = le32_to_cpu(ctx->dev_state);
+       ),
+       TP_printk("%s", cdnsp_decode_slot_context(__entry->info,
+                                                 __entry->info2,
+                                                 __entry->int_target,
+                                                 __entry->state)
+       )
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_slot_already_in_default,
+       TP_PROTO(struct cdnsp_slot_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_enable_slot,
+       TP_PROTO(struct cdnsp_slot_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_disable_slot,
+       TP_PROTO(struct cdnsp_slot_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_reset_device,
+       TP_PROTO(struct cdnsp_slot_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_setup_device_slot,
+       TP_PROTO(struct cdnsp_slot_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_addr_dev,
+       TP_PROTO(struct cdnsp_slot_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_reset_dev,
+       TP_PROTO(struct cdnsp_slot_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_handle_cmd_set_deq,
+       TP_PROTO(struct cdnsp_slot_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DEFINE_EVENT(cdnsp_log_slot_ctx, cdnsp_configure_endpoint,
+       TP_PROTO(struct cdnsp_slot_ctx *ctx),
+       TP_ARGS(ctx)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_td_info,
+       TP_PROTO(struct cdnsp_request *preq),
+       TP_ARGS(preq),
+       TP_STRUCT__entry(
+               __string(name, preq->pep->name)
+               __field(struct usb_request *, request)
+               __field(struct cdnsp_request *, preq)
+               __field(union cdnsp_trb *, first_trb)
+               __field(union cdnsp_trb *, last_trb)
+               __field(dma_addr_t, trb_dma)
+       ),
+       TP_fast_assign(
+               __assign_str(name, preq->pep->name);
+               __entry->request = &preq->request;
+               __entry->preq = preq;
+               __entry->first_trb = preq->td.first_trb;
+               __entry->last_trb = preq->td.last_trb;
+               __entry->trb_dma = cdnsp_trb_virt_to_dma(preq->td.start_seg,
+                                                        preq->td.first_trb);
+       ),
+       TP_printk("%s req/preq:  %p/%p, first trb %p[vir]/%pad(dma), last trb %p",
+                 __get_str(name), __entry->request, __entry->preq,
+                 __entry->first_trb, &__entry->trb_dma,
+                 __entry->last_trb
+               )
+);
+
+DEFINE_EVENT(cdnsp_log_td_info, cdnsp_remove_request_td,
+       TP_PROTO(struct cdnsp_request *preq),
+       TP_ARGS(preq)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_ring,
+       TP_PROTO(struct cdnsp_ring *ring),
+       TP_ARGS(ring),
+       TP_STRUCT__entry(
+               __field(u32, type)
+               __field(void *, ring)
+               __field(dma_addr_t, enq)
+               __field(dma_addr_t, deq)
+               __field(dma_addr_t, enq_seg)
+               __field(dma_addr_t, deq_seg)
+               __field(unsigned int, num_segs)
+               __field(unsigned int, stream_id)
+               __field(unsigned int, cycle_state)
+               __field(unsigned int, num_trbs_free)
+               __field(unsigned int, bounce_buf_len)
+       ),
+       TP_fast_assign(
+               __entry->ring = ring;
+               __entry->type = ring->type;
+               __entry->num_segs = ring->num_segs;
+               __entry->stream_id = ring->stream_id;
+               __entry->enq_seg = ring->enq_seg->dma;
+               __entry->deq_seg = ring->deq_seg->dma;
+               __entry->cycle_state = ring->cycle_state;
+               __entry->num_trbs_free = ring->num_trbs_free;
+               __entry->bounce_buf_len = ring->bounce_buf_len;
+               __entry->enq = cdnsp_trb_virt_to_dma(ring->enq_seg,
+                                                    ring->enqueue);
+               __entry->deq = cdnsp_trb_virt_to_dma(ring->deq_seg,
+                                                    ring->dequeue);
+       ),
+       TP_printk("%s %p: enq %pad(%pad) deq %pad(%pad) segs %d stream %d"
+                 " free_trbs %d bounce %d cycle %d",
+                 cdnsp_ring_type_string(__entry->type), __entry->ring,
+                 &__entry->enq, &__entry->enq_seg,
+                 &__entry->deq, &__entry->deq_seg,
+                 __entry->num_segs,
+                 __entry->stream_id,
+                 __entry->num_trbs_free,
+                 __entry->bounce_buf_len,
+                 __entry->cycle_state
+               )
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_ring_alloc,
+       TP_PROTO(struct cdnsp_ring *ring),
+       TP_ARGS(ring)
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_ring_free,
+       TP_PROTO(struct cdnsp_ring *ring),
+       TP_ARGS(ring)
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_set_stream_ring,
+       TP_PROTO(struct cdnsp_ring *ring),
+       TP_ARGS(ring)
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_ring_expansion,
+       TP_PROTO(struct cdnsp_ring *ring),
+       TP_ARGS(ring)
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_inc_enq,
+       TP_PROTO(struct cdnsp_ring *ring),
+       TP_ARGS(ring)
+);
+
+DEFINE_EVENT(cdnsp_log_ring, cdnsp_inc_deq,
+       TP_PROTO(struct cdnsp_ring *ring),
+       TP_ARGS(ring)
+);
+
+DECLARE_EVENT_CLASS(cdnsp_log_portsc,
+               TP_PROTO(u32 portnum, u32 portsc),
+               TP_ARGS(portnum, portsc),
+               TP_STRUCT__entry(
+                               __field(u32, portnum)
+                               __field(u32, portsc)
+                               __dynamic_array(char, str, CDNSP_MSG_MAX)
+                               ),
+               TP_fast_assign(
+                               __entry->portnum = portnum;
+                               __entry->portsc = portsc;
+                               ),
+               TP_printk("port-%d: %s",
+                         __entry->portnum,
+                         cdnsp_decode_portsc(__get_str(str), CDNSP_MSG_MAX,
+                                             __entry->portsc)
+                       )
+);
+
+DEFINE_EVENT(cdnsp_log_portsc, cdnsp_handle_port_status,
+               TP_PROTO(u32 portnum, u32 portsc),
+               TP_ARGS(portnum, portsc)
+);
+
+DEFINE_EVENT(cdnsp_log_portsc, cdnsp_link_state_changed,
+               TP_PROTO(u32 portnum, u32 portsc),
+               TP_ARGS(portnum, portsc)
+);
+
+TRACE_EVENT(cdnsp_stream_number,
+       TP_PROTO(struct cdnsp_ep *pep, int num_stream_ctxs, int num_streams),
+       TP_ARGS(pep, num_stream_ctxs, num_streams),
+       TP_STRUCT__entry(
+               __string(name, pep->name)
+               __field(int, num_stream_ctxs)
+               __field(int, num_streams)
+       ),
+       TP_fast_assign(
+               __entry->num_stream_ctxs = num_stream_ctxs;
+               __entry->num_streams = num_streams;
+       ),
+       TP_printk("%s Need %u stream ctx entries for %u stream IDs.",
+                 __get_str(name), __entry->num_stream_ctxs,
+                 __entry->num_streams)
+);
+
+#endif /* __CDNSP_DEV_TRACE_H */
+
+/* this part must be outside header guard */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cdnsp-trace
+
+#include <trace/define_trace.h>
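
Each DEFINE_EVENT above stamps out a trace_<name>() call site that shares its class's record layout and TP_printk format, so the per-event cost is a thin wrapper rather than a full event definition; once built, the events appear under the cdnsp-dev system in tracefs and can be toggled at runtime. Extending the file later is correspondingly cheap. As a hedged sketch, a hypothetical new doorbell event reusing the cdnsp_log_ep class would be (event name illustrative, not part of this patch):

    /* hypothetical event reusing the existing cdnsp_log_ep class */
    DEFINE_EVENT(cdnsp_log_ep, cdnsp_example_drbl_retry,
            TP_PROTO(struct cdnsp_ep *pep, u32 stream_id),
            TP_ARGS(pep, stream_id)
    );

The matching call site elsewhere in the driver would simply be trace_cdnsp_example_drbl_retry(pep, stream_id), which compiles down to a static-branch-guarded no-op until the event is enabled.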
index 1991cb5cf6bf5ae3d832944299c7456991d1d581..199713769289c1a42a6fe44a51375a5fa65d0be3 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Cadence USBSS DRD Driver.
+ * Cadence USBSS and USBSSP DRD Driver.
  *
  * Copyright (C) 2018-2019 Cadence.
  * Copyright (C) 2017-2018 NXP
 #include <linux/io.h>
 #include <linux/pm_runtime.h>
 
-#include "gadget.h"
 #include "core.h"
 #include "host-export.h"
-#include "gadget-export.h"
 #include "drd.h"
 
-static int cdns3_idle_init(struct cdns3 *cdns);
+static int cdns_idle_init(struct cdns *cdns);
 
-static int cdns3_role_start(struct cdns3 *cdns, enum usb_role role)
+static int cdns_role_start(struct cdns *cdns, enum usb_role role)
 {
        int ret;
 
@@ -41,47 +39,47 @@ static int cdns3_role_start(struct cdns3 *cdns, enum usb_role role)
        if (!cdns->roles[role])
                return -ENXIO;
 
-       if (cdns->roles[role]->state == CDNS3_ROLE_STATE_ACTIVE)
+       if (cdns->roles[role]->state == CDNS_ROLE_STATE_ACTIVE)
                return 0;
 
        mutex_lock(&cdns->mutex);
        ret = cdns->roles[role]->start(cdns);
        if (!ret)
-               cdns->roles[role]->state = CDNS3_ROLE_STATE_ACTIVE;
+               cdns->roles[role]->state = CDNS_ROLE_STATE_ACTIVE;
        mutex_unlock(&cdns->mutex);
 
        return ret;
 }
 
-static void cdns3_role_stop(struct cdns3 *cdns)
+static void cdns_role_stop(struct cdns *cdns)
 {
        enum usb_role role = cdns->role;
 
        if (WARN_ON(role > USB_ROLE_DEVICE))
                return;
 
-       if (cdns->roles[role]->state == CDNS3_ROLE_STATE_INACTIVE)
+       if (cdns->roles[role]->state == CDNS_ROLE_STATE_INACTIVE)
                return;
 
        mutex_lock(&cdns->mutex);
        cdns->roles[role]->stop(cdns);
-       cdns->roles[role]->state = CDNS3_ROLE_STATE_INACTIVE;
+       cdns->roles[role]->state = CDNS_ROLE_STATE_INACTIVE;
        mutex_unlock(&cdns->mutex);
 }
 
-static void cdns3_exit_roles(struct cdns3 *cdns)
+static void cdns_exit_roles(struct cdns *cdns)
 {
-       cdns3_role_stop(cdns);
-       cdns3_drd_exit(cdns);
+       cdns_role_stop(cdns);
+       cdns_drd_exit(cdns);
 }
 
 /**
- * cdns3_core_init_role - initialize role of operation
- * @cdns: Pointer to cdns3 structure
+ * cdns_core_init_role - initialize role of operation
+ * @cdns: Pointer to cdns structure
  *
  * Returns 0 on success otherwise negative errno
  */
-static int cdns3_core_init_role(struct cdns3 *cdns)
+static int cdns_core_init_role(struct cdns *cdns)
 {
        struct device *dev = cdns->dev;
        enum usb_dr_mode best_dr_mode;
@@ -97,13 +95,23 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
         * can be restricted later depending on strap pin configuration.
         */
        if (dr_mode == USB_DR_MODE_UNKNOWN) {
-               if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) &&
-                   IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
-                       dr_mode = USB_DR_MODE_OTG;
-               else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST))
-                       dr_mode = USB_DR_MODE_HOST;
-               else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
-                       dr_mode = USB_DR_MODE_PERIPHERAL;
+               if (cdns->version == CDNSP_CONTROLLER_V2) {
+                       if (IS_ENABLED(CONFIG_USB_CDNSP_HOST) &&
+                           IS_ENABLED(CONFIG_USB_CDNSP_GADGET))
+                               dr_mode = USB_DR_MODE_OTG;
+                       else if (IS_ENABLED(CONFIG_USB_CDNSP_HOST))
+                               dr_mode = USB_DR_MODE_HOST;
+                       else if (IS_ENABLED(CONFIG_USB_CDNSP_GADGET))
+                               dr_mode = USB_DR_MODE_PERIPHERAL;
+               } else {
+                       if (IS_ENABLED(CONFIG_USB_CDNS3_HOST) &&
+                           IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
+                               dr_mode = USB_DR_MODE_OTG;
+                       else if (IS_ENABLED(CONFIG_USB_CDNS3_HOST))
+                               dr_mode = USB_DR_MODE_HOST;
+                       else if (IS_ENABLED(CONFIG_USB_CDNS3_GADGET))
+                               dr_mode = USB_DR_MODE_PERIPHERAL;
+               }
        }
 
        /*
@@ -112,7 +120,7 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
         */
        best_dr_mode = cdns->dr_mode;
 
-       ret = cdns3_idle_init(cdns);
+       ret = cdns_idle_init(cdns);
        if (ret)
                return ret;
 
@@ -128,7 +136,14 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
        dr_mode = best_dr_mode;
 
        if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_HOST) {
-               ret = cdns3_host_init(cdns);
+               if ((cdns->version == CDNSP_CONTROLLER_V2 &&
+                    IS_ENABLED(CONFIG_USB_CDNSP_HOST)) ||
+                   (cdns->version < CDNSP_CONTROLLER_V2 &&
+                    IS_ENABLED(CONFIG_USB_CDNS3_HOST)))
+                       ret = cdns_host_init(cdns);
+               else
+                       ret = -ENXIO;
+
                if (ret) {
                        dev_err(dev, "Host initialization failed with %d\n",
                                ret);
@@ -137,7 +152,11 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
        }
 
        if (dr_mode == USB_DR_MODE_OTG || dr_mode == USB_DR_MODE_PERIPHERAL) {
-               ret = cdns3_gadget_init(cdns);
+               if (cdns->gadget_init)
+                       ret = cdns->gadget_init(cdns);
+               else
+                       ret = -ENXIO;
+
                if (ret) {
                        dev_err(dev, "Device initialization failed with %d\n",
                                ret);
@@ -147,28 +166,28 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
 
        cdns->dr_mode = dr_mode;
 
-       ret = cdns3_drd_update_mode(cdns);
+       ret = cdns_drd_update_mode(cdns);
        if (ret)
                goto err;
 
        /* Initialize idle role to start with */
-       ret = cdns3_role_start(cdns, USB_ROLE_NONE);
+       ret = cdns_role_start(cdns, USB_ROLE_NONE);
        if (ret)
                goto err;
 
        switch (cdns->dr_mode) {
        case USB_DR_MODE_OTG:
-               ret = cdns3_hw_role_switch(cdns);
+               ret = cdns_hw_role_switch(cdns);
                if (ret)
                        goto err;
                break;
        case USB_DR_MODE_PERIPHERAL:
-               ret = cdns3_role_start(cdns, USB_ROLE_DEVICE);
+               ret = cdns_role_start(cdns, USB_ROLE_DEVICE);
                if (ret)
                        goto err;
                break;
        case USB_DR_MODE_HOST:
-               ret = cdns3_role_start(cdns, USB_ROLE_HOST);
+               ret = cdns_role_start(cdns, USB_ROLE_HOST);
                if (ret)
                        goto err;
                break;
@@ -179,32 +198,32 @@ static int cdns3_core_init_role(struct cdns3 *cdns)
 
        return 0;
 err:
-       cdns3_exit_roles(cdns);
+       cdns_exit_roles(cdns);
        return ret;
 }
 
 /**
- * cdns3_hw_role_state_machine  - role switch state machine based on hw events.
+ * cdns_hw_role_state_machine - role switch state machine based on hw events.
  * @cdns: Pointer to controller structure.
  *
  * Returns next role to be entered based on hw events.
  */
-static enum usb_role cdns3_hw_role_state_machine(struct cdns3 *cdns)
+static enum usb_role cdns_hw_role_state_machine(struct cdns *cdns)
 {
        enum usb_role role = USB_ROLE_NONE;
        int id, vbus;
 
        if (cdns->dr_mode != USB_DR_MODE_OTG) {
-               if (cdns3_is_host(cdns))
+               if (cdns_is_host(cdns))
                        role = USB_ROLE_HOST;
-               if (cdns3_is_device(cdns))
+               if (cdns_is_device(cdns))
                        role = USB_ROLE_DEVICE;
 
                return role;
        }
 
-       id = cdns3_get_id(cdns);
-       vbus = cdns3_get_vbus(cdns);
+       id = cdns_get_id(cdns);
+       vbus = cdns_get_vbus(cdns);
 
        /*
         * Role change state machine
@@ -240,28 +259,28 @@ static enum usb_role cdns3_hw_role_state_machine(struct cdns3 *cdns)
        return role;
 }
 
-static int cdns3_idle_role_start(struct cdns3 *cdns)
+static int cdns_idle_role_start(struct cdns *cdns)
 {
        return 0;
 }
 
-static void cdns3_idle_role_stop(struct cdns3 *cdns)
+static void cdns_idle_role_stop(struct cdns *cdns)
 {
        /* Program Lane swap and bring PHY out of RESET */
        phy_reset(cdns->usb3_phy);
 }
 
-static int cdns3_idle_init(struct cdns3 *cdns)
+static int cdns_idle_init(struct cdns *cdns)
 {
-       struct cdns3_role_driver *rdrv;
+       struct cdns_role_driver *rdrv;
 
        rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
        if (!rdrv)
                return -ENOMEM;
 
-       rdrv->start = cdns3_idle_role_start;
-       rdrv->stop = cdns3_idle_role_stop;
-       rdrv->state = CDNS3_ROLE_STATE_INACTIVE;
+       rdrv->start = cdns_idle_role_start;
+       rdrv->stop = cdns_idle_role_stop;
+       rdrv->state = CDNS_ROLE_STATE_INACTIVE;
        rdrv->suspend = NULL;
        rdrv->resume = NULL;
        rdrv->name = "idle";
@@ -272,10 +291,10 @@ static int cdns3_idle_init(struct cdns3 *cdns)
 }
 
 /**
- * cdns3_hw_role_switch - switch roles based on HW state
+ * cdns_hw_role_switch - switch roles based on HW state
  * @cdns: controller
  */
-int cdns3_hw_role_switch(struct cdns3 *cdns)
+int cdns_hw_role_switch(struct cdns *cdns)
 {
        enum usb_role real_role, current_role;
        int ret = 0;
@@ -287,22 +306,22 @@ int cdns3_hw_role_switch(struct cdns3 *cdns)
        pm_runtime_get_sync(cdns->dev);
 
        current_role = cdns->role;
-       real_role = cdns3_hw_role_state_machine(cdns);
+       real_role = cdns_hw_role_state_machine(cdns);
 
        /* Do nothing if nothing changed */
        if (current_role == real_role)
                goto exit;
 
-       cdns3_role_stop(cdns);
+       cdns_role_stop(cdns);
 
        dev_dbg(cdns->dev, "Switching role %d -> %d", current_role, real_role);
 
-       ret = cdns3_role_start(cdns, real_role);
+       ret = cdns_role_start(cdns, real_role);
        if (ret) {
                /* Back to current role */
                dev_err(cdns->dev, "set %d has failed, back to %d\n",
                        real_role, current_role);
-               ret = cdns3_role_start(cdns, current_role);
+               ret = cdns_role_start(cdns, current_role);
                if (ret)
                        dev_err(cdns->dev, "back to %d failed too\n",
                                current_role);
@@ -319,15 +338,15 @@ exit:
  *
  * Returns role
  */
-static enum usb_role cdns3_role_get(struct usb_role_switch *sw)
+static enum usb_role cdns_role_get(struct usb_role_switch *sw)
 {
-       struct cdns3 *cdns = usb_role_switch_get_drvdata(sw);
+       struct cdns *cdns = usb_role_switch_get_drvdata(sw);
 
        return cdns->role;
 }
 
 /**
- * cdns3_role_set - set current role of controller.
+ * cdns_role_set - set current role of controller.
  *
  * @sw: pointer to USB role switch structure
  * @role: the new role to switch to
@@ -335,9 +354,9 @@ static enum usb_role cdns3_role_get(struct usb_role_switch *sw)
  * - Role switch for dual-role devices
  * - USB_ROLE_DEVICE <--> USB_ROLE_NONE for peripheral-only devices
  */
-static int cdns3_role_set(struct usb_role_switch *sw, enum usb_role role)
+static int cdns_role_set(struct usb_role_switch *sw, enum usb_role role)
 {
-       struct cdns3 *cdns = usb_role_switch_get_drvdata(sw);
+       struct cdns *cdns = usb_role_switch_get_drvdata(sw);
        int ret = 0;
 
        pm_runtime_get_sync(cdns->dev);
@@ -365,8 +384,8 @@ static int cdns3_role_set(struct usb_role_switch *sw, enum usb_role role)
                }
        }
 
-       cdns3_role_stop(cdns);
-       ret = cdns3_role_start(cdns, role);
+       cdns_role_stop(cdns);
+       ret = cdns_role_start(cdns, role);
        if (ret)
                dev_err(cdns->dev, "set role %d has failed\n", role);
 
@@ -375,37 +394,17 @@ pm_put:
        return ret;
 }
 
-static int set_phy_power_on(struct cdns3 *cdns)
-{
-       int ret;
-
-       ret = phy_power_on(cdns->usb2_phy);
-       if (ret)
-               return ret;
-
-       ret = phy_power_on(cdns->usb3_phy);
-       if (ret)
-               phy_power_off(cdns->usb2_phy);
-
-       return ret;
-}
-
-static void set_phy_power_off(struct cdns3 *cdns)
-{
-       phy_power_off(cdns->usb3_phy);
-       phy_power_off(cdns->usb2_phy);
-}
 
 /**
- * cdns3_wakeup_irq - interrupt handler for wakeup events
- * @irq: irq number for cdns3 core device
- * @data: structure of cdns3
+ * cdns_wakeup_irq - interrupt handler for wakeup events
+ * @irq: irq number for cdns3/cdnsp core device
+ * @data: structure of cdns
  *
  * Returns IRQ_HANDLED or IRQ_NONE
  */
-static irqreturn_t cdns3_wakeup_irq(int irq, void *data)
+static irqreturn_t cdns_wakeup_irq(int irq, void *data)
 {
-       struct cdns3 *cdns = data;
+       struct cdns *cdns = data;
 
        if (cdns->in_lpm) {
                disable_irq_nosync(irq);
@@ -420,17 +419,14 @@ static irqreturn_t cdns3_wakeup_irq(int irq, void *data)
 }
 
 /**
- * cdns3_probe - probe for cdns3 core device
- * @pdev: Pointer to cdns3 core platform device
+ * cdns_init - probe for cdns3/cdnsp core device
+ * @cdns: Pointer to cdns structure.
  *
  * Returns 0 on success otherwise negative errno
  */
-static int cdns3_probe(struct platform_device *pdev)
+int cdns_init(struct cdns *cdns)
 {
-       struct device *dev = &pdev->dev;
-       struct resource *res;
-       struct cdns3 *cdns;
-       void __iomem *regs;
+       struct device *dev = cdns->dev;
        int ret;
 
        ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
@@ -439,259 +435,78 @@ static int cdns3_probe(struct platform_device *pdev)
                return ret;
        }
 
-       cdns = devm_kzalloc(dev, sizeof(*cdns), GFP_KERNEL);
-       if (!cdns)
-               return -ENOMEM;
-
-       cdns->dev = dev;
-       cdns->pdata = dev_get_platdata(dev);
-
-       platform_set_drvdata(pdev, cdns);
-
-       res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "host");
-       if (!res) {
-               dev_err(dev, "missing host IRQ\n");
-               return -ENODEV;
-       }
-
-       cdns->xhci_res[0] = *res;
-
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "xhci");
-       if (!res) {
-               dev_err(dev, "couldn't get xhci resource\n");
-               return -ENXIO;
-       }
-
-       cdns->xhci_res[1] = *res;
-
-       cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral");
-       if (cdns->dev_irq < 0)
-               return cdns->dev_irq;
-
-       regs = devm_platform_ioremap_resource_byname(pdev, "dev");
-       if (IS_ERR(regs))
-               return PTR_ERR(regs);
-       cdns->dev_regs  = regs;
-
-       cdns->otg_irq = platform_get_irq_byname(pdev, "otg");
-       if (cdns->otg_irq < 0)
-               return cdns->otg_irq;
-
-       res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "otg");
-       if (!res) {
-               dev_err(dev, "couldn't get otg resource\n");
-               return -ENXIO;
-       }
-
-       cdns->phyrst_a_enable = device_property_read_bool(dev, "cdns,phyrst-a-enable");
-
-       cdns->otg_res = *res;
-
-       cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup");
-       if (cdns->wakeup_irq == -EPROBE_DEFER)
-               return cdns->wakeup_irq;
-       else if (cdns->wakeup_irq == 0)
-               return -EINVAL;
-
-       if (cdns->wakeup_irq < 0) {
-               dev_dbg(dev, "couldn't get wakeup irq\n");
-               cdns->wakeup_irq = 0x0;
-       }
-
        mutex_init(&cdns->mutex);
 
-       cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy");
-       if (IS_ERR(cdns->usb2_phy))
-               return PTR_ERR(cdns->usb2_phy);
-
-       ret = phy_init(cdns->usb2_phy);
-       if (ret)
-               return ret;
-
-       cdns->usb3_phy = devm_phy_optional_get(dev, "cdns3,usb3-phy");
-       if (IS_ERR(cdns->usb3_phy))
-               return PTR_ERR(cdns->usb3_phy);
-
-       ret = phy_init(cdns->usb3_phy);
-       if (ret)
-               goto err1;
-
-       ret = set_phy_power_on(cdns);
-       if (ret)
-               goto err2;
-
        if (device_property_read_bool(dev, "usb-role-switch")) {
                struct usb_role_switch_desc sw_desc = { };
 
-               sw_desc.set = cdns3_role_set;
-               sw_desc.get = cdns3_role_get;
+               sw_desc.set = cdns_role_set;
+               sw_desc.get = cdns_role_get;
                sw_desc.allow_userspace_control = true;
                sw_desc.driver_data = cdns;
                sw_desc.fwnode = dev->fwnode;
 
                cdns->role_sw = usb_role_switch_register(dev, &sw_desc);
                if (IS_ERR(cdns->role_sw)) {
-                       ret = PTR_ERR(cdns->role_sw);
                        dev_warn(dev, "Unable to register Role Switch\n");
-                       goto err3;
+                       return PTR_ERR(cdns->role_sw);
                }
        }
 
        if (cdns->wakeup_irq) {
                ret = devm_request_irq(cdns->dev, cdns->wakeup_irq,
-                                               cdns3_wakeup_irq,
+                                               cdns_wakeup_irq,
                                                IRQF_SHARED,
                                                dev_name(cdns->dev), cdns);
 
                if (ret) {
                        dev_err(cdns->dev, "couldn't register wakeup irq handler\n");
-                       goto err4;
+                       goto role_switch_unregister;
                }
        }
 
-       ret = cdns3_drd_init(cdns);
+       ret = cdns_drd_init(cdns);
        if (ret)
-               goto err4;
+               goto init_failed;
 
-       ret = cdns3_core_init_role(cdns);
+       ret = cdns_core_init_role(cdns);
        if (ret)
-               goto err4;
+               goto init_failed;
 
        spin_lock_init(&cdns->lock);
-       device_set_wakeup_capable(dev, true);
-       pm_runtime_set_active(dev);
-       pm_runtime_enable(dev);
-       if (!(cdns->pdata && (cdns->pdata->quirks & CDNS3_DEFAULT_PM_RUNTIME_ALLOW)))
-               pm_runtime_forbid(dev);
 
-       /*
-        * The controller needs less time between bus and controller suspend,
-        * and we also needs a small delay to avoid frequently entering low
-        * power mode.
-        */
-       pm_runtime_set_autosuspend_delay(dev, 20);
-       pm_runtime_mark_last_busy(dev);
-       pm_runtime_use_autosuspend(dev);
        dev_dbg(dev, "Cadence USB3 core: probe succeed\n");
 
        return 0;
-err4:
-       cdns3_drd_exit(cdns);
+init_failed:
+       cdns_drd_exit(cdns);
+role_switch_unregister:
        if (cdns->role_sw)
                usb_role_switch_unregister(cdns->role_sw);
-err3:
-       set_phy_power_off(cdns);
-err2:
-       phy_exit(cdns->usb3_phy);
-err1:
-       phy_exit(cdns->usb2_phy);
 
        return ret;
 }
+EXPORT_SYMBOL_GPL(cdns_init);
 
 /**
- * cdns3_remove - unbind drd driver and clean up
- * @pdev: Pointer to Linux platform device
+ * cdns_remove - unbind drd driver and clean up
+ * @cdns: Pointer to cdns structure.
  *
  * Returns 0 on success otherwise negative errno
  */
-static int cdns3_remove(struct platform_device *pdev)
+int cdns_remove(struct cdns *cdns)
 {
-       struct cdns3 *cdns = platform_get_drvdata(pdev);
-
-       pm_runtime_get_sync(&pdev->dev);
-       pm_runtime_disable(&pdev->dev);
-       pm_runtime_put_noidle(&pdev->dev);
-       cdns3_exit_roles(cdns);
+       cdns_exit_roles(cdns);
        usb_role_switch_unregister(cdns->role_sw);
-       set_phy_power_off(cdns);
-       phy_exit(cdns->usb2_phy);
-       phy_exit(cdns->usb3_phy);
-       return 0;
-}
-
-#ifdef CONFIG_PM
-
-static int cdns3_set_platform_suspend(struct device *dev,
-               bool suspend, bool wakeup)
-{
-       struct cdns3 *cdns = dev_get_drvdata(dev);
-       int ret = 0;
-
-       if (cdns->pdata && cdns->pdata->platform_suspend)
-               ret = cdns->pdata->platform_suspend(dev, suspend, wakeup);
-
-       return ret;
-}
-
-static int cdns3_controller_suspend(struct device *dev, pm_message_t msg)
-{
-       struct cdns3 *cdns = dev_get_drvdata(dev);
-       bool wakeup;
-       unsigned long flags;
-
-       if (cdns->in_lpm)
-               return 0;
-
-       if (PMSG_IS_AUTO(msg))
-               wakeup = true;
-       else
-               wakeup = device_may_wakeup(dev);
-
-       cdns3_set_platform_suspend(cdns->dev, true, wakeup);
-       set_phy_power_off(cdns);
-       spin_lock_irqsave(&cdns->lock, flags);
-       cdns->in_lpm = true;
-       spin_unlock_irqrestore(&cdns->lock, flags);
-       dev_dbg(cdns->dev, "%s ends\n", __func__);
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(cdns_remove);
 
-static int cdns3_controller_resume(struct device *dev, pm_message_t msg)
-{
-       struct cdns3 *cdns = dev_get_drvdata(dev);
-       int ret;
-       unsigned long flags;
-
-       if (!cdns->in_lpm)
-               return 0;
-
-       ret = set_phy_power_on(cdns);
-       if (ret)
-               return ret;
-
-       cdns3_set_platform_suspend(cdns->dev, false, false);
-
-       spin_lock_irqsave(&cdns->lock, flags);
-       if (cdns->roles[cdns->role]->resume && !PMSG_IS_AUTO(msg))
-               cdns->roles[cdns->role]->resume(cdns, false);
-
-       cdns->in_lpm = false;
-       spin_unlock_irqrestore(&cdns->lock, flags);
-       if (cdns->wakeup_pending) {
-               cdns->wakeup_pending = false;
-               enable_irq(cdns->wakeup_irq);
-       }
-       dev_dbg(cdns->dev, "%s ends\n", __func__);
-
-       return ret;
-}
-
-static int cdns3_runtime_suspend(struct device *dev)
-{
-       return cdns3_controller_suspend(dev, PMSG_AUTO_SUSPEND);
-}
-
-static int cdns3_runtime_resume(struct device *dev)
-{
-       return cdns3_controller_resume(dev, PMSG_AUTO_RESUME);
-}
 #ifdef CONFIG_PM_SLEEP
-
-static int cdns3_suspend(struct device *dev)
+int cdns_suspend(struct cdns *cdns)
 {
-       struct cdns3 *cdns = dev_get_drvdata(dev);
+       struct device *dev = cdns->dev;
        unsigned long flags;
 
        if (pm_runtime_status_suspended(dev))
@@ -703,52 +518,30 @@ static int cdns3_suspend(struct device *dev)
                spin_unlock_irqrestore(&cdns->lock, flags);
        }
 
-       return cdns3_controller_suspend(dev, PMSG_SUSPEND);
+       return 0;
 }
+EXPORT_SYMBOL_GPL(cdns_suspend);
 
-static int cdns3_resume(struct device *dev)
+int cdns_resume(struct cdns *cdns, u8 set_active)
 {
-       int ret;
+       struct device *dev = cdns->dev;
 
-       ret = cdns3_controller_resume(dev, PMSG_RESUME);
-       if (ret)
-               return ret;
+       if (cdns->roles[cdns->role]->resume)
+               cdns->roles[cdns->role]->resume(cdns, false);
 
-       pm_runtime_disable(dev);
-       pm_runtime_set_active(dev);
-       pm_runtime_enable(dev);
+       if (set_active) {
+               pm_runtime_disable(dev);
+               pm_runtime_set_active(dev);
+               pm_runtime_enable(dev);
+       }
 
-       return ret;
+       return 0;
 }
+EXPORT_SYMBOL_GPL(cdns_resume);
 #endif /* CONFIG_PM_SLEEP */
-#endif /* CONFIG_PM */
-
-static const struct dev_pm_ops cdns3_pm_ops = {
-       SET_SYSTEM_SLEEP_PM_OPS(cdns3_suspend, cdns3_resume)
-       SET_RUNTIME_PM_OPS(cdns3_runtime_suspend, cdns3_runtime_resume, NULL)
-};
-
-#ifdef CONFIG_OF
-static const struct of_device_id of_cdns3_match[] = {
-       { .compatible = "cdns,usb3" },
-       { },
-};
-MODULE_DEVICE_TABLE(of, of_cdns3_match);
-#endif
-
-static struct platform_driver cdns3_driver = {
-       .probe          = cdns3_probe,
-       .remove         = cdns3_remove,
-       .driver         = {
-               .name   = "cdns-usb3",
-               .of_match_table = of_match_ptr(of_cdns3_match),
-               .pm     = &cdns3_pm_ops,
-       },
-};
-
-module_platform_driver(cdns3_driver);
-
-MODULE_ALIAS("platform:cdns3");
+
+MODULE_AUTHOR("Peter Chen <peter.chen@nxp.com>");
 MODULE_AUTHOR("Pawel Laszczak <pawell@cadence.com>");
-MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("Cadence USB3 DRD Controller Driver");
+MODULE_AUTHOR("Roger Quadros <rogerq@ti.com>");
+MODULE_DESCRIPTION("Cadence USBSS and USBSSP DRD Driver");
+MODULE_LICENSE("GPL");
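
With cdns3_probe()/cdns3_remove() gone, core.c is no longer a platform driver but a library: resource discovery, PHY handling, and runtime-PM setup move out to per-platform glue code, which fills in struct cdns (including the new gadget_init callback that replaces the direct cdns3_gadget_init() call) and then hands it to the exported cdns_init(). A minimal sketch of that contract, assuming a hypothetical glue driver and eliding most resource lookups and error paths:

    #include <linux/platform_device.h>
    #include "core.h"
    #include "gadget-export.h"

    /* hypothetical glue probe - not part of this patch */
    static int example_glue_probe(struct platform_device *pdev)
    {
            struct cdns *cdns;

            cdns = devm_kzalloc(&pdev->dev, sizeof(*cdns), GFP_KERNEL);
            if (!cdns)
                    return -ENOMEM;

            cdns->dev = &pdev->dev;
            cdns->dev_irq = platform_get_irq_byname(pdev, "peripheral");
            if (cdns->dev_irq < 0)
                    return cdns->dev_irq;

            /* ... map register regions, get otg/wakeup IRQs, PHYs ... */

            /* select the gadget stack for this controller generation */
            cdns->gadget_init = cdns3_gadget_init;

            platform_set_drvdata(pdev, cdns);

            return cdns_init(cdns);
    }

Note how the error unwinding in cdns_init() correspondingly shrinks to role-switch and DRD teardown; PHY power sequencing becomes the glue driver's responsibility.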
index 3176f924293a188b922be6271435627478d8f603..f8e350cef69958c40c98fc43958bd0bea744bc30 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Cadence USBSS DRD Header File.
+ * Cadence USBSS and USBSSP DRD Header File.
  *
  * Copyright (C) 2017-2018 NXP
  * Copyright (C) 2018-2019 Cadence.
 #ifndef __LINUX_CDNS3_CORE_H
 #define __LINUX_CDNS3_CORE_H
 
-struct cdns3;
+struct cdns;
 
 /**
- * struct cdns3_role_driver - host/gadget role driver
+ * struct cdns_role_driver - host/gadget role driver
  * @start: start this role
  * @stop: stop this role
  * @suspend: suspend callback for this role
@@ -26,18 +26,18 @@ struct cdns3;
  * @name: role name string (host/gadget)
  * @state: current state
  */
-struct cdns3_role_driver {
-       int (*start)(struct cdns3 *cdns);
-       void (*stop)(struct cdns3 *cdns);
-       int (*suspend)(struct cdns3 *cdns, bool do_wakeup);
-       int (*resume)(struct cdns3 *cdns, bool hibernated);
+struct cdns_role_driver {
+       int (*start)(struct cdns *cdns);
+       void (*stop)(struct cdns *cdns);
+       int (*suspend)(struct cdns *cdns, bool do_wakeup);
+       int (*resume)(struct cdns *cdns, bool hibernated);
        const char *name;
-#define CDNS3_ROLE_STATE_INACTIVE      0
-#define CDNS3_ROLE_STATE_ACTIVE                1
+#define CDNS_ROLE_STATE_INACTIVE       0
+#define CDNS_ROLE_STATE_ACTIVE         1
        int state;
 };
 
-#define CDNS3_XHCI_RESOURCES_NUM       2
+#define CDNS_XHCI_RESOURCES_NUM        2
 
 struct cdns3_platform_data {
        int (*platform_suspend)(struct device *dev,
@@ -47,7 +47,7 @@ struct cdns3_platform_data {
 };
 
 /**
- * struct cdns3 - Representation of Cadence USB3 DRD controller.
+ * struct cdns - Representation of Cadence USB3 DRD controller.
  * @dev: pointer to Cadence device struct
  * @xhci_regs: pointer to base of xhci registers
  * @xhci_res: the resource for xhci
@@ -55,14 +55,16 @@ struct cdns3_platform_data {
  * @otg_res: the resource for otg
  * @otg_v0_regs: pointer to base of v0 otg registers
  * @otg_v1_regs: pointer to base of v1 otg registers
+ * @otg_cdnsp_regs: pointer to base of CDNSP otg registers
  * @otg_regs: pointer to base of otg registers
+ * @otg_irq_regs: pointer to interrupt registers
  * @otg_irq: irq number for otg controller
  * @dev_irq: irq number for device controller
  * @wakeup_irq: irq number for wakeup event, it is optional
  * @roles: array of supported roles for this controller
  * @role: current role
- * @host_dev: the child host device pointer for cdns3 core
- * @gadget_dev: the child gadget device pointer for cdns3 core
+ * @host_dev: the child host device pointer for cdns core
+ * @gadget_dev: the child gadget device pointer
  * @usb2_phy: pointer to USB2 PHY
  * @usb3_phy: pointer to USB3 PHY
  * @mutex: the mutex for concurrent code at driver
@@ -76,29 +78,33 @@ struct cdns3_platform_data {
  * @pdata: platform data from glue layer
  * @lock: spinlock structure
  * @xhci_plat_data: xhci private data structure pointer
+ * @gadget_init: pointer to gadget initialization function
  */
-struct cdns3 {
+struct cdns {
        struct device                   *dev;
        void __iomem                    *xhci_regs;
-       struct resource                 xhci_res[CDNS3_XHCI_RESOURCES_NUM];
+       struct resource                 xhci_res[CDNS_XHCI_RESOURCES_NUM];
        struct cdns3_usb_regs __iomem   *dev_regs;
 
        struct resource                 otg_res;
        struct cdns3_otg_legacy_regs    *otg_v0_regs;
        struct cdns3_otg_regs           *otg_v1_regs;
-       struct cdns3_otg_common_regs    *otg_regs;
+       struct cdnsp_otg_regs           *otg_cdnsp_regs;
+       struct cdns_otg_common_regs     *otg_regs;
+       struct cdns_otg_irq_regs        *otg_irq_regs;
 #define CDNS3_CONTROLLER_V0    0
 #define CDNS3_CONTROLLER_V1    1
+#define CDNSP_CONTROLLER_V2    2
        u32                             version;
        bool                            phyrst_a_enable;
 
        int                             otg_irq;
        int                             dev_irq;
        int                             wakeup_irq;
-       struct cdns3_role_driver        *roles[USB_ROLE_DEVICE + 1];
+       struct cdns_role_driver         *roles[USB_ROLE_DEVICE + 1];
        enum usb_role                   role;
        struct platform_device          *host_dev;
-       struct cdns3_device             *gadget_dev;
+       void                            *gadget_dev;
        struct phy                      *usb2_phy;
        struct phy                      *usb3_phy;
        /* mutex used in workqueue */
@@ -110,8 +116,21 @@ struct cdns3 {
        struct cdns3_platform_data      *pdata;
        spinlock_t                      lock;
        struct xhci_plat_priv           *xhci_plat_data;
+
+       int (*gadget_init)(struct cdns *cdns);
 };
 
-int cdns3_hw_role_switch(struct cdns3 *cdns);
+int cdns_hw_role_switch(struct cdns *cdns);
+int cdns_init(struct cdns *cdns);
+int cdns_remove(struct cdns *cdns);
 
+#ifdef CONFIG_PM_SLEEP
+int cdns_resume(struct cdns *cdns, u8 set_active);
+int cdns_suspend(struct cdns *cdns);
+#else /* CONFIG_PM_SLEEP */
+static inline int cdns_resume(struct cdns *cdns, u8 set_active)
+{ return 0; }
+static inline int cdns_suspend(struct cdns *cdns)
+{ return 0; }
+#endif /* CONFIG_PM_SLEEP */
 #endif /* __LINUX_CDNS3_CORE_H */
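
The trailing #ifdef CONFIG_PM_SLEEP block uses the usual stub pattern: real cdns_suspend()/cdns_resume() when sleep support is configured, static inline no-ops otherwise, so glue drivers can call them unconditionally. A sketch of how a glue driver might route its system PM ops through them (wrapper names illustrative, not from this patch):

    #include <linux/pm.h>
    #include "core.h"

    static int example_glue_suspend(struct device *dev)
    {
            struct cdns *cdns = dev_get_drvdata(dev);

            return cdns_suspend(cdns);
    }

    static int example_glue_resume(struct device *dev)
    {
            struct cdns *cdns = dev_get_drvdata(dev);

            /* set_active = 1: re-arm runtime PM after system resume */
            return cdns_resume(cdns, 1);
    }

    static const struct dev_pm_ops example_glue_pm_ops = {
            SET_SYSTEM_SLEEP_PM_OPS(example_glue_suspend, example_glue_resume)
    };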
index 38ccd29e4cdef1e0dda34fc13cb4bb774d9642ca..605a413db727d4e9fc4e3653d55aa330ce86b2f1 100644 (file)
@@ -1,35 +1,33 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Cadence USBSS DRD Driver.
+ * Cadence USBSS and USBSSP DRD Driver.
  *
- * Copyright (C) 2018-2019 Cadence.
+ * Copyright (C) 2018-2020 Cadence.
  * Copyright (C) 2019 Texas Instruments
  *
  * Author: Pawel Laszczak <pawell@cadence.com>
  *         Roger Quadros <rogerq@ti.com>
  *
- *
  */
 #include <linux/kernel.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
 #include <linux/iopoll.h>
 #include <linux/usb/otg.h>
-#include <linux/phy/phy.h>
 
-#include "gadget.h"
 #include "drd.h"
 #include "core.h"
 
 /**
- * cdns3_set_mode - change mode of OTG Core
+ * cdns_set_mode - change mode of OTG Core
  * @cdns: pointer to context structure
  * @mode: selected mode from cdns_role
  *
  * Returns 0 on success otherwise negative errno
  */
-int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
+static int cdns_set_mode(struct cdns *cdns, enum usb_dr_mode mode)
 {
+       u32 __iomem *override_reg;
        u32 reg;
 
        switch (mode) {
@@ -39,11 +37,24 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
                break;
        case USB_DR_MODE_OTG:
                dev_dbg(cdns->dev, "Set controller to OTG mode\n");
-               if (cdns->version == CDNS3_CONTROLLER_V1) {
-                       reg = readl(&cdns->otg_v1_regs->override);
+
+               if (cdns->version == CDNSP_CONTROLLER_V2)
+                       override_reg = &cdns->otg_cdnsp_regs->override;
+               else if (cdns->version == CDNS3_CONTROLLER_V1)
+                       override_reg = &cdns->otg_v1_regs->override;
+               else
+                       override_reg = &cdns->otg_v0_regs->ctrl1;
+
+               reg = readl(override_reg);
+
+               if (cdns->version != CDNS3_CONTROLLER_V0)
                        reg |= OVERRIDE_IDPULLUP;
-                       writel(reg, &cdns->otg_v1_regs->override);
+               else
+                       reg |= OVERRIDE_IDPULLUP_V0;
+
+               writel(reg, override_reg);
 
+               if (cdns->version == CDNS3_CONTROLLER_V1) {
                        /*
                         * Enable work around feature built into the
                         * controller to address issue with RX Sensitivity
@@ -55,10 +66,6 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
                                reg |= PHYRST_CFG_PHYRST_A_ENABLE;
                                writel(reg, &cdns->otg_v1_regs->phyrst_cfg);
                        }
-               } else {
-                       reg = readl(&cdns->otg_v0_regs->ctrl1);
-                       reg |= OVERRIDE_IDPULLUP_V0;
-                       writel(reg, &cdns->otg_v0_regs->ctrl1);
                }
 
                /*
@@ -76,7 +83,7 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode)
        return 0;
 }
 
-int cdns3_get_id(struct cdns3 *cdns)
+int cdns_get_id(struct cdns *cdns)
 {
        int id;
 
@@ -86,7 +93,7 @@ int cdns3_get_id(struct cdns3 *cdns)
        return id;
 }
 
-int cdns3_get_vbus(struct cdns3 *cdns)
+int cdns_get_vbus(struct cdns *cdns)
 {
        int vbus;
 
@@ -96,64 +103,95 @@ int cdns3_get_vbus(struct cdns3 *cdns)
        return vbus;
 }
 
-bool cdns3_is_host(struct cdns3 *cdns)
+void cdns_clear_vbus(struct cdns *cdns)
+{
+       u32 reg;
+
+       if (cdns->version != CDNSP_CONTROLLER_V2)
+               return;
+
+       reg = readl(&cdns->otg_cdnsp_regs->override);
+       reg |= OVERRIDE_SESS_VLD_SEL;
+       writel(reg, &cdns->otg_cdnsp_regs->override);
+}
+EXPORT_SYMBOL_GPL(cdns_clear_vbus);
+
+void cdns_set_vbus(struct cdns *cdns)
+{
+       u32 reg;
+
+       if (cdns->version != CDNSP_CONTROLLER_V2)
+               return;
+
+       reg = readl(&cdns->otg_cdnsp_regs->override);
+       reg &= ~OVERRIDE_SESS_VLD_SEL;
+       writel(reg, &cdns->otg_cdnsp_regs->override);
+}
+EXPORT_SYMBOL_GPL(cdns_set_vbus);
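
Both helpers are exported so the separately built CDNSP gadget module can steer the session-valid override; on non-CDNSP parts they return immediately. A hedged sketch of a caller (the wrapper function is illustrative, not from the gadget driver):

/* Illustrative only: toggle the Vbusvalid/Sesvalid override select. */
static void example_session_override(struct cdns *cdns, bool valid)
{
	if (valid)
		cdns_set_vbus(cdns);	/* clears OVERRIDE_SESS_VLD_SEL */
	else
		cdns_clear_vbus(cdns);	/* sets OVERRIDE_SESS_VLD_SEL */
}
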
+
+bool cdns_is_host(struct cdns *cdns)
 {
        if (cdns->dr_mode == USB_DR_MODE_HOST)
                return true;
-       else if (cdns3_get_id(cdns) == CDNS3_ID_HOST)
+       else if (cdns_get_id(cdns) == CDNS3_ID_HOST)
                return true;
 
        return false;
 }
 
-bool cdns3_is_device(struct cdns3 *cdns)
+bool cdns_is_device(struct cdns *cdns)
 {
        if (cdns->dr_mode == USB_DR_MODE_PERIPHERAL)
                return true;
        else if (cdns->dr_mode == USB_DR_MODE_OTG)
-               if (cdns3_get_id(cdns) == CDNS3_ID_PERIPHERAL)
+               if (cdns_get_id(cdns) == CDNS3_ID_PERIPHERAL)
                        return true;
 
        return false;
 }
 
 /**
- * cdns3_otg_disable_irq - Disable all OTG interrupts
+ * cdns_otg_disable_irq - Disable all OTG interrupts
  * @cdns: Pointer to controller context structure
  */
-static void cdns3_otg_disable_irq(struct cdns3 *cdns)
+static void cdns_otg_disable_irq(struct cdns *cdns)
 {
-       writel(0, &cdns->otg_regs->ien);
+       writel(0, &cdns->otg_irq_regs->ien);
 }
 
 /**
- * cdns3_otg_enable_irq - enable id and sess_valid interrupts
+ * cdns_otg_enable_irq - enable id and sess_valid interrupts
  * @cdns: Pointer to controller context structure
  */
-static void cdns3_otg_enable_irq(struct cdns3 *cdns)
+static void cdns_otg_enable_irq(struct cdns *cdns)
 {
        writel(OTGIEN_ID_CHANGE_INT | OTGIEN_VBUSVALID_RISE_INT |
-              OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_regs->ien);
+              OTGIEN_VBUSVALID_FALL_INT, &cdns->otg_irq_regs->ien);
 }
 
 /**
- * cdns3_drd_host_on - start host.
+ * cdns_drd_host_on - start host.
  * @cdns: Pointer to controller context structure.
  *
  * Returns 0 on success otherwise negative errno.
  */
-int cdns3_drd_host_on(struct cdns3 *cdns)
+int cdns_drd_host_on(struct cdns *cdns)
 {
-       u32 val;
+       u32 val, ready_bit;
        int ret;
 
        /* Enable host mode. */
        writel(OTGCMD_HOST_BUS_REQ | OTGCMD_OTG_DIS,
               &cdns->otg_regs->cmd);
 
+       if (cdns->version == CDNSP_CONTROLLER_V2)
+               ready_bit = OTGSTS_CDNSP_XHCI_READY;
+       else
+               ready_bit = OTGSTS_CDNS3_XHCI_READY;
+
        dev_dbg(cdns->dev, "Waiting till Host mode is turned on\n");
        ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
-                                       val & OTGSTS_XHCI_READY, 1, 100000);
+                                       val & ready_bit, 1, 100000);
 
        if (ret)
                dev_err(cdns->dev, "timeout waiting for xhci_ready\n");
@@ -163,10 +201,10 @@ int cdns3_drd_host_on(struct cdns3 *cdns)
 }
 
 /**
- * cdns3_drd_host_off - stop host.
+ * cdns_drd_host_off - stop host.
  * @cdns: Pointer to controller context structure.
  */
-void cdns3_drd_host_off(struct cdns3 *cdns)
+void cdns_drd_host_off(struct cdns *cdns)
 {
        u32 val;
 
@@ -182,24 +220,29 @@ void cdns3_drd_host_off(struct cdns3 *cdns)
 }
 
 /**
- * cdns3_drd_gadget_on - start gadget.
+ * cdns_drd_gadget_on - start gadget.
  * @cdns: Pointer to controller context structure.
  *
  * Returns 0 on success otherwise negative errno
  */
-int cdns3_drd_gadget_on(struct cdns3 *cdns)
+int cdns_drd_gadget_on(struct cdns *cdns)
 {
-       int ret, val;
        u32 reg = OTGCMD_OTG_DIS;
+       u32 ready_bit;
+       int ret, val;
 
        /* switch OTG core */
        writel(OTGCMD_DEV_BUS_REQ | reg, &cdns->otg_regs->cmd);
 
        dev_dbg(cdns->dev, "Waiting till Device mode is turned on\n");
 
+       if (cdns->version == CDNSP_CONTROLLER_V2)
+               ready_bit = OTGSTS_CDNSP_DEV_READY;
+       else
+               ready_bit = OTGSTS_CDNS3_DEV_READY;
+
        ret = readl_poll_timeout_atomic(&cdns->otg_regs->sts, val,
-                                       val & OTGSTS_DEV_READY,
-                                       1, 100000);
+                                       val & ready_bit, 1, 100000);
        if (ret) {
                dev_err(cdns->dev, "timeout waiting for dev_ready\n");
                return ret;
@@ -208,12 +251,13 @@ int cdns3_drd_gadget_on(struct cdns3 *cdns)
        phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_DEVICE);
        return 0;
 }
+EXPORT_SYMBOL_GPL(cdns_drd_gadget_on);
 
 /**
- * cdns3_drd_gadget_off - stop gadget.
+ * cdns_drd_gadget_off - stop gadget.
  * @cdns: Pointer to controller context structure.
  */
-void cdns3_drd_gadget_off(struct cdns3 *cdns)
+void cdns_drd_gadget_off(struct cdns *cdns)
 {
        u32 val;
 
@@ -231,49 +275,50 @@ void cdns3_drd_gadget_off(struct cdns3 *cdns)
                                  1, 2000000);
        phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID);
 }
+EXPORT_SYMBOL_GPL(cdns_drd_gadget_off);
 
 /**
- * cdns3_init_otg_mode - initialize drd controller
+ * cdns_init_otg_mode - initialize drd controller
  * @cdns: Pointer to controller context structure
  *
  * Returns 0 on success otherwise negative errno
  */
-static int cdns3_init_otg_mode(struct cdns3 *cdns)
+static int cdns_init_otg_mode(struct cdns *cdns)
 {
        int ret;
 
-       cdns3_otg_disable_irq(cdns);
+       cdns_otg_disable_irq(cdns);
        /* clear all interrupts */
-       writel(~0, &cdns->otg_regs->ivect);
+       writel(~0, &cdns->otg_irq_regs->ivect);
 
-       ret = cdns3_set_mode(cdns, USB_DR_MODE_OTG);
+       ret = cdns_set_mode(cdns, USB_DR_MODE_OTG);
        if (ret)
                return ret;
 
-       cdns3_otg_enable_irq(cdns);
+       cdns_otg_enable_irq(cdns);
 
        return 0;
 }
 
 /**
- * cdns3_drd_update_mode - initialize mode of operation
+ * cdns_drd_update_mode - initialize mode of operation
  * @cdns: Pointer to controller context structure
  *
  * Returns 0 on success otherwise negative errno
  */
-int cdns3_drd_update_mode(struct cdns3 *cdns)
+int cdns_drd_update_mode(struct cdns *cdns)
 {
        int ret;
 
        switch (cdns->dr_mode) {
        case USB_DR_MODE_PERIPHERAL:
-               ret = cdns3_set_mode(cdns, USB_DR_MODE_PERIPHERAL);
+               ret = cdns_set_mode(cdns, USB_DR_MODE_PERIPHERAL);
                break;
        case USB_DR_MODE_HOST:
-               ret = cdns3_set_mode(cdns, USB_DR_MODE_HOST);
+               ret = cdns_set_mode(cdns, USB_DR_MODE_HOST);
                break;
        case USB_DR_MODE_OTG:
-               ret = cdns3_init_otg_mode(cdns);
+               ret = cdns_init_otg_mode(cdns);
                break;
        default:
                dev_err(cdns->dev, "Unsupported mode of operation %d\n",
@@ -284,27 +329,27 @@ int cdns3_drd_update_mode(struct cdns3 *cdns)
        return ret;
 }
 
-static irqreturn_t cdns3_drd_thread_irq(int irq, void *data)
+static irqreturn_t cdns_drd_thread_irq(int irq, void *data)
 {
-       struct cdns3 *cdns = data;
+       struct cdns *cdns = data;
 
-       cdns3_hw_role_switch(cdns);
+       cdns_hw_role_switch(cdns);
 
        return IRQ_HANDLED;
 }
 
 /**
- * cdns3_drd_irq - interrupt handler for OTG events
+ * cdns_drd_irq - interrupt handler for OTG events
  *
- * @irq: irq number for cdns3 core device
- * @data: structure of cdns3
+ * @irq: irq number for cdns core device
+ * @data: structure of cdns
  *
  * Returns IRQ_HANDLED or IRQ_NONE
  */
-static irqreturn_t cdns3_drd_irq(int irq, void *data)
+static irqreturn_t cdns_drd_irq(int irq, void *data)
 {
        irqreturn_t ret = IRQ_NONE;
-       struct cdns3 *cdns = data;
+       struct cdns *cdns = data;
        u32 reg;
 
        if (cdns->dr_mode != USB_DR_MODE_OTG)
@@ -313,30 +358,30 @@ static irqreturn_t cdns3_drd_irq(int irq, void *data)
        if (cdns->in_lpm)
                return ret;
 
-       reg = readl(&cdns->otg_regs->ivect);
+       reg = readl(&cdns->otg_irq_regs->ivect);
 
        if (!reg)
                return IRQ_NONE;
 
        if (reg & OTGIEN_ID_CHANGE_INT) {
                dev_dbg(cdns->dev, "OTG IRQ: new ID: %d\n",
-                       cdns3_get_id(cdns));
+                       cdns_get_id(cdns));
 
                ret = IRQ_WAKE_THREAD;
        }
 
        if (reg & (OTGIEN_VBUSVALID_RISE_INT | OTGIEN_VBUSVALID_FALL_INT)) {
                dev_dbg(cdns->dev, "OTG IRQ: new VBUS: %d\n",
-                       cdns3_get_vbus(cdns));
+                       cdns_get_vbus(cdns));
 
                ret = IRQ_WAKE_THREAD;
        }
 
-       writel(~0, &cdns->otg_regs->ivect);
+       writel(~0, &cdns->otg_irq_regs->ivect);
        return ret;
 }
 
-int cdns3_drd_init(struct cdns3 *cdns)
+int cdns_drd_init(struct cdns *cdns)
 {
        void __iomem *regs;
        u32 state;
@@ -347,28 +392,43 @@ int cdns3_drd_init(struct cdns3 *cdns)
                return PTR_ERR(regs);
 
        /* Detection of DRD version. Controller has been released
-        * in two versions. Both are similar, but they have same changes
-        * in register maps.
-        * The first register in old version is command register and it's read
-        * only, so driver should read 0 from it. On the other hand, in v1
-        * the first register contains device ID number which is not set to 0.
-        * Driver uses this fact to detect the proper version of
+        * in three versions. All are very similar and software compatible,
+        * but they have some differences in their register maps.
+        * The first register in the oldest version is the command register,
+        * which is read-only, so the driver should read 0 from it. In v1 and
+        * v2 the first register contains a device ID number, which is never
+        * set to 0. The driver uses this fact to detect the version of the
         * controller.
         */
        cdns->otg_v0_regs = regs;
        if (!readl(&cdns->otg_v0_regs->cmd)) {
                cdns->version  = CDNS3_CONTROLLER_V0;
                cdns->otg_v1_regs = NULL;
+               cdns->otg_cdnsp_regs = NULL;
                cdns->otg_regs = regs;
+               cdns->otg_irq_regs = (struct cdns_otg_irq_regs *)
+                                    &cdns->otg_v0_regs->ien;
                writel(1, &cdns->otg_v0_regs->simulate);
                dev_dbg(cdns->dev, "DRD version v0 (%08x)\n",
                         readl(&cdns->otg_v0_regs->version));
        } else {
                cdns->otg_v0_regs = NULL;
                cdns->otg_v1_regs = regs;
+               cdns->otg_cdnsp_regs = regs;
+
                cdns->otg_regs = (void *)&cdns->otg_v1_regs->cmd;
-               cdns->version  = CDNS3_CONTROLLER_V1;
-               writel(1, &cdns->otg_v1_regs->simulate);
+
+               if (cdns->otg_cdnsp_regs->did == OTG_CDNSP_DID) {
+                       cdns->otg_irq_regs = (struct cdns_otg_irq_regs *)
+                                             &cdns->otg_cdnsp_regs->ien;
+                       cdns->version  = CDNSP_CONTROLLER_V2;
+               } else {
+                       cdns->otg_irq_regs = (struct cdns_otg_irq_regs *)
+                                             &cdns->otg_v1_regs->ien;
+                       writel(1, &cdns->otg_v1_regs->simulate);
+                       cdns->version  = CDNS3_CONTROLLER_V1;
+               }
+
                dev_dbg(cdns->dev, "DRD version v1 (ID: %08x, rev: %08x)\n",
                         readl(&cdns->otg_v1_regs->did),
                         readl(&cdns->otg_v1_regs->rid));
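
The detection above reduces to a three-way decision; a condensed restatement as a standalone helper (illustration only, not the driver's actual code):

static u32 example_detect_version(struct cdns3_otg_legacy_regs __iomem *v0,
				  struct cdnsp_otg_regs __iomem *vx)
{
	/* v0: the first register is a read-only command register */
	if (!readl(&v0->cmd))
		return CDNS3_CONTROLLER_V0;
	/* v1/v2: the first register is a non-zero device ID */
	if (readl(&vx->did) == OTG_CDNSP_DID)
		return CDNSP_CONTROLLER_V2;
	return CDNS3_CONTROLLER_V1;
}
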
@@ -378,17 +438,24 @@ int cdns3_drd_init(struct cdns3 *cdns)
 
        /* Update dr_mode according to STRAP configuration. */
        cdns->dr_mode = USB_DR_MODE_OTG;
-       if (state == OTGSTS_STRAP_HOST) {
+
+       if ((cdns->version == CDNSP_CONTROLLER_V2 &&
+            state == OTGSTS_CDNSP_STRAP_HOST) ||
+           (cdns->version != CDNSP_CONTROLLER_V2 &&
+            state == OTGSTS_STRAP_HOST)) {
                dev_dbg(cdns->dev, "Controller strapped to HOST\n");
                cdns->dr_mode = USB_DR_MODE_HOST;
-       } else if (state == OTGSTS_STRAP_GADGET) {
+       } else if ((cdns->version == CDNSP_CONTROLLER_V2 &&
+                   state == OTGSTS_CDNSP_STRAP_GADGET) ||
+                  (cdns->version != CDNSP_CONTROLLER_V2 &&
+                   state == OTGSTS_STRAP_GADGET)) {
                dev_dbg(cdns->dev, "Controller strapped to PERIPHERAL\n");
                cdns->dr_mode = USB_DR_MODE_PERIPHERAL;
        }
 
        ret = devm_request_threaded_irq(cdns->dev, cdns->otg_irq,
-                                       cdns3_drd_irq,
-                                       cdns3_drd_thread_irq,
+                                       cdns_drd_irq,
+                                       cdns_drd_thread_irq,
                                        IRQF_SHARED,
                                        dev_name(cdns->dev), cdns);
        if (ret) {
@@ -405,8 +472,9 @@ int cdns3_drd_init(struct cdns3 *cdns)
        return 0;
 }
 
-int cdns3_drd_exit(struct cdns3 *cdns)
+int cdns_drd_exit(struct cdns *cdns)
 {
-       cdns3_otg_disable_irq(cdns);
+       cdns_otg_disable_irq(cdns);
+
        return 0;
 }
index f1ccae285a16dbd40e03ec3a4607b4140025dde5..9724acdecbbb0fbe8bde1df2d6276a75e2206659 100644 (file)
@@ -1,8 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Cadence USB3 DRD header file.
+ * Cadence USB3 and USBSSP DRD header file.
  *
- * Copyright (C) 2018-2019 Cadence.
+ * Copyright (C) 2018-2020 Cadence.
  *
  * Author: Pawel Laszczak <pawell@cadence.com>
  */
 #define __LINUX_CDNS3_DRD
 
 #include <linux/usb/otg.h>
-#include <linux/phy/phy.h>
 #include "core.h"
 
-/*  DRD register interface for version v1. */
+/*  DRD register interface for version v1 of the cdns3 driver. */
 struct cdns3_otg_regs {
        __le32 did;
        __le32 rid;
@@ -38,7 +37,7 @@ struct cdns3_otg_regs {
        __le32 ctrl2;
 };
 
-/*  DRD register interface for version v0. */
+/*  DRD register interface for version v0 of the cdns3 driver. */
 struct cdns3_otg_legacy_regs {
        __le32 cmd;
        __le32 sts;
@@ -57,14 +56,45 @@ struct cdns3_otg_legacy_regs {
        __le32 ctrl1;
 };
 
+/* DRD register interface for the cdnsp driver. */
+struct cdnsp_otg_regs {
+       __le32 did;
+       __le32 rid;
+       __le32 cfgs1;
+       __le32 cfgs2;
+       __le32 cmd;
+       __le32 sts;
+       __le32 state;
+       __le32 ien;
+       __le32 ivect;
+       __le32 tmr;
+       __le32 simulate;
+       __le32 adpbc_sts;
+       __le32 adp_ramp_time;
+       __le32 adpbc_ctrl1;
+       __le32 adpbc_ctrl2;
+       __le32 override;
+       __le32 vbusvalid_dbnc_cfg;
+       __le32 sessvalid_dbnc_cfg;
+       __le32 susp_timing_ctrl;
+};
+
+#define OTG_CDNSP_DID  0x0004034E
+
 /*
- * Common registers interface for both version of DRD.
+ * Common registers interface for both CDNS3 and CDNSP version of DRD.
  */
-struct cdns3_otg_common_regs {
+struct cdns_otg_common_regs {
        __le32 cmd;
        __le32 sts;
        __le32 state;
-       __le32 different1;
+};
+
+/*
+ * Interrupt related registers. These registers are mapped at a different
+ * location on the CDNSP controller.
+ */
+struct cdns_otg_irq_regs {
        __le32 ien;
        __le32 ivect;
 };
@@ -92,9 +122,9 @@ struct cdns3_otg_common_regs {
 #define OTGCMD_DEV_BUS_DROP            BIT(8)
 /* Drop the bus for Host mode*/
 #define OTGCMD_HOST_BUS_DROP           BIT(9)
-/* Power Down USBSS-DEV*/
+/* Power Down USBSS-DEV - only for CDNS3. */
 #define OTGCMD_DEV_POWER_OFF           BIT(11)
-/* Power Down CDNSXHCI. */
+/* Power Down CDNSXHCI - only for CDNS3. */
 #define OTGCMD_HOST_POWER_OFF          BIT(12)
 
 /* OTGIEN - bitmasks */
@@ -123,20 +153,31 @@ struct cdns3_otg_common_regs {
 #define OTGSTS_OTG_NRDY_MASK           BIT(11)
 #define OTGSTS_OTG_NRDY(p)             ((p) & OTGSTS_OTG_NRDY_MASK)
 /*
- * Value of the strap pins.
+ * Value of the strap pins for:
+ * CDNS3:
  * 000 - no default configuration
 * 010 - Controller initially configured as Host
  * 100 - Controller initially configured as Device
+ * CDNSP:
+ * 000 - No default configuration.
+ * 001 - Controller initially configured as Host.
+ * 010 - Controller initially configured as Device.
  */
 #define OTGSTS_STRAP(p)                        (((p) & GENMASK(14, 12)) >> 12)
 #define OTGSTS_STRAP_NO_DEFAULT_CFG    0x00
 #define OTGSTS_STRAP_HOST_OTG          0x01
 #define OTGSTS_STRAP_HOST              0x02
 #define OTGSTS_STRAP_GADGET            0x04
+#define OTGSTS_CDNSP_STRAP_HOST                0x01
+#define OTGSTS_CDNSP_STRAP_GADGET      0x02
+
 /* Host mode is turned on. */
-#define OTGSTS_XHCI_READY              BIT(26)
+#define OTGSTS_CDNS3_XHCI_READY                BIT(26)
+#define OTGSTS_CDNSP_XHCI_READY                BIT(27)
+
 /* "Device mode is turned on .*/
-#define OTGSTS_DEV_READY               BIT(27)
+#define OTGSTS_CDNS3_DEV_READY         BIT(27)
+#define OTGSTS_CDNSP_DEV_READY         BIT(26)
 
 /* OTGSTATE- bitmasks */
 #define OTGSTATE_DEV_STATE_MASK                GENMASK(2, 0)
@@ -152,6 +193,8 @@ struct cdns3_otg_common_regs {
 #define OVERRIDE_IDPULLUP              BIT(0)
 /* Only for CDNS3_CONTROLLER_V0 version */
 #define OVERRIDE_IDPULLUP_V0           BIT(24)
+/* Vbusvalid/Sesvalid override select. */
+#define OVERRIDE_SESS_VLD_SEL          BIT(10)
 
 /* PHYRST_CFG - bitmasks */
 #define PHYRST_CFG_PHYRST_A_ENABLE     BIT(0)
@@ -159,17 +202,18 @@ struct cdns3_otg_common_regs {
 #define CDNS3_ID_PERIPHERAL            1
 #define CDNS3_ID_HOST                  0
 
-bool cdns3_is_host(struct cdns3 *cdns);
-bool cdns3_is_device(struct cdns3 *cdns);
-int cdns3_get_id(struct cdns3 *cdns);
-int cdns3_get_vbus(struct cdns3 *cdns);
-int cdns3_drd_init(struct cdns3 *cdns);
-int cdns3_drd_exit(struct cdns3 *cdns);
-int cdns3_drd_update_mode(struct cdns3 *cdns);
-int cdns3_drd_gadget_on(struct cdns3 *cdns);
-void cdns3_drd_gadget_off(struct cdns3 *cdns);
-int cdns3_drd_host_on(struct cdns3 *cdns);
-void cdns3_drd_host_off(struct cdns3 *cdns);
-int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode);
+bool cdns_is_host(struct cdns *cdns);
+bool cdns_is_device(struct cdns *cdns);
+int cdns_get_id(struct cdns *cdns);
+int cdns_get_vbus(struct cdns *cdns);
+void cdns_clear_vbus(struct cdns *cdns);
+void cdns_set_vbus(struct cdns *cdns);
+int cdns_drd_init(struct cdns *cdns);
+int cdns_drd_exit(struct cdns *cdns);
+int cdns_drd_update_mode(struct cdns *cdns);
+int cdns_drd_gadget_on(struct cdns *cdns);
+void cdns_drd_gadget_off(struct cdns *cdns);
+int cdns_drd_host_on(struct cdns *cdns);
+void cdns_drd_host_off(struct cdns *cdns);
 
 #endif /* __LINUX_CDNS3_DRD */
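
Since the strap encoding differs between CDNS3 and CDNSP, decoding OTGSTS has to be version-aware, as cdns_drd_init() above is. The same logic as a compact helper (name illustrative):

static enum usb_dr_mode example_strap_to_dr_mode(u32 version, u32 otgsts)
{
	u32 strap = OTGSTS_STRAP(otgsts);
	bool cdnsp = (version == CDNSP_CONTROLLER_V2);

	if (strap == (cdnsp ? OTGSTS_CDNSP_STRAP_HOST : OTGSTS_STRAP_HOST))
		return USB_DR_MODE_HOST;
	if (strap == (cdnsp ? OTGSTS_CDNSP_STRAP_GADGET : OTGSTS_STRAP_GADGET))
		return USB_DR_MODE_PERIPHERAL;
	return USB_DR_MODE_OTG;
}
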
index 702c5a267a927f1c81c1a72b6f22e44edf67a3a5..c37b6269b001019054c4c4ebcfe31340e2586bc3 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Cadence USBSS DRD Driver - Gadget Export APIs.
+ * Cadence USBSS and USBSSP DRD Driver - Gadget Export APIs.
  *
  * Copyright (C) 2017 NXP
  * Copyright (C) 2017-2018 NXP
 #ifndef __LINUX_CDNS3_GADGET_EXPORT
 #define __LINUX_CDNS3_GADGET_EXPORT
 
-#ifdef CONFIG_USB_CDNS3_GADGET
+#if IS_ENABLED(CONFIG_USB_CDNSP_GADGET)
 
-int cdns3_gadget_init(struct cdns3 *cdns);
+int cdnsp_gadget_init(struct cdns *cdns);
 #else
 
-static inline int cdns3_gadget_init(struct cdns3 *cdns)
+static inline int cdnsp_gadget_init(struct cdns *cdns)
 {
        return -ENXIO;
 }
 
-#endif
+#endif /* CONFIG_USB_CDNSP_GADGET */
+
+#if IS_ENABLED(CONFIG_USB_CDNS3_GADGET)
+
+int cdns3_gadget_init(struct cdns *cdns);
+#else
+
+static inline int cdns3_gadget_init(struct cdns *cdns)
+{
+       return -ENXIO;
+}
+
+#endif /* CONFIG_USB_CDNS3_GADGET */
 
 #endif /* __LINUX_CDNS3_GADGET_EXPORT */
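
With a stub for each back end, either gadget driver (or both) can be compiled out and the core still links; the core then only has to pick the init callback that matches the detected controller. A hedged sketch of that selection (the actual wiring lives in core.c, which is not part of this hunk):

/* Sketch only: how the core could populate cdns->gadget_init. */
static void example_pick_gadget_init(struct cdns *cdns)
{
	if (cdns->version == CDNSP_CONTROLLER_V2)
		cdns->gadget_init = cdnsp_gadget_init;
	else
		cdns->gadget_init = cdns3_gadget_init;
}
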
index 26041718a086cde7ef5ef3f4a396ec6b978e8672..cf92173ecf0083a7c1e6030f843af39e0f2a2b65 100644 (file)
@@ -1,6 +1,6 @@
 /* SPDX-License-Identifier: GPL-2.0 */
 /*
- * Cadence USBSS DRD Driver - Host Export APIs
+ * Cadence USBSS and USBSSP DRD Driver - Host Export APIs
  *
  * Copyright (C) 2017-2018 NXP
  *
@@ -9,25 +9,19 @@
 #ifndef __LINUX_CDNS3_HOST_EXPORT
 #define __LINUX_CDNS3_HOST_EXPORT
 
-struct usb_hcd;
-#ifdef CONFIG_USB_CDNS3_HOST
+#if IS_ENABLED(CONFIG_USB_CDNS_HOST)
 
-int cdns3_host_init(struct cdns3 *cdns);
-int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd);
+int cdns_host_init(struct cdns *cdns);
 
 #else
 
-static inline int cdns3_host_init(struct cdns3 *cdns)
+static inline int cdns_host_init(struct cdns *cdns)
 {
        return -ENXIO;
 }
 
-static inline void cdns3_host_exit(struct cdns3 *cdns) { }
-static inline int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd)
-{
-       return 0;
-}
+static inline void cdns_host_exit(struct cdns *cdns) { }
 
-#endif /* CONFIG_USB_CDNS3_HOST */
+#endif /* CONFIG_USB_CDNS_HOST */
 
 #endif /* __LINUX_CDNS3_HOST_EXPORT */
index ec89f2e5430f1d4a6889eb28497041ae429334d8..84dadfa726aa6a816283b610ff258e0d2b53990a 100644 (file)
@@ -1,6 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Cadence USBSS DRD Driver - host side
+ * Cadence USBSS and USBSSP DRD Driver - host side
  *
  * Copyright (C) 2018-2019 Cadence Design Systems.
  * Copyright (C) 2017-2018 NXP
 #define CFG_RXDET_P3_EN                BIT(15)
 #define LPM_2_STB_SWITCH_EN    BIT(25)
 
+static int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd);
+
 static const struct xhci_plat_priv xhci_plat_cdns3_xhci = {
        .quirks = XHCI_SKIP_PHY_INIT | XHCI_AVOID_BEI,
        .suspend_quirk = xhci_cdns3_suspend_quirk,
 };
 
-static int __cdns3_host_init(struct cdns3 *cdns)
+static int __cdns_host_init(struct cdns *cdns)
 {
        struct platform_device *xhci;
        int ret;
        struct usb_hcd *hcd;
 
-       cdns3_drd_host_on(cdns);
+       cdns_drd_host_on(cdns);
 
        xhci = platform_device_alloc("xhci-hcd", PLATFORM_DEVID_AUTO);
        if (!xhci) {
@@ -46,7 +48,7 @@ static int __cdns3_host_init(struct cdns3 *cdns)
        cdns->host_dev = xhci;
 
        ret = platform_device_add_resources(xhci, cdns->xhci_res,
-                                           CDNS3_XHCI_RESOURCES_NUM);
+                                           CDNS_XHCI_RESOURCES_NUM);
        if (ret) {
                dev_err(cdns->dev, "couldn't add resources to xHCI device\n");
                goto err1;
@@ -87,7 +89,7 @@ err1:
        return ret;
 }
 
-int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd)
+static int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        u32 value;
@@ -113,25 +115,25 @@ int xhci_cdns3_suspend_quirk(struct usb_hcd *hcd)
        return 0;
 }
 
-static void cdns3_host_exit(struct cdns3 *cdns)
+static void cdns_host_exit(struct cdns *cdns)
 {
        kfree(cdns->xhci_plat_data);
        platform_device_unregister(cdns->host_dev);
        cdns->host_dev = NULL;
-       cdns3_drd_host_off(cdns);
+       cdns_drd_host_off(cdns);
 }
 
-int cdns3_host_init(struct cdns3 *cdns)
+int cdns_host_init(struct cdns *cdns)
 {
-       struct cdns3_role_driver *rdrv;
+       struct cdns_role_driver *rdrv;
 
        rdrv = devm_kzalloc(cdns->dev, sizeof(*rdrv), GFP_KERNEL);
        if (!rdrv)
                return -ENOMEM;
 
-       rdrv->start     = __cdns3_host_init;
-       rdrv->stop      = cdns3_host_exit;
-       rdrv->state     = CDNS3_ROLE_STATE_INACTIVE;
+       rdrv->start     = __cdns_host_init;
+       rdrv->stop      = cdns_host_exit;
+       rdrv->state     = CDNS_ROLE_STATE_INACTIVE;
        rdrv->name      = "host";
 
        cdns->roles[USB_ROLE_HOST] = rdrv;
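
Each role registers a cdns_role_driver with start/stop callbacks, and the role-switch core invokes them when the role changes. A minimal sketch of such an invocation (helper name illustrative; the real dispatch is in core.c, not shown here):

static int example_start_role(struct cdns *cdns, enum usb_role role)
{
	struct cdns_role_driver *rdrv = cdns->roles[role];

	/* a role may be absent if its driver is compiled out */
	return rdrv ? rdrv->start(cdns) : -ENXIO;
}
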
index f02958927cbd8061a83db4354604883b8ad433a6..2ca1409792c42f0a4d84a346ce7593882048eeaa 100644 (file)
@@ -848,7 +848,7 @@ static int ucsi_reset_ppm(struct ucsi *ucsi)
                        goto out;
 
                /* If the PPM is still doing something else, reset it again. */
-               if (cci & ~UCSI_CCI_RESET_COMPLETE) {
+               if (cci & ~(UCSI_CCI_RESET_COMPLETE | UCSI_CCI_BUSY)) {
                        ret = ucsi->ops->async_write(ucsi, UCSI_CONTROL,
                                                     &command,
                                                     sizeof(command));
index aa4c122823018b756cdc566c33e20dbf834dfb10..976e8b9033c42be932cbeeeb1fcb6ba2a18748f8 100644 (file)
@@ -117,6 +117,15 @@ source "fs/verity/Kconfig"
 
 source "fs/notify/Kconfig"
 
+config MOUNT_NOTIFICATIONS
+       bool "Mount topology change notifications"
+       select WATCH_QUEUE
+       help
+         This option provides support for getting change notifications on the
+         mount tree topology.  This makes use of the /dev/watch_queue misc
+         device to handle the notification buffer and provides the
+         mount_notify() system call to enable/disable watchpoints.
+
 source "fs/quota/Kconfig"
 
 source "fs/autofs/Kconfig"
index 999d1a23f036c9f96a06e056d333e2e3832cdc37..b6c230914e3114a0d9359b02a180a384378f2e12 100644 (file)
@@ -23,6 +23,7 @@ obj-y +=      no-block.o
 endif
 
 obj-$(CONFIG_PROC_FS) += proc_namespace.o
+obj-$(CONFIG_MOUNT_NOTIFICATIONS) += mount_notify.o
 
 obj-y                          += notify/
 obj-$(CONFIG_EPOLL)            += eventpoll.o
index 52f2198d44c95c513d942af7a50f6807820cd931..f61e275bd79232ac220e51f5a138aac39dbe377e 100644 (file)
@@ -1254,6 +1254,13 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
        if (!test_bit(BTRFS_FS_OPEN, &fs_info->flags))
                return;
 
+       /*
+        * Long-running balances can keep us blocked here for eternity, so
+        * simply skip deletion if we're unable to get the mutex.
+        */
+       if (!mutex_trylock(&fs_info->delete_unused_bgs_mutex))
+               return;
+
        spin_lock(&fs_info->unused_bgs_lock);
        while (!list_empty(&fs_info->unused_bgs)) {
                int trimming;
@@ -1273,8 +1280,6 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 
                btrfs_discard_cancel_work(&fs_info->discard_ctl, block_group);
 
-               mutex_lock(&fs_info->delete_unused_bgs_mutex);
-
                /* Don't want to race with allocators so take the groups_sem */
                down_write(&space_info->groups_sem);
 
@@ -1420,11 +1425,11 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
 end_trans:
                btrfs_end_transaction(trans);
 next:
-               mutex_unlock(&fs_info->delete_unused_bgs_mutex);
                btrfs_put_block_group(block_group);
                spin_lock(&fs_info->unused_bgs_lock);
        }
        spin_unlock(&fs_info->unused_bgs_lock);
+       mutex_unlock(&fs_info->delete_unused_bgs_mutex);
        return;
 
 flip_async:
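
The trylock at the top encodes a policy worth noting: the cleaner would rather skip one deletion pass than block behind a long-running balance. The idiom, reduced to a standalone sketch:

#include <linux/mutex.h>

/* Generic form of the skip-instead-of-block pattern used above. */
static void example_opportunistic_work(struct mutex *lock)
{
	if (!mutex_trylock(lock))
		return;	/* holder is busy; try again on the next pass */

	/* ... do the optional work ... */

	mutex_unlock(lock);
}
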
index 555cbcef6585739b73bd2d5e9c495abddc462cca..d9bf53d9ff907cfe47603dc0bcb81d82f9afde99 100644 (file)
@@ -42,6 +42,15 @@ enum {
         * to an inode.
         */
        BTRFS_INODE_NO_XATTRS,
+       /*
+        * Set when we are in a context where we need to start a transaction and
+        * have dirty pages with the respective file range locked. This is to
+        * ensure that when reserving space for the transaction, if we are low
+        * on available space and need to flush delalloc, we will not flush
+        * delalloc for this inode, because that could result in a deadlock (on
+        * the file range, inode's io_tree).
+        */
+       BTRFS_INODE_NO_DELALLOC_FLUSH,
 };
 
 /* in memory btrfs inode */
index 07810891e20458ab308455dd960fc4373d18b3aa..cc89b63d65a4dfbd0ad0d95f9ebe02df58c281ce 100644 (file)
@@ -2555,8 +2555,14 @@ out:
  * @p:         Holds all btree nodes along the search path
  * @root:      The root node of the tree
  * @key:       The key we are looking for
- * @ins_len:   Indicates purpose of search, for inserts it is 1, for
- *             deletions it's -1. 0 for plain searches
+ * @ins_len:   Indicates purpose of search:
+ *              >0  for inserts, the size of the item to be inserted (*)
+ *              <0  for deletions
+ *               0  for plain searches, not modifying the tree
+ *
+ *              (*) If the size of the item to be inserted does not include
+ *              sizeof(struct btrfs_item), then p->search_for_extension must
+ *              be set.
  * @cow:       boolean should CoW operations be performed. Must always be 1
  *             when modifying the tree.
  *
@@ -2717,6 +2723,20 @@ cow_done:
 
                if (level == 0) {
                        p->slots[level] = slot;
+                       /*
+                        * Item key already exists. In this case, if we are
+                        * allowed to insert the item (for example, in the
+                        * dir_item case, item key collision is allowed), it
+                        * will be merged with the original item. Only the item
+                        * size grows; no new btrfs item will be added. If
+                        * search_for_extension is not set, ins_len already
+                        * accounts for sizeof(struct btrfs_item), so deduct it
+                        * here to keep the leaf space check correct.
+                        */
+                       if (ret == 0 && ins_len > 0 && !p->search_for_extension) {
+                               ASSERT(ins_len >= sizeof(struct btrfs_item));
+                               ins_len -= sizeof(struct btrfs_item);
+                       }
                        if (ins_len > 0 &&
                            btrfs_leaf_free_space(b) < ins_len) {
                                if (write_lock_level < 1) {
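
Callers that merge data into an existing item therefore pass only the data size and raise the new flag around the search, exactly as the extent-tree and file-item hunks further below do. The pattern, as a snippet (trans, root, key, path and extra_size assumed from the caller's context):

	path->search_for_extension = 1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	path->search_for_extension = 0;
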
index 1d3c1e479f3dc04c2faa21b50300a23d08b2f43a..2e7a0aab681b936a3df6385f5bc566f7878ab9a2 100644 (file)
@@ -131,6 +131,8 @@ enum {
         * defrag
         */
        BTRFS_FS_STATE_REMOUNTING,
+       /* Filesystem in RO mode */
+       BTRFS_FS_STATE_RO,
        /* Track if a transaction abort has been reported on this filesystem */
        BTRFS_FS_STATE_TRANS_ABORTED,
        /*
@@ -367,6 +369,12 @@ struct btrfs_path {
        unsigned int search_commit_root:1;
        unsigned int need_commit_sem:1;
        unsigned int skip_release_on_error:1;
+       /*
+        * Indicate that the new item (btrfs_search_slot) is extending an
+        * already existing item and that ins_len contains only the data size,
+        * not the item header (i.e. sizeof(struct btrfs_item) is not included).
+        */
+       unsigned int search_for_extension:1;
 };
 #define BTRFS_MAX_EXTENT_ITEM_SIZE(r) ((BTRFS_LEAF_DATA_SIZE(r->fs_info) >> 4) - \
                                        sizeof(struct btrfs_item))
@@ -786,7 +794,7 @@ struct btrfs_fs_info {
        /* used to keep from writing metadata until there is a nice batch */
        struct percpu_counter dirty_metadata_bytes;
        struct percpu_counter delalloc_bytes;
-       struct percpu_counter dio_bytes;
+       struct percpu_counter ordered_bytes;
        s32 dirty_metadata_batch;
        s32 delalloc_batch;
 
@@ -922,6 +930,7 @@ struct btrfs_fs_info {
        /* Used to reclaim the metadata space in the background. */
        struct work_struct async_reclaim_work;
        struct work_struct async_data_reclaim_work;
+       struct work_struct preempt_reclaim_work;
 
        spinlock_t unused_bgs_lock;
        struct list_head unused_bgs;
@@ -2729,6 +2738,7 @@ enum btrfs_flush_state {
        ALLOC_CHUNK_FORCE       =       8,
        RUN_DELAYED_IPUTS       =       9,
        COMMIT_TRANS            =       10,
+       FORCE_COMMIT_TRANS      =       11,
 };
 
 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
@@ -2885,10 +2895,26 @@ static inline int btrfs_fs_closing(struct btrfs_fs_info *fs_info)
  * If we remount the fs to be R/O or umount the fs, the cleaner needn't do
  * anything except sleeping. This function is used to check the status of
  * the fs.
+ * We check for BTRFS_FS_STATE_RO to avoid races with a concurrent remount,
+ * since setting and checking for SB_RDONLY in the superblock's flags is not
+ * atomic.
  */
 static inline int btrfs_need_cleaner_sleep(struct btrfs_fs_info *fs_info)
 {
-       return fs_info->sb->s_flags & SB_RDONLY || btrfs_fs_closing(fs_info);
+       return test_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state) ||
+               btrfs_fs_closing(fs_info);
+}
+
+static inline void btrfs_set_sb_rdonly(struct super_block *sb)
+{
+       sb->s_flags |= SB_RDONLY;
+       set_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
+}
+
+static inline void btrfs_clear_sb_rdonly(struct super_block *sb)
+{
+       sb->s_flags &= ~SB_RDONLY;
+       clear_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
 }
 
 /* tree mod log functions from ctree.c */
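
Paths that flip the filesystem's read-only state are expected to go through these helpers so the state bit and SB_RDONLY change together. A hedged sketch of a caller (function is illustrative):

/* Illustration only: a remount path using the new helpers. */
static void example_apply_remount(struct super_block *sb, bool to_ro)
{
	if (to_ro)
		btrfs_set_sb_rdonly(sb);
	else
		btrfs_clear_sb_rdonly(sb);
}
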
@@ -3073,7 +3099,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
                               u32 min_type);
 
 int btrfs_start_delalloc_snapshot(struct btrfs_root *root);
-int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr);
+int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
+                              bool in_reclaim_context);
 int btrfs_set_extent_delalloc(struct btrfs_inode *inode, u64 start, u64 end,
                              unsigned int extra_bits,
                              struct extent_state **cached_state);
index 1c977e6d45dc3157e6f1424d47b5daa5d76cd8c8..6e414785b56fed4c4bb89f6a41c52f40e2996dfd 100644 (file)
@@ -135,6 +135,11 @@ struct btrfs_delayed_data_ref {
        u64 offset;
 };
 
+enum btrfs_delayed_ref_flags {
+       /* Used to indicate that we are flushing delayed refs for the commit. */
+       BTRFS_DELAYED_REFS_FLUSHING,
+};
+
 struct btrfs_delayed_ref_root {
        /* head ref rbtree */
        struct rb_root_cached href_root;
@@ -158,12 +163,7 @@ struct btrfs_delayed_ref_root {
 
        u64 pending_csums;
 
-       /*
-        * set when the tree is flushing before a transaction commit,
-        * used by the throttling code to decide if new updates need
-        * to be run right away
-        */
-       int flushing;
+       unsigned long flags;
 
        u64 run_delayed_start;
 
index a98e33f232d55373563390854077a3ca4601d1a0..324f646d6e5e273b944235abe67721a0e3c607fe 100644 (file)
@@ -715,7 +715,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
         * flush all outstanding I/O and inode extent mappings before the
         * copy operation is declared as being finished
         */
-       ret = btrfs_start_delalloc_roots(fs_info, U64_MAX);
+       ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false);
        if (ret) {
                mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
                return ret;
index 1db966bf85b244fcf96cb67153053d62b149ca4c..2b8383d4114490bcc162f174a95ff1d6b2ef79e1 100644 (file)
@@ -199,16 +199,15 @@ static struct btrfs_block_group *find_next_block_group(
 static struct btrfs_block_group *peek_discard_list(
                                        struct btrfs_discard_ctl *discard_ctl,
                                        enum btrfs_discard_state *discard_state,
-                                       int *discard_index)
+                                       int *discard_index, u64 now)
 {
        struct btrfs_block_group *block_group;
-       const u64 now = ktime_get_ns();
 
        spin_lock(&discard_ctl->lock);
 again:
        block_group = find_next_block_group(discard_ctl, now);
 
-       if (block_group && now > block_group->discard_eligible_time) {
+       if (block_group && now >= block_group->discard_eligible_time) {
                if (block_group->discard_index == BTRFS_DISCARD_INDEX_UNUSED &&
                    block_group->used != 0) {
                        if (btrfs_is_block_group_data_only(block_group))
@@ -222,12 +221,11 @@ again:
                        block_group->discard_state = BTRFS_DISCARD_EXTENTS;
                }
                discard_ctl->block_group = block_group;
+       }
+       if (block_group) {
                *discard_state = block_group->discard_state;
                *discard_index = block_group->discard_index;
-       } else {
-               block_group = NULL;
        }
-
        spin_unlock(&discard_ctl->lock);
 
        return block_group;
@@ -330,28 +328,15 @@ void btrfs_discard_queue_work(struct btrfs_discard_ctl *discard_ctl,
                btrfs_discard_schedule_work(discard_ctl, false);
 }
 
-/**
- * btrfs_discard_schedule_work - responsible for scheduling the discard work
- * @discard_ctl: discard control
- * @override: override the current timer
- *
- * Discards are issued by a delayed workqueue item.  @override is used to
- * update the current delay as the baseline delay interval is reevaluated on
- * transaction commit.  This is also maxed with any other rate limit.
- */
-void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
-                                bool override)
+static void __btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
+                                         u64 now, bool override)
 {
        struct btrfs_block_group *block_group;
-       const u64 now = ktime_get_ns();
-
-       spin_lock(&discard_ctl->lock);
 
        if (!btrfs_run_discard_work(discard_ctl))
-               goto out;
-
+               return;
        if (!override && delayed_work_pending(&discard_ctl->work))
-               goto out;
+               return;
 
        block_group = find_next_block_group(discard_ctl, now);
        if (block_group) {
@@ -393,7 +378,24 @@ void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
                mod_delayed_work(discard_ctl->discard_workers,
                                 &discard_ctl->work, nsecs_to_jiffies(delay));
        }
-out:
+}
+
+/*
+ * btrfs_discard_schedule_work - responsible for scheduling the discard work
+ * @discard_ctl:  discard control
+ * @override:     override the current timer
+ *
+ * Discards are issued by a delayed workqueue item.  @override is used to
+ * update the current delay as the baseline delay interval is reevaluated on
+ * transaction commit.  This is also maxed with any other rate limit.
+ */
+void btrfs_discard_schedule_work(struct btrfs_discard_ctl *discard_ctl,
+                                bool override)
+{
+       const u64 now = ktime_get_ns();
+
+       spin_lock(&discard_ctl->lock);
+       __btrfs_discard_schedule_work(discard_ctl, now, override);
        spin_unlock(&discard_ctl->lock);
 }
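
The split into a locking wrapper and a __-prefixed locked helper lets the work function reschedule while already holding discard_ctl->lock and reuse a single ktime_get_ns() sample. The shape of the idiom, with hypothetical names:

#include <linux/ktime.h>
#include <linux/spinlock.h>

struct example_ctl {
	spinlock_t lock;
};

static void __example_schedule(struct example_ctl *ctl, u64 now)
{
	/* caller holds ctl->lock; 'now' was sampled once by the caller */
}

static void example_schedule(struct example_ctl *ctl)
{
	u64 now = ktime_get_ns();

	spin_lock(&ctl->lock);
	__example_schedule(ctl, now);
	spin_unlock(&ctl->lock);
}
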
 
@@ -438,13 +440,18 @@ static void btrfs_discard_workfn(struct work_struct *work)
        int discard_index = 0;
        u64 trimmed = 0;
        u64 minlen = 0;
+       u64 now = ktime_get_ns();
 
        discard_ctl = container_of(work, struct btrfs_discard_ctl, work.work);
 
        block_group = peek_discard_list(discard_ctl, &discard_state,
-                                       &discard_index);
+                                       &discard_index, now);
        if (!block_group || !btrfs_run_discard_work(discard_ctl))
                return;
+       if (now < block_group->discard_eligible_time) {
+               btrfs_discard_schedule_work(discard_ctl, false);
+               return;
+       }
 
        /* Perform discarding */
        minlen = discard_minlen[discard_index];
@@ -474,13 +481,6 @@ static void btrfs_discard_workfn(struct work_struct *work)
                discard_ctl->discard_extent_bytes += trimmed;
        }
 
-       /*
-        * Updated without locks as this is inside the workfn and nothing else
-        * is reading the values
-        */
-       discard_ctl->prev_discard = trimmed;
-       discard_ctl->prev_discard_time = ktime_get_ns();
-
        /* Determine next steps for a block_group */
        if (block_group->discard_cursor >= btrfs_block_group_end(block_group)) {
                if (discard_state == BTRFS_DISCARD_BITMAPS) {
@@ -496,11 +496,13 @@ static void btrfs_discard_workfn(struct work_struct *work)
                }
        }
 
+       now = ktime_get_ns();
        spin_lock(&discard_ctl->lock);
+       discard_ctl->prev_discard = trimmed;
+       discard_ctl->prev_discard_time = now;
        discard_ctl->block_group = NULL;
+       __btrfs_discard_schedule_work(discard_ctl, now, false);
        spin_unlock(&discard_ctl->lock);
-
-       btrfs_discard_schedule_work(discard_ctl, false);
 }
 
 /**
index 765deefda92b1770229001a2774f47d38341ba6b..b75cc338c9910f2783b49ab0ad2249eea372ab49 100644 (file)
@@ -1470,7 +1470,7 @@ void btrfs_free_fs_info(struct btrfs_fs_info *fs_info)
 {
        percpu_counter_destroy(&fs_info->dirty_metadata_bytes);
        percpu_counter_destroy(&fs_info->delalloc_bytes);
-       percpu_counter_destroy(&fs_info->dio_bytes);
+       percpu_counter_destroy(&fs_info->ordered_bytes);
        percpu_counter_destroy(&fs_info->dev_replace.bio_counter);
        btrfs_free_csum_hash(fs_info);
        btrfs_free_stripe_hash_table(fs_info);
@@ -1729,7 +1729,7 @@ static int cleaner_kthread(void *arg)
                 */
                btrfs_delete_unused_bgs(fs_info);
 sleep:
-               clear_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
+               clear_and_wake_up_bit(BTRFS_FS_CLEANER_RUNNING, &fs_info->flags);
                if (kthread_should_park())
                        kthread_parkme();
                if (kthread_should_stop())
@@ -2804,7 +2804,7 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
        sb->s_blocksize = BTRFS_BDEV_BLOCKSIZE;
        sb->s_blocksize_bits = blksize_bits(BTRFS_BDEV_BLOCKSIZE);
 
-       ret = percpu_counter_init(&fs_info->dio_bytes, 0, GFP_KERNEL);
+       ret = percpu_counter_init(&fs_info->ordered_bytes, 0, GFP_KERNEL);
        if (ret)
                return ret;
 
@@ -2830,6 +2830,9 @@ static int init_mount_fs_info(struct btrfs_fs_info *fs_info, struct super_block
                return -ENOMEM;
        btrfs_init_delayed_root(fs_info->delayed_root);
 
+       if (sb_rdonly(sb))
+               set_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
+
        return btrfs_alloc_stripe_hash_table(fs_info);
 }
 
@@ -2969,6 +2972,7 @@ int btrfs_start_pre_rw_mount(struct btrfs_fs_info *fs_info)
                }
        }
 
+       ret = btrfs_find_orphan_roots(fs_info);
 out:
        return ret;
 }
@@ -3383,10 +3387,6 @@ int __cold open_ctree(struct super_block *sb, struct btrfs_fs_devices *fs_device
                }
        }
 
-       ret = btrfs_find_orphan_roots(fs_info);
-       if (ret)
-               goto fail_qgroup;
-
        fs_info->fs_root = btrfs_get_fs_root(fs_info, BTRFS_FS_TREE_OBJECTID, true);
        if (IS_ERR(fs_info->fs_root)) {
                err = PTR_ERR(fs_info->fs_root);
@@ -4113,6 +4113,7 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
 
        cancel_work_sync(&fs_info->async_reclaim_work);
        cancel_work_sync(&fs_info->async_data_reclaim_work);
+       cancel_work_sync(&fs_info->preempt_reclaim_work);
 
        /* Cancel or finish ongoing discard work */
        btrfs_discard_cleanup(fs_info);
@@ -4165,9 +4166,9 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
                       percpu_counter_sum(&fs_info->delalloc_bytes));
        }
 
-       if (percpu_counter_sum(&fs_info->dio_bytes))
+       if (percpu_counter_sum(&fs_info->ordered_bytes))
                btrfs_info(fs_info, "at unmount dio bytes count %lld",
-                          percpu_counter_sum(&fs_info->dio_bytes));
+                          percpu_counter_sum(&fs_info->ordered_bytes));
 
        btrfs_sysfs_remove_mounted(fs_info);
        btrfs_sysfs_remove_fsid(fs_info->fs_devices);
@@ -4181,6 +4182,9 @@ void __cold close_ctree(struct btrfs_fs_info *fs_info)
        invalidate_inode_pages2(fs_info->btree_inode->i_mapping);
        btrfs_stop_all_workers(fs_info);
 
+       /* We shouldn't have any transaction open at this point */
+       ASSERT(list_empty(&fs_info->trans_list));
+
        clear_bit(BTRFS_FS_OPEN, &fs_info->flags);
        free_root_pointers(fs_info, true);
        btrfs_free_fs_roots(fs_info);
index 56ea380f5a178817ad6627c23e3f612b4c502eac..b6d774803a2c34ae6777b147dde8abf91f1825e5 100644 (file)
@@ -844,6 +844,7 @@ int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
        want = extent_ref_type(parent, owner);
        if (insert) {
                extra_size = btrfs_extent_inline_ref_size(want);
+               path->search_for_extension = 1;
                path->keep_locks = 1;
        } else
                extra_size = -1;
@@ -996,6 +997,7 @@ again:
 out:
        if (insert) {
                path->keep_locks = 0;
+               path->search_for_extension = 0;
                btrfs_unlock_up_safe(path, 1);
        }
        return err;
@@ -2158,7 +2160,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 
        delayed_refs = &trans->transaction->delayed_refs;
        if (count == 0)
-               count = atomic_read(&delayed_refs->num_entries) * 2;
+               count = delayed_refs->num_heads_ready;
 
 again:
 #ifdef SCRAMBLE_DELAYED_REFS
index 1545c22ef2804ce1f3690febd0565ecf2339d8d1..6ccfc019ad909eb06636d7007b31d1276902b665 100644 (file)
@@ -1016,8 +1016,10 @@ again:
        }
 
        btrfs_release_path(path);
+       path->search_for_extension = 1;
        ret = btrfs_search_slot(trans, root, &file_key, path,
                                csum_size, 1);
+       path->search_for_extension = 0;
        if (ret < 0)
                goto out;
 
index 4d8897879c9cbd98de1032e4e1b6ea0857c3f64a..71d0d14bc18b39305631a081e9c3e1163cde4c82 100644 (file)
@@ -775,8 +775,10 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
        while (num_entries) {
                e = kmem_cache_zalloc(btrfs_free_space_cachep,
                                      GFP_NOFS);
-               if (!e)
+               if (!e) {
+                       ret = -ENOMEM;
                        goto free_cache;
+               }
 
                ret = io_ctl_read_entry(&io_ctl, e, &type);
                if (ret) {
@@ -785,6 +787,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
                }
 
                if (!e->bytes) {
+                       ret = -1;
                        kmem_cache_free(btrfs_free_space_cachep, e);
                        goto free_cache;
                }
@@ -805,6 +808,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
                        e->bitmap = kmem_cache_zalloc(
                                        btrfs_free_space_bitmap_cachep, GFP_NOFS);
                        if (!e->bitmap) {
+                               ret = -ENOMEM;
                                kmem_cache_free(
                                        btrfs_free_space_cachep, e);
                                goto free_cache;
index 8e23780acfaeb4d0a4c5acfbdc6b35b69f76d858..070716650df87e2dc85700209f2b5eb73fc3206a 100644 (file)
@@ -9390,7 +9390,8 @@ static struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode
  * some fairly slow code that needs optimization. This walks the list
  * of all the inodes with pending delalloc and forces them to disk.
  */
-static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot)
+static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot,
+                                bool in_reclaim_context)
 {
        struct btrfs_inode *binode;
        struct inode *inode;
@@ -9411,6 +9412,11 @@ static int start_delalloc_inodes(struct btrfs_root *root, u64 *nr, bool snapshot
 
                list_move_tail(&binode->delalloc_inodes,
                               &root->delalloc_inodes);
+
+               if (in_reclaim_context &&
+                   test_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &binode->runtime_flags))
+                       continue;
+
                inode = igrab(&binode->vfs_inode);
                if (!inode) {
                        cond_resched_lock(&root->delalloc_lock);
@@ -9464,10 +9470,11 @@ int btrfs_start_delalloc_snapshot(struct btrfs_root *root)
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state))
                return -EROFS;
 
-       return start_delalloc_inodes(root, &nr, true);
+       return start_delalloc_inodes(root, &nr, true, false);
 }
 
-int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
+int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr,
+                              bool in_reclaim_context)
 {
        struct btrfs_root *root;
        struct list_head splice;
@@ -9490,7 +9497,7 @@ int btrfs_start_delalloc_roots(struct btrfs_fs_info *fs_info, u64 nr)
                               &fs_info->delalloc_roots);
                spin_unlock(&fs_info->delalloc_root_lock);
 
-               ret = start_delalloc_inodes(root, &nr, false);
+               ret = start_delalloc_inodes(root, &nr, false, in_reclaim_context);
                btrfs_put_root(root);
                if (ret < 0)
                        goto out;
index 703212ff50a56e348440b2302dd246d389388c5a..dde49a791f3e236e7ad7279589d4fb94da1917f2 100644 (file)
@@ -4951,7 +4951,7 @@ long btrfs_ioctl(struct file *file, unsigned int
        case BTRFS_IOC_SYNC: {
                int ret;
 
-               ret = btrfs_start_delalloc_roots(fs_info, U64_MAX);
+               ret = btrfs_start_delalloc_roots(fs_info, U64_MAX, false);
                if (ret)
                        return ret;
                ret = btrfs_sync_fs(inode->i_sb, 1);
index 79d366a36223a790fafae0c23c15b8161459597f..120fcc54ad1a42a48e94edb03f5be8b117de964a 100644 (file)
@@ -202,11 +202,11 @@ static int __btrfs_add_ordered_extent(struct btrfs_inode *inode, u64 file_offset
        if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
                set_bit(type, &entry->flags);
 
-       if (dio) {
-               percpu_counter_add_batch(&fs_info->dio_bytes, num_bytes,
-                                        fs_info->delalloc_batch);
+       percpu_counter_add_batch(&fs_info->ordered_bytes, num_bytes,
+                                fs_info->delalloc_batch);
+
+       if (dio)
                set_bit(BTRFS_ORDERED_DIRECT, &entry->flags);
-       }
 
        /* one ref for the tree */
        refcount_set(&entry->refs, 1);
@@ -480,9 +480,8 @@ void btrfs_remove_ordered_extent(struct btrfs_inode *btrfs_inode,
                btrfs_delalloc_release_metadata(btrfs_inode, entry->num_bytes,
                                                false);
 
-       if (test_bit(BTRFS_ORDERED_DIRECT, &entry->flags))
-               percpu_counter_add_batch(&fs_info->dio_bytes, -entry->num_bytes,
-                                        fs_info->delalloc_batch);
+       percpu_counter_add_batch(&fs_info->ordered_bytes, -entry->num_bytes,
+                                fs_info->delalloc_batch);
 
        tree = &btrfs_inode->ordered_tree;
        spin_lock_irq(&tree->lock);
index fe3046007f52abad65bcf437b1d22910574d6ff0..808370ada888992cff2e343eae11457cdd97dee9 100644 (file)
@@ -3190,6 +3190,12 @@ out:
        return ret;
 }
 
+static bool rescan_should_stop(struct btrfs_fs_info *fs_info)
+{
+       return btrfs_fs_closing(fs_info) ||
+               test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state);
+}
+
 static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 {
        struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
@@ -3198,6 +3204,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
        struct btrfs_trans_handle *trans = NULL;
        int err = -ENOMEM;
        int ret = 0;
+       bool stopped = false;
 
        path = btrfs_alloc_path();
        if (!path)
@@ -3210,7 +3217,7 @@ static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
        path->skip_locking = 1;
 
        err = 0;
-       while (!err && !btrfs_fs_closing(fs_info)) {
+       while (!err && !(stopped = rescan_should_stop(fs_info))) {
                trans = btrfs_start_transaction(fs_info->fs_root, 0);
                if (IS_ERR(trans)) {
                        err = PTR_ERR(trans);
@@ -3253,7 +3260,7 @@ out:
        }
 
        mutex_lock(&fs_info->qgroup_rescan_lock);
-       if (!btrfs_fs_closing(fs_info))
+       if (!stopped)
                fs_info->qgroup_flags &= ~BTRFS_QGROUP_STATUS_FLAG_RESCAN;
        if (trans) {
                ret = update_qgroup_status_item(trans);
@@ -3272,7 +3279,7 @@ out:
 
        btrfs_end_transaction(trans);
 
-       if (btrfs_fs_closing(fs_info)) {
+       if (stopped) {
                btrfs_info(fs_info, "qgroup scan paused");
        } else if (err >= 0) {
                btrfs_info(fs_info, "qgroup scan completed%s",
@@ -3530,16 +3537,6 @@ static int try_flush_qgroup(struct btrfs_root *root)
        int ret;
        bool can_commit = true;
 
-       /*
-        * We don't want to run flush again and again, so if there is a running
-        * one, we won't try to start a new flush, but exit directly.
-        */
-       if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
-               wait_event(root->qgroup_flush_wait,
-                       !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
-               return 0;
-       }
-
        /*
         * If current process holds a transaction, we shouldn't flush, as we
         * assume all space reservation happens before a transaction handle is
@@ -3554,6 +3551,26 @@ static int try_flush_qgroup(struct btrfs_root *root)
            current->journal_info != BTRFS_SEND_TRANS_STUB)
                can_commit = false;
 
+       /*
+        * We don't want to run flush again and again, so if there is a running
+        * one, we won't try to start a new flush, but exit directly.
+        */
+       if (test_and_set_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state)) {
+               /*
+                * We are already holding a transaction, so waiting here for a
+                * running flush to finish could block that flush from making
+                * progress. Exit right now instead. This increases the chance
+                * of EDQUOT under heavy load and in near-limit cases, but if
+                * we are already near the limit, EDQUOT is arguably
+                * unavoidable anyway.
+                */
+               if (!can_commit)
+                       return 0;
+
+               wait_event(root->qgroup_flush_wait,
+                       !test_bit(BTRFS_ROOT_QGROUP_FLUSHING, &root->state));
+               return 0;
+       }
+
        ret = btrfs_start_delalloc_snapshot(root);
        if (ret < 0)
                goto out;
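
The test_and_set_bit()/wait_event() pair above is a single-flusher gate: the first caller flushes, later callers wait unless waiting could deadlock. A generic standalone sketch of the idiom (names illustrative):

#include <linux/bitops.h>
#include <linux/wait.h>

static void example_single_flusher(unsigned long *state,
				   wait_queue_head_t *wq, bool may_wait)
{
	if (test_and_set_bit(0, state)) {
		/* a flush is already running */
		if (may_wait)
			wait_event(*wq, !test_bit(0, state));
		return;
	}

	/* ... do the flush ... */

	clear_bit(0, state);
	wake_up(wq);
}
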
index ab80896315beff90cf4f5016d23e91b295b63df5..b03e7891394e36c838f83a7219beb065177f5ae0 100644 (file)
@@ -89,6 +89,19 @@ static int copy_inline_to_page(struct btrfs_inode *inode,
        if (ret)
                goto out_unlock;
 
+       /*
+        * After dirtying the page our caller will need to start a transaction,
+        * and if we are low on metadata free space, that can cause flushing of
+        * delalloc for all inodes in order to get metadata space released.
+        * However we are holding the range locked for the whole duration of
+        * the clone/dedupe operation, so we may deadlock if that happens and no
+        * other task releases enough space. So mark this inode as not being
+        * possible to flush to avoid such deadlock. We will clear that flag
+        * when we finish cloning all extents, since a transaction is started
+        * after finding each extent to clone.
+        */
+       set_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &inode->runtime_flags);
+
        if (comp_type == BTRFS_COMPRESS_NONE) {
                char *map;
 
@@ -549,6 +562,8 @@ process_slot:
 out:
        btrfs_free_path(path);
        kvfree(buf);
+       clear_bit(BTRFS_INODE_NO_DELALLOC_FLUSH, &BTRFS_I(inode)->runtime_flags);
+
        return ret;
 }
 
index d719a2755a40d100043b0ad8c9aeaf460fc13f72..ae97f4dbaff30f9004f915e91d2672f694db74f5 100644 (file)
@@ -236,6 +236,7 @@ struct waiting_dir_move {
         * after this directory is moved, we can try to rmdir the ino rmdir_ino.
         */
        u64 rmdir_ino;
+       u64 rmdir_gen;
        bool orphanized;
 };
 
@@ -316,7 +317,7 @@ static int is_waiting_for_move(struct send_ctx *sctx, u64 ino);
 static struct waiting_dir_move *
 get_waiting_dir_move(struct send_ctx *sctx, u64 ino);
 
-static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino);
+static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen);
 
 static int need_send_hole(struct send_ctx *sctx)
 {
@@ -2299,7 +2300,7 @@ static int get_cur_path(struct send_ctx *sctx, u64 ino, u64 gen,
 
                fs_path_reset(name);
 
-               if (is_waiting_for_rm(sctx, ino)) {
+               if (is_waiting_for_rm(sctx, ino, gen)) {
                        ret = gen_unique_name(sctx, ino, gen, name);
                        if (ret < 0)
                                goto out;
@@ -2858,8 +2859,8 @@ out:
        return ret;
 }
 
-static struct orphan_dir_info *
-add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
+static struct orphan_dir_info *add_orphan_dir_info(struct send_ctx *sctx,
+                                                  u64 dir_ino, u64 dir_gen)
 {
        struct rb_node **p = &sctx->orphan_dirs.rb_node;
        struct rb_node *parent = NULL;
@@ -2868,20 +2869,23 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
        while (*p) {
                parent = *p;
                entry = rb_entry(parent, struct orphan_dir_info, node);
-               if (dir_ino < entry->ino) {
+               if (dir_ino < entry->ino)
                        p = &(*p)->rb_left;
-               } else if (dir_ino > entry->ino) {
+               else if (dir_ino > entry->ino)
                        p = &(*p)->rb_right;
-               } else {
+               else if (dir_gen < entry->gen)
+                       p = &(*p)->rb_left;
+               else if (dir_gen > entry->gen)
+                       p = &(*p)->rb_right;
+               else
                        return entry;
-               }
        }
 
        odi = kmalloc(sizeof(*odi), GFP_KERNEL);
        if (!odi)
                return ERR_PTR(-ENOMEM);
        odi->ino = dir_ino;
-       odi->gen = 0;
+       odi->gen = dir_gen;
        odi->last_dir_index_offset = 0;
 
        rb_link_node(&odi->node, parent, p);
@@ -2889,8 +2893,8 @@ add_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
        return odi;
 }
 
-static struct orphan_dir_info *
-get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
+static struct orphan_dir_info *get_orphan_dir_info(struct send_ctx *sctx,
+                                                  u64 dir_ino, u64 gen)
 {
        struct rb_node *n = sctx->orphan_dirs.rb_node;
        struct orphan_dir_info *entry;
@@ -2901,15 +2905,19 @@ get_orphan_dir_info(struct send_ctx *sctx, u64 dir_ino)
                        n = n->rb_left;
                else if (dir_ino > entry->ino)
                        n = n->rb_right;
+               else if (gen < entry->gen)
+                       n = n->rb_left;
+               else if (gen > entry->gen)
+                       n = n->rb_right;
                else
                        return entry;
        }
        return NULL;
 }
 
-static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino)
+static int is_waiting_for_rm(struct send_ctx *sctx, u64 dir_ino, u64 gen)
 {
-       struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino);
+       struct orphan_dir_info *odi = get_orphan_dir_info(sctx, dir_ino, gen);
 
        return odi != NULL;
 }
@@ -2954,7 +2962,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
        key.type = BTRFS_DIR_INDEX_KEY;
        key.offset = 0;
 
-       odi = get_orphan_dir_info(sctx, dir);
+       odi = get_orphan_dir_info(sctx, dir, dir_gen);
        if (odi)
                key.offset = odi->last_dir_index_offset;
 
@@ -2985,7 +2993,7 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
 
                dm = get_waiting_dir_move(sctx, loc.objectid);
                if (dm) {
-                       odi = add_orphan_dir_info(sctx, dir);
+                       odi = add_orphan_dir_info(sctx, dir, dir_gen);
                        if (IS_ERR(odi)) {
                                ret = PTR_ERR(odi);
                                goto out;
@@ -2993,12 +3001,13 @@ static int can_rmdir(struct send_ctx *sctx, u64 dir, u64 dir_gen,
                        odi->gen = dir_gen;
                        odi->last_dir_index_offset = found_key.offset;
                        dm->rmdir_ino = dir;
+                       dm->rmdir_gen = dir_gen;
                        ret = 0;
                        goto out;
                }
 
                if (loc.objectid > send_progress) {
-                       odi = add_orphan_dir_info(sctx, dir);
+                       odi = add_orphan_dir_info(sctx, dir, dir_gen);
                        if (IS_ERR(odi)) {
                                ret = PTR_ERR(odi);
                                goto out;
@@ -3038,6 +3047,7 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino, bool orphanized)
                return -ENOMEM;
        dm->ino = ino;
        dm->rmdir_ino = 0;
+       dm->rmdir_gen = 0;
        dm->orphanized = orphanized;
 
        while (*p) {
@@ -3183,7 +3193,7 @@ static int path_loop(struct send_ctx *sctx, struct fs_path *name,
        while (ino != BTRFS_FIRST_FREE_OBJECTID) {
                fs_path_reset(name);
 
-               if (is_waiting_for_rm(sctx, ino))
+               if (is_waiting_for_rm(sctx, ino, gen))
                        break;
                if (is_waiting_for_move(sctx, ino)) {
                        if (*ancestor_ino == 0)
@@ -3223,6 +3233,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
        u64 parent_ino, parent_gen;
        struct waiting_dir_move *dm = NULL;
        u64 rmdir_ino = 0;
+       u64 rmdir_gen;
        u64 ancestor;
        bool is_orphan;
        int ret;
@@ -3237,6 +3248,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
        dm = get_waiting_dir_move(sctx, pm->ino);
        ASSERT(dm);
        rmdir_ino = dm->rmdir_ino;
+       rmdir_gen = dm->rmdir_gen;
        is_orphan = dm->orphanized;
        free_waiting_dir_move(sctx, dm);
 
@@ -3273,6 +3285,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
                        dm = get_waiting_dir_move(sctx, pm->ino);
                        ASSERT(dm);
                        dm->rmdir_ino = rmdir_ino;
+                       dm->rmdir_gen = rmdir_gen;
                }
                goto out;
        }
@@ -3291,7 +3304,7 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
                struct orphan_dir_info *odi;
                u64 gen;
 
-               odi = get_orphan_dir_info(sctx, rmdir_ino);
+               odi = get_orphan_dir_info(sctx, rmdir_ino, rmdir_gen);
                if (!odi) {
                        /* already deleted */
                        goto finish;
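
A note on why the generation is folded into the key throughout this file: inode numbers can be reused, so a directory that is deleted and recreated during an incremental send would otherwise collide with the stale orphan_dir_info of its predecessor. The rb-tree ordering added above is the usual two-field cascade; in isolation (hypothetical types, same comparison logic):

#include <stdint.h>
#include <stdio.h>

struct odi_key {
        uint64_t ino;
        uint64_t gen;
};

/* Same cascade as the rb-tree walks above: ino first, then generation. */
static int odi_key_cmp(const struct odi_key *a, const struct odi_key *b)
{
        if (a->ino != b->ino)
                return a->ino < b->ino ? -1 : 1;
        if (a->gen != b->gen)
                return a->gen < b->gen ? -1 : 1;
        return 0;
}

int main(void)
{
        struct odi_key old = { .ino = 257, .gen = 5 };
        struct odi_key new = { .ino = 257, .gen = 9 }; /* reused inode number */

        printf("%d\n", odi_key_cmp(&old, &new));       /* -1: distinct entries */
        return 0;
}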
index 64099565ab8f5d2d038a6ff84b76501fde56e686..80f3edd6a3919a47eb2b4f075c2710cbfe9df9ac 100644 (file)
@@ -206,6 +206,7 @@ static int create_space_info(struct btrfs_fs_info *info, u64 flags)
        INIT_LIST_HEAD(&space_info->ro_bgs);
        INIT_LIST_HEAD(&space_info->tickets);
        INIT_LIST_HEAD(&space_info->priority_tickets);
+       space_info->clamp = 1;
 
        ret = btrfs_sysfs_add_space_info_type(info, space_info);
        if (ret)
@@ -489,7 +490,7 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
 {
        struct btrfs_trans_handle *trans;
        u64 delalloc_bytes;
-       u64 dio_bytes;
+       u64 ordered_bytes;
        u64 items;
        long time_left;
        int loops;
@@ -513,26 +514,21 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
 
        delalloc_bytes = percpu_counter_sum_positive(
                                                &fs_info->delalloc_bytes);
-       dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
-       if (delalloc_bytes == 0 && dio_bytes == 0) {
-               if (trans)
-                       return;
-               if (wait_ordered)
-                       btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
+       ordered_bytes = percpu_counter_sum_positive(&fs_info->ordered_bytes);
+       if (delalloc_bytes == 0 && ordered_bytes == 0)
                return;
-       }
 
        /*
         * If we are doing more ordered than delalloc we need to just wait on
         * ordered extents, otherwise we'll waste time trying to flush delalloc
         * that likely won't give us the space back we need.
         */
-       if (dio_bytes > delalloc_bytes)
+       if (ordered_bytes > delalloc_bytes)
                wait_ordered = true;
 
        loops = 0;
-       while ((delalloc_bytes || dio_bytes) && loops < 3) {
-               btrfs_start_delalloc_roots(fs_info, items);
+       while ((delalloc_bytes || ordered_bytes) && loops < 3) {
+               btrfs_start_delalloc_roots(fs_info, items, true);
 
                loops++;
                if (wait_ordered && !trans) {
@@ -553,7 +549,8 @@ static void shrink_delalloc(struct btrfs_fs_info *fs_info,
 
                delalloc_bytes = percpu_counter_sum_positive(
                                                &fs_info->delalloc_bytes);
-               dio_bytes = percpu_counter_sum_positive(&fs_info->dio_bytes);
+               ordered_bytes = percpu_counter_sum_positive(
+                                               &fs_info->ordered_bytes);
        }
 }
 
@@ -667,7 +664,7 @@ enospc:
  */
 static void flush_space(struct btrfs_fs_info *fs_info,
                       struct btrfs_space_info *space_info, u64 num_bytes,
-                      int state)
+                      enum btrfs_flush_state state, bool for_preempt)
 {
        struct btrfs_root *root = fs_info->extent_root;
        struct btrfs_trans_handle *trans;
@@ -736,13 +733,21 @@ static void flush_space(struct btrfs_fs_info *fs_info,
        case COMMIT_TRANS:
                ret = may_commit_transaction(fs_info, space_info);
                break;
+       case FORCE_COMMIT_TRANS:
+               trans = btrfs_join_transaction(root);
+               if (IS_ERR(trans)) {
+                       ret = PTR_ERR(trans);
+                       break;
+               }
+               ret = btrfs_commit_transaction(trans);
+               break;
        default:
                ret = -ENOSPC;
                break;
        }
 
        trace_btrfs_flush_space(fs_info, space_info->flags, num_bytes, state,
-                               ret);
+                               ret, for_preempt);
        return;
 }
 
@@ -752,7 +757,6 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
 {
        u64 used;
        u64 avail;
-       u64 expected;
        u64 to_reclaim = space_info->reclaim_size;
 
        lockdep_assert_held(&space_info->lock);
@@ -770,44 +774,74 @@ btrfs_calc_reclaim_metadata_size(struct btrfs_fs_info *fs_info,
        if (space_info->total_bytes + avail < used)
                to_reclaim += used - (space_info->total_bytes + avail);
 
-       if (to_reclaim)
-               return to_reclaim;
-
-       to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
-       if (btrfs_can_overcommit(fs_info, space_info, to_reclaim,
-                                BTRFS_RESERVE_FLUSH_ALL))
-               return 0;
-
-       used = btrfs_space_info_used(space_info, true);
-
-       if (btrfs_can_overcommit(fs_info, space_info, SZ_1M,
-                                BTRFS_RESERVE_FLUSH_ALL))
-               expected = div_factor_fine(space_info->total_bytes, 95);
-       else
-               expected = div_factor_fine(space_info->total_bytes, 90);
-
-       if (used > expected)
-               to_reclaim = used - expected;
-       else
-               to_reclaim = 0;
-       to_reclaim = min(to_reclaim, space_info->bytes_may_use +
-                                    space_info->bytes_reserved);
        return to_reclaim;
 }
 
-static inline int need_do_async_reclaim(struct btrfs_fs_info *fs_info,
-                                       struct btrfs_space_info *space_info,
-                                       u64 used)
+static inline bool need_preemptive_reclaim(struct btrfs_fs_info *fs_info,
+                                          struct btrfs_space_info *space_info)
 {
+       u64 ordered, delalloc;
        u64 thresh = div_factor_fine(space_info->total_bytes, 98);
+       u64 used;
 
        /* If we're just plain full then async reclaim just slows us down. */
        if ((space_info->bytes_used + space_info->bytes_reserved) >= thresh)
                return 0;
 
-       if (!btrfs_calc_reclaim_metadata_size(fs_info, space_info))
+       /*
+        * We have tickets queued, bail so we don't compete with the async
+        * flushers.
+        */
+       if (space_info->reclaim_size)
                return 0;
 
+       /*
+        * If we have over half of the free space occupied by reservations or
+        * pinned then we want to start flushing.
+        *
+        * We do not do the traditional thing here, which is to say
+        *
+        *   if (used >= ((total_bytes + avail) >> 1))
+        *     return 1;
+        *
+        * because this doesn't quite work how we want.  If we had more than 50%
+        * of the space_info used by bytes_used and we had 0 available we'd just
+        * constantly run the background flusher.  Instead we want it to kick in
+        * if our reclaimable space exceeds our clamped free space.
+        */
+       thresh = calc_available_free_space(fs_info, space_info,
+                                          BTRFS_RESERVE_FLUSH_ALL);
+       thresh += (space_info->total_bytes - space_info->bytes_used -
+                  space_info->bytes_reserved - space_info->bytes_readonly);
+       thresh >>= space_info->clamp;
+
+       used = space_info->bytes_pinned;
+
+       /*
+        * If we have more ordered bytes than delalloc bytes then we're either
+        * doing a lot of DIO, or we simply don't have a lot of delalloc waiting
+        * around.  Preemptive flushing is only useful in that it can free up
+        * space before tickets need to wait for things to finish.  In the case
+        * of ordered extents, preemptively waiting on ordered extents gets us
+        * nothing, if our reservations are tied up in ordered extents we'll
+        * simply have to slow down writers by forcing them to wait on ordered
+        * extents.
+        *
+        * In the case that ordered is larger than delalloc, only include the
+        * block reserves that we would actually be able to directly reclaim
+        * from.  In this case if we're heavy on metadata operations this will
+        * clearly be heavy enough to warrant preemptive flushing.  In the case
+        * of heavy DIO or ordered reservations, preemptive flushing will just
+        * waste time and cause us to slow down.
+        */
+       ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
+       delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
+       if (ordered >= delalloc)
+               used += fs_info->delayed_refs_rsv.reserved +
+                       fs_info->delayed_block_rsv.reserved;
+       else
+               used += space_info->bytes_may_use;
+
        return (used >= thresh && !btrfs_fs_closing(fs_info) &&
                !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
 }
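
To make the clamped threshold concrete: thresh is the space we could still overcommit plus the unreserved part of the space_info, shifted right by clamp, so every clamp increment halves the headroom tolerated before preemptive flushing starts. A standalone arithmetic sketch with made-up sizes:

#include <stdint.h>
#include <stdio.h>

/* thresh = (avail + unused) >> clamp, per need_preemptive_reclaim() above */
static uint64_t preempt_thresh(uint64_t avail, uint64_t unused, int clamp)
{
        return (avail + unused) >> clamp;
}

int main(void)
{
        uint64_t avail = 8ULL << 30;    /* hypothetical overcommittable space */
        uint64_t unused = 4ULL << 30;   /* hypothetical unreserved space */
        int clamp;

        for (clamp = 1; clamp <= 4; clamp++)
                printf("clamp=%d -> flush once reclaimable exceeds %llu MiB\n",
                       clamp,
                       (unsigned long long)(preempt_thresh(avail, unused,
                                                           clamp) >> 20));
        return 0;
}

maybe_clamp_preempt() below raises clamp (capped at 8) each time a reservation is forced to queue a ticket, and the preempt worker lowers it again when a single pass was enough, so the trigger point adapts to the workload.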
@@ -920,7 +954,7 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
        struct btrfs_fs_info *fs_info;
        struct btrfs_space_info *space_info;
        u64 to_reclaim;
-       int flush_state;
+       enum btrfs_flush_state flush_state;
        int commit_cycles = 0;
        u64 last_tickets_id;
 
@@ -939,7 +973,8 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
 
        flush_state = FLUSH_DELAYED_ITEMS_NR;
        do {
-               flush_space(fs_info, space_info, to_reclaim, flush_state);
+               flush_space(fs_info, space_info, to_reclaim, flush_state,
+                           false);
                spin_lock(&space_info->lock);
                if (list_empty(&space_info->tickets)) {
                        space_info->flush = 0;
@@ -987,6 +1022,106 @@ static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
        } while (flush_state <= COMMIT_TRANS);
 }
 
+/*
+ * This handles pre-flushing of metadata space before we get to the point that
+ * we need to start blocking people on tickets.  The logic here is different
+ * from the other flush paths because it doesn't rely on tickets to tell us how
+ * much we need to flush, instead it attempts to keep us below the 80% full
+ * watermark of space by flushing whichever reservation pool is currently the
+ * largest.
+ */
+static void btrfs_preempt_reclaim_metadata_space(struct work_struct *work)
+{
+       struct btrfs_fs_info *fs_info;
+       struct btrfs_space_info *space_info;
+       struct btrfs_block_rsv *delayed_block_rsv;
+       struct btrfs_block_rsv *delayed_refs_rsv;
+       struct btrfs_block_rsv *global_rsv;
+       struct btrfs_block_rsv *trans_rsv;
+       int loops = 0;
+
+       fs_info = container_of(work, struct btrfs_fs_info,
+                              preempt_reclaim_work);
+       space_info = btrfs_find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
+       delayed_block_rsv = &fs_info->delayed_block_rsv;
+       delayed_refs_rsv = &fs_info->delayed_refs_rsv;
+       global_rsv = &fs_info->global_block_rsv;
+       trans_rsv = &fs_info->trans_block_rsv;
+
+       spin_lock(&space_info->lock);
+       while (need_preemptive_reclaim(fs_info, space_info)) {
+               enum btrfs_flush_state flush;
+               u64 delalloc_size = 0;
+               u64 to_reclaim, block_rsv_size;
+               u64 global_rsv_size = global_rsv->reserved;
+
+               loops++;
+
+               /*
+                * We don't have a precise counter for the metadata being
+                * reserved for delalloc, so we'll approximate it by subtracting
+                * out the block rsv's space from the bytes_may_use.  If that
+                * amount is higher than the individual reserves, then we can
+                * assume it's tied up in delalloc reservations.
+                */
+               block_rsv_size = global_rsv_size +
+                       delayed_block_rsv->reserved +
+                       delayed_refs_rsv->reserved +
+                       trans_rsv->reserved;
+               if (block_rsv_size < space_info->bytes_may_use)
+                       delalloc_size = space_info->bytes_may_use -
+                               block_rsv_size;
+               spin_unlock(&space_info->lock);
+
+               /*
+                * We don't want to include the global_rsv in our calculation,
+                * because that's space we can't touch.  Subtract it from the
+                * block_rsv_size for the next checks.
+                */
+               block_rsv_size -= global_rsv_size;
+
+               /*
+                * We really want to avoid flushing delalloc too much, as it
+                * could result in poor allocation patterns, so only flush it if
+                * it's larger than the rest of the pools combined.
+                */
+               if (delalloc_size > block_rsv_size) {
+                       to_reclaim = delalloc_size;
+                       flush = FLUSH_DELALLOC;
+               } else if (space_info->bytes_pinned >
+                          (delayed_block_rsv->reserved +
+                           delayed_refs_rsv->reserved)) {
+                       to_reclaim = space_info->bytes_pinned;
+                       flush = FORCE_COMMIT_TRANS;
+               } else if (delayed_block_rsv->reserved >
+                          delayed_refs_rsv->reserved) {
+                       to_reclaim = delayed_block_rsv->reserved;
+                       flush = FLUSH_DELAYED_ITEMS_NR;
+               } else {
+                       to_reclaim = delayed_refs_rsv->reserved;
+                       flush = FLUSH_DELAYED_REFS_NR;
+               }
+
+               /*
+                * We don't want to reclaim everything, just a portion, so scale
+                * down the to_reclaim by 1/4.  If it takes us down to 0,
+        * reclaim one item's worth.
+        * reclaim one item's worth.
+                */
+               to_reclaim >>= 2;
+               if (!to_reclaim)
+                       to_reclaim = btrfs_calc_insert_metadata_size(fs_info, 1);
+               flush_space(fs_info, space_info, to_reclaim, flush, true);
+               cond_resched();
+               spin_lock(&space_info->lock);
+       }
+
+       /* We only went through once, back off our clamping. */
+       if (loops == 1 && !space_info->reclaim_size)
+               space_info->clamp = max(1, space_info->clamp - 1);
+       trace_btrfs_done_preemptive_reclaim(fs_info, space_info);
+       spin_unlock(&space_info->lock);
+}
+
 /*
  * FLUSH_DELALLOC_WAIT:
  *   Space is freed from flushing delalloc in one of two ways.
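
The selection cascade in the worker above reads as: flush whichever pool currently dominates, a quarter at a time. A condensed standalone version of the decision follows (the real code also folds the transaction reserve into the delalloc comparison and subtracts the global reserve first):

#include <stdint.h>
#include <stdio.h>

enum flush_target { DELALLOC, PINNED, DELAYED_ITEMS, DELAYED_REFS };

static enum flush_target pick_target(uint64_t delalloc, uint64_t pinned,
                                     uint64_t delayed_items,
                                     uint64_t delayed_refs,
                                     uint64_t *to_reclaim)
{
        uint64_t block_rsvs = delayed_items + delayed_refs;
        enum flush_target t;

        if (delalloc > block_rsvs) {
                t = DELALLOC;                   /* FLUSH_DELALLOC */
                *to_reclaim = delalloc;
        } else if (pinned > block_rsvs) {
                t = PINNED;                     /* FORCE_COMMIT_TRANS */
                *to_reclaim = pinned;
        } else if (delayed_items > delayed_refs) {
                t = DELAYED_ITEMS;              /* FLUSH_DELAYED_ITEMS_NR */
                *to_reclaim = delayed_items;
        } else {
                t = DELAYED_REFS;               /* FLUSH_DELAYED_REFS_NR */
                *to_reclaim = delayed_refs;
        }
        *to_reclaim >>= 2;      /* reclaim only a portion per pass */
        return t;
}

int main(void)
{
        uint64_t amount;
        enum flush_target t = pick_target(64 << 20, 8 << 20, 4 << 20,
                                          2 << 20, &amount);

        printf("target=%d reclaim=%llu\n", t, (unsigned long long)amount);
        return 0;
}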
@@ -1067,7 +1202,8 @@ static void btrfs_async_reclaim_data_space(struct work_struct *work)
        spin_unlock(&space_info->lock);
 
        while (!space_info->full) {
-               flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE);
+               flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE,
+                           false);
                spin_lock(&space_info->lock);
                if (list_empty(&space_info->tickets)) {
                        space_info->flush = 0;
@@ -1080,7 +1216,7 @@ static void btrfs_async_reclaim_data_space(struct work_struct *work)
 
        while (flush_state < ARRAY_SIZE(data_flush_states)) {
                flush_space(fs_info, space_info, U64_MAX,
-                           data_flush_states[flush_state]);
+                           data_flush_states[flush_state], false);
                spin_lock(&space_info->lock);
                if (list_empty(&space_info->tickets)) {
                        space_info->flush = 0;
@@ -1113,6 +1249,8 @@ void btrfs_init_async_reclaim_work(struct btrfs_fs_info *fs_info)
 {
        INIT_WORK(&fs_info->async_reclaim_work, btrfs_async_reclaim_metadata_space);
        INIT_WORK(&fs_info->async_data_reclaim_work, btrfs_async_reclaim_data_space);
+       INIT_WORK(&fs_info->preempt_reclaim_work,
+                 btrfs_preempt_reclaim_metadata_space);
 }
 
 static const enum btrfs_flush_state priority_flush_states[] = {
@@ -1151,7 +1289,8 @@ static void priority_reclaim_metadata_space(struct btrfs_fs_info *fs_info,
 
        flush_state = 0;
        do {
-               flush_space(fs_info, space_info, to_reclaim, states[flush_state]);
+               flush_space(fs_info, space_info, to_reclaim, states[flush_state],
+                           false);
                flush_state++;
                spin_lock(&space_info->lock);
                if (ticket->bytes == 0) {
@@ -1167,7 +1306,8 @@ static void priority_reclaim_data_space(struct btrfs_fs_info *fs_info,
                                        struct reserve_ticket *ticket)
 {
        while (!space_info->full) {
-               flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE);
+               flush_space(fs_info, space_info, U64_MAX, ALLOC_CHUNK_FORCE,
+                           false);
                spin_lock(&space_info->lock);
                if (ticket->bytes == 0) {
                        spin_unlock(&space_info->lock);
@@ -1224,6 +1364,7 @@ static void wait_reserve_ticket(struct btrfs_fs_info *fs_info,
 static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
                                 struct btrfs_space_info *space_info,
                                 struct reserve_ticket *ticket,
+                                u64 start_ns, u64 orig_bytes,
                                 enum btrfs_reserve_flush_enum flush)
 {
        int ret;
@@ -1279,6 +1420,8 @@ static int handle_reserve_ticket(struct btrfs_fs_info *fs_info,
         * space wasn't reserved at all).
         */
        ASSERT(!(ticket->bytes == 0 && ticket->error));
+       trace_btrfs_reserve_ticket(fs_info, space_info->flags, orig_bytes,
+                                  start_ns, flush, ticket->error);
        return ret;
 }
 
@@ -1292,6 +1435,24 @@ static inline bool is_normal_flushing(enum btrfs_reserve_flush_enum flush)
                (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
 }
 
+static inline void maybe_clamp_preempt(struct btrfs_fs_info *fs_info,
+                                      struct btrfs_space_info *space_info)
+{
+       u64 ordered = percpu_counter_sum_positive(&fs_info->ordered_bytes);
+       u64 delalloc = percpu_counter_sum_positive(&fs_info->delalloc_bytes);
+
+       /*
+        * If we're heavy on ordered operations then clamping won't help us.  We
+        * need to clamp specifically to keep up with dirty'ing buffered
+        * writers, because there's not a 1:1 correlation of writing delalloc
+        * and freeing space, like there is with flushing delayed refs or
+        * delayed nodes.  If we're already more ordered than delalloc then
+        * we're keeping up, otherwise we aren't and should probably clamp.
+        */
+       if (ordered < delalloc)
+               space_info->clamp = min(space_info->clamp + 1, 8);
+}
+
 /**
  * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
  * @root - the root we're allocating for
@@ -1312,6 +1473,7 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
 {
        struct work_struct *async_work;
        struct reserve_ticket ticket;
+       u64 start_ns = 0;
        u64 used;
        int ret = 0;
        bool pending_tickets;
@@ -1364,6 +1526,9 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
                space_info->reclaim_size += ticket.bytes;
                init_waitqueue_head(&ticket.wait);
                ticket.steal = (flush == BTRFS_RESERVE_FLUSH_ALL_STEAL);
+               if (trace_btrfs_reserve_ticket_enabled())
+                       start_ns = ktime_get_ns();
+
                if (flush == BTRFS_RESERVE_FLUSH_ALL ||
                    flush == BTRFS_RESERVE_FLUSH_ALL_STEAL ||
                    flush == BTRFS_RESERVE_FLUSH_DATA) {
@@ -1380,6 +1545,14 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
                        list_add_tail(&ticket.list,
                                      &space_info->priority_tickets);
                }
+
+               /*
+                * We were forced to add a reserve ticket, so our preemptive
+                * flushing is unable to keep up.  Clamp down on the threshold
+                * for the preemptive flushing in order to keep up with the
+                * workload.
+                */
+               maybe_clamp_preempt(fs_info, space_info);
        } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
                used += orig_bytes;
                /*
@@ -1388,19 +1561,20 @@ static int __reserve_bytes(struct btrfs_fs_info *fs_info,
                 * the async reclaim as we will panic.
                 */
                if (!test_bit(BTRFS_FS_LOG_RECOVERING, &fs_info->flags) &&
-                   need_do_async_reclaim(fs_info, space_info, used) &&
-                   !work_busy(&fs_info->async_reclaim_work)) {
+                   need_preemptive_reclaim(fs_info, space_info) &&
+                   !work_busy(&fs_info->preempt_reclaim_work)) {
                        trace_btrfs_trigger_flush(fs_info, space_info->flags,
                                                  orig_bytes, flush, "preempt");
                        queue_work(system_unbound_wq,
-                                  &fs_info->async_reclaim_work);
+                                  &fs_info->preempt_reclaim_work);
                }
        }
        spin_unlock(&space_info->lock);
        if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
                return ret;
 
-       return handle_reserve_ticket(fs_info, space_info, &ticket, flush);
+       return handle_reserve_ticket(fs_info, space_info, &ticket, start_ns,
+                                    orig_bytes, flush);
 }
 
 /**
index 5646393b928c9c8123dd5965158ec6782f5f572f..2bd6e06dbbc758c96b982dc1aa70b342e11270cd 100644 (file)
@@ -22,6 +22,9 @@ struct btrfs_space_info {
                                   the space info if we had an ENOSPC in the
                                   allocator. */
 
+       int clamp;              /* Shift count used to scale our threshold
+                                  for preemptive flushing; the threshold
+                                  is divided by 2^clamp. */
+
        unsigned int full:1;    /* indicates that we cannot allocate any more
                                   chunks for this space */
        unsigned int chunk_alloc:1;     /* set if we are allocating a chunk */
index 022f2081008921fb9bab167d972c53360da7e35e..12d7d3be7cd456993405ab86fd16d92aa2a5a306 100644 (file)
@@ -175,7 +175,7 @@ void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function
        btrfs_discard_stop(fs_info);
 
        /* btrfs handle error by forcing the filesystem readonly */
-       sb->s_flags |= SB_RDONLY;
+       btrfs_set_sb_rdonly(sb);
        btrfs_info(fs_info, "forced readonly");
        /*
         * Note that a running device replace operation is not canceled here
@@ -1953,7 +1953,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                /* avoid complains from lockdep et al. */
                up(&fs_info->uuid_tree_rescan_sem);
 
-               sb->s_flags |= SB_RDONLY;
+               btrfs_set_sb_rdonly(sb);
 
                /*
                 * Setting SB_RDONLY will put the cleaner thread to
@@ -1964,10 +1964,42 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                 */
                btrfs_delete_unused_bgs(fs_info);
 
+               /*
+                * The cleaner task could be already running before we set the
+                * flag BTRFS_FS_STATE_RO (and SB_RDONLY in the superblock).
+                * We must make sure that after we finish the remount, i.e. after
+                * we call btrfs_commit_super(), the cleaner can no longer start
+                * a transaction - either because it was dropping a dead root,
+                * running delayed iputs or deleting an unused block group (the
+                * cleaner picked a block group from the list of unused block
+                * groups before we were able to in the previous call to
+                * btrfs_delete_unused_bgs()).
+                */
+               wait_on_bit(&fs_info->flags, BTRFS_FS_CLEANER_RUNNING,
+                           TASK_UNINTERRUPTIBLE);
+
+               /*
+                * We've set the superblock to RO mode, so we might have made
+                * the cleaner task sleep without running all pending delayed
+                * iputs. Go through all the delayed iputs here, so that if an
+                * unmount happens without remounting RW we don't end up at
+                * finishing close_ctree() with a non-empty list of delayed
+                * iputs.
+                */
+               btrfs_run_delayed_iputs(fs_info);
+
                btrfs_dev_replace_suspend_for_unmount(fs_info);
                btrfs_scrub_cancel(fs_info);
                btrfs_pause_balance(fs_info);
 
+               /*
+                * Pause the qgroup rescan worker if it is running. We don't want
+                * it to be still running after we are in RO mode, as after that,
+                * by the time we unmount, it might have left a transaction open,
+                * so we would leak the transaction and/or crash.
+                */
+               btrfs_qgroup_wait_for_completion(fs_info, false);
+
                ret = btrfs_commit_super(fs_info);
                if (ret)
                        goto restore;
@@ -2006,7 +2038,7 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                if (ret)
                        goto restore;
 
-               sb->s_flags &= ~SB_RDONLY;
+               btrfs_clear_sb_rdonly(sb);
 
                set_bit(BTRFS_FS_OPEN, &fs_info->flags);
        }
@@ -2028,6 +2060,8 @@ restore:
        /* We've hit an error - don't reset SB_RDONLY */
        if (sb_rdonly(sb))
                old_flags |= SB_RDONLY;
+       if (!(old_flags & SB_RDONLY))
+               clear_bit(BTRFS_FS_STATE_RO, &fs_info->fs_state);
        sb->s_flags = old_flags;
        fs_info->mount_opt = old_opts;
        fs_info->compress_type = old_compress_type;
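
The btrfs_set_sb_rdonly()/btrfs_clear_sb_rdonly() helpers this file switches to are not shown in this diff. Judging from the restore path above, which clears BTRFS_FS_STATE_RO by hand when rolling back, they presumably pair the VFS read-only flag with a btrfs-private state bit, roughly as sketched here (an assumption, not quoted from the patch):

static inline void btrfs_set_sb_rdonly(struct super_block *sb)
{
        sb->s_flags |= SB_RDONLY;
        set_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
}

static inline void btrfs_clear_sb_rdonly(struct super_block *sb)
{
        sb->s_flags &= ~SB_RDONLY;
        clear_bit(BTRFS_FS_STATE_RO, &btrfs_sb(sb)->fs_state);
}

A private bit lets btrfs code test "we went read-only" without re-deriving it from sb->s_flags, which the VFS can toggle for other reasons.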
index 8ca334d554afb9cef5539d1a94af71d2468ac36c..6bd97bd4cb37114720393f252202844dd9653593 100644 (file)
@@ -55,8 +55,14 @@ struct inode *btrfs_new_test_inode(void)
        struct inode *inode;
 
        inode = new_inode(test_mnt->mnt_sb);
-       if (inode)
-               inode_init_owner(inode, NULL, S_IFREG);
+       if (!inode)
+               return NULL;
+
+       inode->i_mode = S_IFREG;
+       BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
+       BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
+       BTRFS_I(inode)->location.offset = 0;
+       inode_init_owner(inode, NULL, S_IFREG);
 
        return inode;
 }
index 04022069761deb90a2651d9eed9a94469268eb92..c9874b12d337c7e8323584178b0ce55b321fe34e 100644 (file)
@@ -232,11 +232,6 @@ static noinline int test_btrfs_get_extent(u32 sectorsize, u32 nodesize)
                return ret;
        }
 
-       inode->i_mode = S_IFREG;
-       BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
-       BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
-       BTRFS_I(inode)->location.offset = 0;
-
        fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
        if (!fs_info) {
                test_std_err(TEST_ALLOC_FS_INFO);
@@ -835,10 +830,6 @@ static int test_hole_first(u32 sectorsize, u32 nodesize)
                return ret;
        }
 
-       BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
-       BTRFS_I(inode)->location.objectid = BTRFS_FIRST_FREE_OBJECTID;
-       BTRFS_I(inode)->location.offset = 0;
-
        fs_info = btrfs_alloc_dummy_fs_info(nodesize, sectorsize);
        if (!fs_info) {
                test_std_err(TEST_ALLOC_FS_INFO);
index 8e0f7a1029c6c8002b08a781efd17be012330a1c..bc9b3306eb36944681e6014f99ecc6d7273f39b3 100644 (file)
@@ -909,9 +909,9 @@ bool btrfs_should_end_transaction(struct btrfs_trans_handle *trans)
 {
        struct btrfs_transaction *cur_trans = trans->transaction;
 
-       smp_mb();
        if (cur_trans->state >= TRANS_STATE_COMMIT_START ||
-           cur_trans->delayed_refs.flushing)
+           test_bit(BTRFS_DELAYED_REFS_FLUSHING,
+                    &cur_trans->delayed_refs.flags))
                return true;
 
        return should_end_transaction(trans);
@@ -1227,10 +1227,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);
 
-       if (ret)
-               return ret;
-
-       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
        if (ret)
                return ret;
 
@@ -1248,10 +1244,6 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans)
        if (ret)
                return ret;
 
-       /* run_qgroups might have added some more refs */
-       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
-       if (ret)
-               return ret;
 again:
        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                struct btrfs_root *root;
@@ -1266,15 +1258,24 @@ again:
                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
-               ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
-               if (ret)
-                       return ret;
        }
 
+       /* Now flush any delayed refs generated by updating all of the roots. */
+       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
+       if (ret)
+               return ret;
+
        while (!list_empty(dirty_bgs) || !list_empty(io_bgs)) {
                ret = btrfs_write_dirty_block_groups(trans);
                if (ret)
                        return ret;
+
+               /*
+                * We're writing the dirty block groups, which could generate
+                * delayed refs, which could generate more dirty block groups,
+                * so we want to keep this flushing in this loop to make sure
+                * everything gets run.
+                */
                ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
                if (ret)
                        return ret;
@@ -1319,7 +1320,6 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
        struct btrfs_root *gang[8];
        int i;
        int ret;
-       int err = 0;
 
        spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
@@ -1331,6 +1331,8 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
                        break;
                for (i = 0; i < ret; i++) {
                        struct btrfs_root *root = gang[i];
+                       int ret2;
+
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
@@ -1350,17 +1352,17 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans)
                                                    root->node);
                        }
 
-                       err = btrfs_update_root(trans, fs_info->tree_root,
+                       ret2 = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
+                       if (ret2)
+                               return ret2;
                        spin_lock(&fs_info->fs_roots_radix_lock);
-                       if (err)
-                               break;
                        btrfs_qgroup_free_meta_all_pertrans(root);
                }
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
-       return err;
+       return 0;
 }
 
 /*
@@ -1685,12 +1687,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                goto fail;
        }
 
-       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
-       if (ret) {
-               btrfs_abort_transaction(trans, ret);
-               goto fail;
-       }
-
        /*
         * Do special qgroup accounting for snapshot, as we do some qgroup
         * snapshot hack to do fast snapshot.
@@ -1738,12 +1734,6 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                }
        }
 
-       ret = btrfs_run_delayed_refs(trans, (unsigned long)-1);
-       if (ret) {
-               btrfs_abort_transaction(trans, ret);
-               goto fail;
-       }
-
 fail:
        pending->error = ret;
 dir_item_existed:
@@ -2042,32 +2032,25 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans)
        btrfs_trans_release_metadata(trans);
        trans->block_rsv = NULL;
 
-       /* make a pass through all the delayed refs we have so far
-        * any runnings procs may add more while we are here
-        */
-       ret = btrfs_run_delayed_refs(trans, 0);
-       if (ret) {
-               btrfs_end_transaction(trans);
-               return ret;
-       }
-
-       cur_trans = trans->transaction;
-
        /*
-        * set the flushing flag so procs in this transaction have to
-        * start sending their work down.
+        * We only want one transaction commit doing the flushing so we do not
+        * waste a bunch of time on lock contention on the extent root node.
         */
-       cur_trans->delayed_refs.flushing = 1;
-       smp_wmb();
+       if (!test_and_set_bit(BTRFS_DELAYED_REFS_FLUSHING,
+                             &cur_trans->delayed_refs.flags)) {
+               /*
+                * make a pass through all the delayed refs we have so far
+                * any running procs may add more while we are here
+                */
+               ret = btrfs_run_delayed_refs(trans, 0);
+               if (ret) {
+                       btrfs_end_transaction(trans);
+                       return ret;
+               }
+       }
 
        btrfs_create_pending_block_groups(trans);
 
-       ret = btrfs_run_delayed_refs(trans, 0);
-       if (ret) {
-               btrfs_end_transaction(trans);
-               return ret;
-       }
-
        if (!test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &cur_trans->flags)) {
                int run_it = 0;
 
index ee086fc56c30edbcb0e4529609e6519178da9409..b62be84833e9a84654b581bdc287e4ffe6d952f8 100644 (file)
@@ -2592,7 +2592,7 @@ int btrfs_init_new_device(struct btrfs_fs_info *fs_info, const char *device_path
        set_blocksize(device->bdev, BTRFS_BDEV_BLOCKSIZE);
 
        if (seeding_dev) {
-               sb->s_flags &= ~SB_RDONLY;
+               btrfs_clear_sb_rdonly(sb);
                ret = btrfs_prepare_sprout(fs_info);
                if (ret) {
                        btrfs_abort_transaction(trans, ret);
@@ -2728,7 +2728,7 @@ error_sysfs:
        mutex_unlock(&fs_info->fs_devices->device_list_mutex);
 error_trans:
        if (seeding_dev)
-               sb->s_flags |= SB_RDONLY;
+               btrfs_set_sb_rdonly(sb);
        if (trans)
                btrfs_end_transaction(trans);
 error_free_zone:
index 1a0a827a7f34578984f6185c9d235d4248bae8a7..be799040a4154ab075a7aeb104309730ce8ddc8e 100644 (file)
@@ -372,20 +372,3 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
        }
        return err;
 }
-
-int __ext4_handle_dirty_super(const char *where, unsigned int line,
-                             handle_t *handle, struct super_block *sb)
-{
-       struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
-       int err = 0;
-
-       ext4_superblock_csum_set(sb);
-       if (ext4_handle_valid(handle)) {
-               err = jbd2_journal_dirty_metadata(handle, bh);
-               if (err)
-                       ext4_journal_abort_handle(where, line, __func__,
-                                                 bh, handle, err);
-       } else
-               mark_buffer_dirty(bh);
-       return err;
-}
index a124c68b0c75e10938578dab8f257c09e70cff86..0d2fa423b7adbea8e4e12eec74552ee71884b861 100644 (file)
@@ -244,9 +244,6 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
                                 handle_t *handle, struct inode *inode,
                                 struct buffer_head *bh);
 
-int __ext4_handle_dirty_super(const char *where, unsigned int line,
-                             handle_t *handle, struct super_block *sb);
-
 #define ext4_journal_get_write_access(handle, bh) \
        __ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh))
 #define ext4_forget(handle, is_metadata, inode, bh, block_nr) \
@@ -257,8 +254,6 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
 #define ext4_handle_dirty_metadata(handle, inode, bh) \
        __ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \
                                     (bh))
-#define ext4_handle_dirty_super(handle, sb) \
-       __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb))
 
 handle_t *__ext4_journal_start_sb(struct super_block *sb, unsigned int line,
                                  int type, int blocks, int rsv_blocks,
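
Every ext4 hunk that follows replaces ext4_handle_dirty_super() with the same open-coded sequence: get journal write access, modify superblock fields under the buffer lock, recompute the checksum before unlocking, then journal the buffer as ordinary metadata. A hypothetical helper capturing the pattern (not part of the patch; shown only to name the steps):

static int ext4_journalled_sb_update(handle_t *handle, struct super_block *sb,
                                     void (*update)(struct super_block *sb))
{
        struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
        int err;

        err = ext4_journal_get_write_access(handle, sbh);
        if (err)
                return err;
        lock_buffer(sbh);               /* no torn updates visible to writeback */
        update(sb);                     /* modify ext4_super_block fields */
        ext4_superblock_csum_set(sb);   /* checksum must cover the new contents */
        unlock_buffer(sbh);
        return ext4_handle_dirty_metadata(handle, NULL, sbh);
}

Holding the buffer lock while the checksum is recomputed is what the old ext4_handle_dirty_super() lacked; without it, concurrent writeback could push out a superblock whose checksum does not match its contents.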
index 3ed8c048fb12c547775850ba62d364bc08362c20..349b27f0dda0cba64cc10575bb233b9b52de2bd4 100644 (file)
@@ -809,9 +809,12 @@ static int ext4_sample_last_mounted(struct super_block *sb,
        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
        if (err)
                goto out_journal;
-       strlcpy(sbi->s_es->s_last_mounted, cp,
+       lock_buffer(sbi->s_sbh);
+       strncpy(sbi->s_es->s_last_mounted, cp,
                sizeof(sbi->s_es->s_last_mounted));
-       ext4_handle_dirty_super(handle, sb);
+       ext4_superblock_csum_set(sb);
+       unlock_buffer(sbi->s_sbh);
+       ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
 out_journal:
        ext4_journal_stop(handle);
 out:
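
The switch from strlcpy() to strncpy() above looks intentional: s_last_mounted is a fixed-width on-disk field that does not need NUL termination, and strncpy() zero-pads short names instead of guaranteeing a terminator. The difference in standard C:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char field[8];

        /* Short source: copied and zero-padded to the full field width. */
        strncpy(field, "ab", sizeof(field));

        /* Long source: fills the field exactly, with no terminating NUL. */
        strncpy(field, "abcdefghij", sizeof(field));
        printf("%.8s\n", field);        /* bounded read, prints "abcdefgh" */
        return 0;
}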
index 27946882d4ce45b7b544bce1b28a26eef006cd64..c173c840585618b0be404937154dd8516eccf60a 100644 (file)
@@ -5150,9 +5150,13 @@ static int ext4_do_update_inode(handle_t *handle,
                err = ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh);
                if (err)
                        goto out_brelse;
+               lock_buffer(EXT4_SB(sb)->s_sbh);
                ext4_set_feature_large_file(sb);
+               ext4_superblock_csum_set(sb);
+               unlock_buffer(EXT4_SB(sb)->s_sbh);
                ext4_handle_sync(handle);
-               err = ext4_handle_dirty_super(handle, sb);
+               err = ext4_handle_dirty_metadata(handle, NULL,
+                                                EXT4_SB(sb)->s_sbh);
        }
        ext4_update_inode_fsync_trans(handle, inode, need_datasync);
 out_brelse:
index 524e134324475e4fffd769b30e5b13bc5cb258f9..d9665d2f82db845175e290b70d1ea7cb26f9b938 100644 (file)
@@ -1157,7 +1157,10 @@ resizefs_out:
                        err = ext4_journal_get_write_access(handle, sbi->s_sbh);
                        if (err)
                                goto pwsalt_err_journal;
+                       lock_buffer(sbi->s_sbh);
                        generate_random_uuid(sbi->s_es->s_encrypt_pw_salt);
+                       ext4_superblock_csum_set(sb);
+                       unlock_buffer(sbi->s_sbh);
                        err = ext4_handle_dirty_metadata(handle, NULL,
                                                         sbi->s_sbh);
                pwsalt_err_journal:
index b17a082b7db15298e99813cb22d07fd4d0c2e458..9c2bc58bc737479f1d2906c2e137cb5234842382 100644 (file)
@@ -2976,14 +2976,17 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
            (le32_to_cpu(sbi->s_es->s_inodes_count))) {
                /* Insert this inode at the head of the on-disk orphan list */
                NEXT_ORPHAN(inode) = le32_to_cpu(sbi->s_es->s_last_orphan);
+               lock_buffer(sbi->s_sbh);
                sbi->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
+               ext4_superblock_csum_set(sb);
+               unlock_buffer(sbi->s_sbh);
                dirty = true;
        }
        list_add(&EXT4_I(inode)->i_orphan, &sbi->s_orphan);
        mutex_unlock(&sbi->s_orphan_lock);
 
        if (dirty) {
-               err = ext4_handle_dirty_super(handle, sb);
+               err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
                rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
                if (!err)
                        err = rc;
@@ -3059,9 +3062,12 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
                        mutex_unlock(&sbi->s_orphan_lock);
                        goto out_brelse;
                }
+               lock_buffer(sbi->s_sbh);
                sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
+               ext4_superblock_csum_set(inode->i_sb);
+               unlock_buffer(sbi->s_sbh);
                mutex_unlock(&sbi->s_orphan_lock);
-               err = ext4_handle_dirty_super(handle, inode->i_sb);
+               err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
        } else {
                struct ext4_iloc iloc2;
                struct inode *i_prev =
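
For context on the s_last_orphan updates above: the on-disk orphan list is a singly linked list threaded through the inodes themselves, with the superblock holding the head inode number and each orphan's NEXT_ORPHAN() slot holding the next one. Head insertion, in a self-contained sketch (hypothetical types):

#include <stdint.h>
#include <stdio.h>

struct fake_inode {
        uint32_t ino;
        uint32_t next_orphan;   /* stands in for NEXT_ORPHAN(inode) */
};

/* Insert at the head: inode points at the old head, sb points at inode. */
static void orphan_add(uint32_t *s_last_orphan, struct fake_inode *inode)
{
        inode->next_orphan = *s_last_orphan;
        *s_last_orphan = inode->ino;
}

int main(void)
{
        uint32_t s_last_orphan = 0;     /* empty list */
        struct fake_inode a = { .ino = 12 }, b = { .ino = 34 };

        orphan_add(&s_last_orphan, &a);
        orphan_add(&s_last_orphan, &b);
        printf("head=%u -> %u -> %u (end)\n",
               s_last_orphan, b.next_orphan, a.next_orphan);
        return 0;
}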
index 928700d57eb67e5bc01340af2dd8826a6e9c6718..bd0d185654f3357cdc7a5826a6d3afa87c052dff 100644 (file)
@@ -899,8 +899,11 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
        EXT4_SB(sb)->s_gdb_count++;
        ext4_kvfree_array_rcu(o_group_desc);
 
+       lock_buffer(EXT4_SB(sb)->s_sbh);
        le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
-       err = ext4_handle_dirty_super(handle, sb);
+       ext4_superblock_csum_set(sb);
+       unlock_buffer(EXT4_SB(sb)->s_sbh);
+       err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
        if (err)
                ext4_std_error(sb, err);
        return err;
@@ -1384,6 +1387,7 @@ static void ext4_update_super(struct super_block *sb,
        reserved_blocks *= blocks_count;
        do_div(reserved_blocks, 100);
 
+       lock_buffer(sbi->s_sbh);
        ext4_blocks_count_set(es, ext4_blocks_count(es) + blocks_count);
        ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + free_blocks);
        le32_add_cpu(&es->s_inodes_count, EXT4_INODES_PER_GROUP(sb) *
@@ -1421,6 +1425,8 @@ static void ext4_update_super(struct super_block *sb,
         * active. */
        ext4_r_blocks_count_set(es, ext4_r_blocks_count(es) +
                                reserved_blocks);
+       ext4_superblock_csum_set(sb);
+       unlock_buffer(sbi->s_sbh);
 
        /* Update the free space counts */
        percpu_counter_add(&sbi->s_freeclusters_counter,
@@ -1515,7 +1521,7 @@ static int ext4_flex_group_add(struct super_block *sb,
 
        ext4_update_super(sb, flex_gd);
 
-       err = ext4_handle_dirty_super(handle, sb);
+       err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
 
 exit_journal:
        err2 = ext4_journal_stop(handle);
@@ -1717,15 +1723,18 @@ static int ext4_group_extend_no_check(struct super_block *sb,
                goto errout;
        }
 
+       lock_buffer(EXT4_SB(sb)->s_sbh);
        ext4_blocks_count_set(es, o_blocks_count + add);
        ext4_free_blocks_count_set(es, ext4_free_blocks_count(es) + add);
+       ext4_superblock_csum_set(sb);
+       unlock_buffer(EXT4_SB(sb)->s_sbh);
        ext4_debug("freeing blocks %llu through %llu\n", o_blocks_count,
                   o_blocks_count + add);
        /* We add the blocks to the bitmap and set the group need init bit */
        err = ext4_group_add_blocks(handle, sb, o_blocks_count, add);
        if (err)
                goto errout;
-       ext4_handle_dirty_super(handle, sb);
+       ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
        ext4_debug("freed blocks %llu through %llu\n", o_blocks_count,
                   o_blocks_count + add);
 errout:
@@ -1874,12 +1883,15 @@ static int ext4_convert_meta_bg(struct super_block *sb, struct inode *inode)
        if (err)
                goto errout;
 
+       lock_buffer(sbi->s_sbh);
        ext4_clear_feature_resize_inode(sb);
        ext4_set_feature_meta_bg(sb);
        sbi->s_es->s_first_meta_bg =
                cpu_to_le32(num_desc_blocks(sb, sbi->s_groups_count));
+       ext4_superblock_csum_set(sb);
+       unlock_buffer(sbi->s_sbh);
 
-       err = ext4_handle_dirty_super(handle, sb);
+       err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh);
        if (err) {
                ext4_std_error(sb, err);
                goto errout;
index 21121787c874e186ed547177adcffdebbd3891a6..c3ae7c706566d05f0af309587d1bcde0be67a427 100644 (file)
@@ -65,7 +65,8 @@ static struct ratelimit_state ext4_mount_msg_ratelimit;
 static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
                             unsigned long journal_devnum);
 static int ext4_show_options(struct seq_file *seq, struct dentry *root);
-static int ext4_commit_super(struct super_block *sb, int sync);
+static void ext4_update_super(struct super_block *sb);
+static int ext4_commit_super(struct super_block *sb);
 static int ext4_mark_recovery_complete(struct super_block *sb,
                                        struct ext4_super_block *es);
 static int ext4_clear_journal_err(struct super_block *sb,
@@ -586,6 +587,7 @@ static int ext4_errno_to_code(int errno)
        return EXT4_ERR_UNKNOWN;
 }
 
-static void __save_error_info(struct super_block *sb, int error,
-                             __u32 ino, __u64 block,
-                             const char *func, unsigned int line)
-{
-       EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
-       if (bdev_read_only(sb->s_bdev))
-               return;
+static void save_error_info(struct super_block *sb, int error,
+                           __u32 ino, __u64 block,
+                           const char *func, unsigned int line)
+{
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
+
        /* We default to EFSCORRUPTED error... */
        if (error == 0)
                error = EFSCORRUPTED;
@@ -618,6 +628,7 @@ static void save_error_info(struct super_block *sb, int error,
        spin_unlock(&sbi->s_error_lock);
 }
 
-static void save_error_info(struct super_block *sb, int error,
-                           __u32 ino, __u64 block,
-                           const char *func, unsigned int line)
-{
-       __save_error_info(sb, error, ino, block, func, line);
-       if (!bdev_read_only(sb->s_bdev))
-               ext4_commit_super(sb, 1);
-}
-
 /* Deal with the reporting of failure conditions on a filesystem such as
  * inconsistencies detected or read IO failures.
  *
@@ -647,6 +660,7 @@ static void save_error_info(struct super_block *sb, int error,
  * used to deal with unrecoverable failures such as journal IO errors or ENOMEM
  * at a critical moment in log management.
  */
-static void ext4_handle_error(struct super_block *sb, bool force_ro)
-{
-       journal_t *journal = EXT4_SB(sb)->s_journal;
-
-       ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
-       if (journal)
-               jbd2_journal_abort(journal, -EIO);
+static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
+                             __u32 ino, __u64 block,
+                             const char *func, unsigned int line)
+{
+       journal_t *journal = EXT4_SB(sb)->s_journal;
+       bool continue_fs = !force_ro && test_opt(sb, ERRORS_CONT);
+
+       EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+       if (test_opt(sb, WARN_ON_ERROR))
+               WARN_ON_ONCE(1);
+
+       if (!continue_fs && !sb_rdonly(sb)) {
+               ext4_set_mount_flag(sb, EXT4_MF_FS_ABORTED);
+               if (journal)
+                       jbd2_journal_abort(journal, -EIO);
+       }
+
+       if (!bdev_read_only(sb->s_bdev)) {
+               save_error_info(sb, error, ino, block, func, line);
+               /*
+                * In case the fs should keep running, we need to writeout
+                * superblock through the journal. Due to lock ordering
+                * constraints, it may not be safe to do it right here so we
+                * defer superblock flushing to a workqueue.
+                */
+               if (continue_fs)
+                       schedule_work(&EXT4_SB(sb)->s_error_work);
+               else
+                       ext4_commit_super(sb);
+       }
+
+       if (sb_rdonly(sb) || continue_fs)
+               return;
+
        /*
         * We force ERRORS_RO behavior when system is rebooting. Otherwise we
         * could panic during 'reboot -f' as the underlying device got already
@@ -682,8 +732,44 @@ static void flush_stashed_error_work(struct work_struct *work)
 {
        struct ext4_sb_info *sbi = container_of(work, struct ext4_sb_info,
                                                s_error_work);
+<<<<<<< HEAD
 
        ext4_commit_super(sbi->s_sb, 1);
+=======
+       journal_t *journal = sbi->s_journal;
+       handle_t *handle;
+
+       /*
+        * If the journal is still running, we have to write out the
+        * superblock through the journal to avoid collisions with other
+        * journalled sb updates.
+        *
+        * We use jbd2 functions directly here to avoid recursing back
+        * into ext4 error handling code while handling previous errors.
+        */
+       if (!sb_rdonly(sbi->s_sb) && journal) {
+               handle = jbd2_journal_start(journal, 1);
+               if (IS_ERR(handle))
+                       goto write_directly;
+               if (jbd2_journal_get_write_access(handle, sbi->s_sbh)) {
+                       jbd2_journal_stop(handle);
+                       goto write_directly;
+               }
+               ext4_update_super(sbi->s_sb);
+               if (jbd2_journal_dirty_metadata(handle, sbi->s_sbh)) {
+                       jbd2_journal_stop(handle);
+                       goto write_directly;
+               }
+               jbd2_journal_stop(handle);
+               return;
+       }
+write_directly:
+       /*
+        * Writing through the journal failed. Write the sb directly to get
+        * the error info out and hope for the best.
+        */
+       ext4_commit_super(sbi->s_sb);
+>>>>>>> linux-next/akpm-base
 }
 
 #define ext4_error_ratelimit(sb)                                       \
@@ -710,8 +796,12 @@ void __ext4_error(struct super_block *sb, const char *function,
                       sb->s_id, function, line, current->comm, &vaf);
                va_end(args);
        }
+<<<<<<< HEAD
        save_error_info(sb, error, 0, block, function, line);
        ext4_handle_error(sb, force_ro);
+=======
+       ext4_handle_error(sb, force_ro, error, 0, block, function, line);
+>>>>>>> linux-next/akpm-base
 }
 
 void __ext4_error_inode(struct inode *inode, const char *function,
@@ -741,9 +831,14 @@ void __ext4_error_inode(struct inode *inode, const char *function,
                               current->comm, &vaf);
                va_end(args);
        }
+<<<<<<< HEAD
        save_error_info(inode->i_sb, error, inode->i_ino, block,
                        function, line);
        ext4_handle_error(inode->i_sb, false);
+=======
+       ext4_handle_error(inode->i_sb, false, error, inode->i_ino, block,
+                         function, line);
+>>>>>>> linux-next/akpm-base
 }
 
 void __ext4_error_file(struct file *file, const char *function,
@@ -780,9 +875,14 @@ void __ext4_error_file(struct file *file, const char *function,
                               current->comm, path, &vaf);
                va_end(args);
        }
+<<<<<<< HEAD
        save_error_info(inode->i_sb, EFSCORRUPTED, inode->i_ino, block,
                        function, line);
        ext4_handle_error(inode->i_sb, false);
+=======
+       ext4_handle_error(inode->i_sb, false, EFSCORRUPTED, inode->i_ino, block,
+                         function, line);
+>>>>>>> linux-next/akpm-base
 }
 
 const char *ext4_decode_error(struct super_block *sb, int errno,
@@ -849,8 +949,12 @@ void __ext4_std_error(struct super_block *sb, const char *function,
                       sb->s_id, function, line, errstr);
        }
 
+<<<<<<< HEAD
        save_error_info(sb, -errno, 0, 0, function, line);
        ext4_handle_error(sb, false);
+=======
+       ext4_handle_error(sb, false, -errno, 0, 0, function, line);
+>>>>>>> linux-next/akpm-base
 }
 
 void __ext4_msg(struct super_block *sb,
@@ -944,6 +1048,7 @@ __acquires(bitlock)
        if (test_opt(sb, ERRORS_CONT)) {
                if (test_opt(sb, WARN_ON_ERROR))
                        WARN_ON_ONCE(1);
+<<<<<<< HEAD
                __save_error_info(sb, EFSCORRUPTED, ino, block, function, line);
                schedule_work(&EXT4_SB(sb)->s_error_work);
                return;
@@ -951,6 +1056,18 @@ __acquires(bitlock)
        ext4_unlock_group(sb, grp);
        save_error_info(sb, EFSCORRUPTED, ino, block, function, line);
        ext4_handle_error(sb, false);
+=======
+               EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+               if (!bdev_read_only(sb->s_bdev)) {
+                       save_error_info(sb, EFSCORRUPTED, ino, block, function,
+                                       line);
+                       schedule_work(&EXT4_SB(sb)->s_error_work);
+               }
+               return;
+       }
+       ext4_unlock_group(sb, grp);
+       ext4_handle_error(sb, false, EFSCORRUPTED, ino, block, function, line);
+>>>>>>> linux-next/akpm-base
        /*
         * We only get here in the ERRORS_RO case; relocking the group
         * may be dangerous, but nothing bad will happen since the
@@ -1152,7 +1269,7 @@ static void ext4_put_super(struct super_block *sb)
                es->s_state = cpu_to_le16(sbi->s_mount_state);
        }
        if (!sb_rdonly(sb))
-               ext4_commit_super(sb, 1);
+               ext4_commit_super(sb);
 
        rcu_read_lock();
        group_desc = rcu_dereference(sbi->s_group_desc);
@@ -2642,7 +2759,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
        if (sbi->s_journal)
                ext4_set_feature_journal_needs_recovery(sb);
 
-       err = ext4_commit_super(sb, 1);
+       err = ext4_commit_super(sb);
 done:
        if (test_opt(sb, DEBUG))
                printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
@@ -4145,6 +4262,16 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 
        if (le32_to_cpu(es->s_log_block_size) >
            (EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+<<<<<<< HEAD
+               ext4_msg(sb, KERN_ERR,
+                        "Invalid log block size: %u",
+                        le32_to_cpu(es->s_log_block_size));
+               goto failed_mount;
+       }
+       if (le32_to_cpu(es->s_log_cluster_size) >
+           (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+               ext4_msg(sb, KERN_ERR,
+=======
                ext4_msg(sb, KERN_ERR,
                         "Invalid log block size: %u",
                         le32_to_cpu(es->s_log_block_size));
@@ -4153,6 +4280,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        if (le32_to_cpu(es->s_log_cluster_size) >
            (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
                ext4_msg(sb, KERN_ERR,
+>>>>>>> linux-next/akpm-base
                         "Invalid log cluster size: %u",
                         le32_to_cpu(es->s_log_cluster_size));
                goto failed_mount;
@@ -4868,7 +4996,7 @@ no_journal:
        if (DUMMY_ENCRYPTION_ENABLED(sbi) && !sb_rdonly(sb) &&
            !ext4_has_feature_encrypt(sb)) {
                ext4_set_feature_encrypt(sb);
-               ext4_commit_super(sb, 1);
+               ext4_commit_super(sb);
        }
 
        /*
@@ -5418,7 +5546,7 @@ static int ext4_load_journal(struct super_block *sb,
                es->s_journal_dev = cpu_to_le32(journal_devnum);
 
                /* Make sure we flush the recovery flag to disk. */
-               ext4_commit_super(sb, 1);
+               ext4_commit_super(sb);
        }
 
        return 0;
@@ -5428,16 +5556,23 @@ err_out:
        return err;
 }
 
-static int ext4_commit_super(struct super_block *sb, int sync)
+/* Copy state of EXT4_SB(sb) into buffer for on-disk superblock */
+static void ext4_update_super(struct super_block *sb)
 {
        struct ext4_sb_info *sbi = EXT4_SB(sb);
+<<<<<<< HEAD
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
        struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
        int error = 0;
 
        if (!sbh || block_device_ejected(sb))
                return error;
+=======
+       struct ext4_super_block *es = sbi->s_es;
+       struct buffer_head *sbh = sbi->s_sbh;
+>>>>>>> linux-next/akpm-base
 
+       lock_buffer(sbh);
        /*
         * If the file system is mounted read-only, don't update the
         * superblock write time.  This avoids updating the superblock
@@ -5451,17 +5586,21 @@ static int ext4_commit_super(struct super_block *sb, int sync)
        if (!(sb->s_flags & SB_RDONLY))
                ext4_update_tstamp(es, s_wtime);
        es->s_kbytes_written =
-               cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
+               cpu_to_le64(sbi->s_kbytes_written +
                    ((part_stat_read(sb->s_bdev, sectors[STAT_WRITE]) -
-                     EXT4_SB(sb)->s_sectors_written_start) >> 1));
-       if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeclusters_counter))
+                     sbi->s_sectors_written_start) >> 1));
+       if (percpu_counter_initialized(&sbi->s_freeclusters_counter))
                ext4_free_blocks_count_set(es,
-                       EXT4_C2B(EXT4_SB(sb), percpu_counter_sum_positive(
-                               &EXT4_SB(sb)->s_freeclusters_counter)));
-       if (percpu_counter_initialized(&EXT4_SB(sb)->s_freeinodes_counter))
+                       EXT4_C2B(sbi, percpu_counter_sum_positive(
+                               &sbi->s_freeclusters_counter)));
+       if (percpu_counter_initialized(&sbi->s_freeinodes_counter))
                es->s_free_inodes_count =
                        cpu_to_le32(percpu_counter_sum_positive(
+<<<<<<< HEAD
                                &EXT4_SB(sb)->s_freeinodes_counter));
+=======
+                               &sbi->s_freeinodes_counter));
+>>>>>>> linux-next/akpm-base
        /* Copy error information to the on-disk superblock */
        spin_lock(&sbi->s_error_lock);
        if (sbi->s_add_error_count > 0) {
@@ -5502,10 +5641,24 @@ static int ext4_commit_super(struct super_block *sb, int sync)
        }
        spin_unlock(&sbi->s_error_lock);
 
+<<<<<<< HEAD
        BUFFER_TRACE(sbh, "marking dirty");
+=======
+>>>>>>> linux-next/akpm-base
        ext4_superblock_csum_set(sb);
-       if (sync)
-               lock_buffer(sbh);
+       unlock_buffer(sbh);
+}
+
+static int ext4_commit_super(struct super_block *sb)
+{
+       struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
+       int error = 0;
+
+       if (!sbh || block_device_ejected(sb))
+               return error;
+
+       ext4_update_super(sb);
+
        if (buffer_write_io_error(sbh) || !buffer_uptodate(sbh)) {
                /*
                 * Oh, dear.  A previous attempt to write the
@@ -5520,17 +5673,15 @@ static int ext4_commit_super(struct super_block *sb, int sync)
                clear_buffer_write_io_error(sbh);
                set_buffer_uptodate(sbh);
        }
+       BUFFER_TRACE(sbh, "marking dirty");
        mark_buffer_dirty(sbh);
-       if (sync) {
-               unlock_buffer(sbh);
-               error = __sync_dirty_buffer(sbh,
-                       REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
-               if (buffer_write_io_error(sbh)) {
-                       ext4_msg(sb, KERN_ERR, "I/O error while writing "
-                              "superblock");
-                       clear_buffer_write_io_error(sbh);
-                       set_buffer_uptodate(sbh);
-               }
+       error = __sync_dirty_buffer(sbh,
+               REQ_SYNC | (test_opt(sb, BARRIER) ? REQ_FUA : 0));
+       if (buffer_write_io_error(sbh)) {
+               ext4_msg(sb, KERN_ERR, "I/O error while writing "
+                      "superblock");
+               clear_buffer_write_io_error(sbh);
+               set_buffer_uptodate(sbh);
        }
        return error;
 }
@@ -5561,7 +5712,7 @@ static int ext4_mark_recovery_complete(struct super_block *sb,
 
        if (ext4_has_feature_journal_needs_recovery(sb) && sb_rdonly(sb)) {
                ext4_clear_feature_journal_needs_recovery(sb);
-               ext4_commit_super(sb, 1);
+               ext4_commit_super(sb);
        }
 out:
        jbd2_journal_unlock_updates(journal);
@@ -5603,7 +5754,7 @@ static int ext4_clear_journal_err(struct super_block *sb,
 
                EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
                es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
-               ext4_commit_super(sb, 1);
+               ext4_commit_super(sb);
 
                jbd2_journal_clear_err(journal);
                jbd2_journal_update_sb_errno(journal);
@@ -5705,7 +5856,7 @@ static int ext4_freeze(struct super_block *sb)
                ext4_clear_feature_journal_needs_recovery(sb);
        }
 
-       error = ext4_commit_super(sb, 1);
+       error = ext4_commit_super(sb);
 out:
        if (journal)
                /* we rely on upper layer to stop further updates */
@@ -5727,7 +5878,7 @@ static int ext4_unfreeze(struct super_block *sb)
                ext4_set_feature_journal_needs_recovery(sb);
        }
 
-       ext4_commit_super(sb, 1);
+       ext4_commit_super(sb);
        return 0;
 }
 
@@ -5987,7 +6138,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
        }
 
        if (sbi->s_journal == NULL && !(old_sb_flags & SB_RDONLY)) {
-               err = ext4_commit_super(sb, 1);
+               err = ext4_commit_super(sb);
                if (err)
                        goto restore_opts;
        }
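
The fs/ext4/super.c hunks above restructure ext4's error handling: error
recording folds into ext4_handle_error(), which now takes the error details
directly, and superblock flushing moves to the s_error_work workqueue so
that, when the filesystem keeps running, the superblock can be written out
through the journal. A minimal sketch of the change in calling convention,
using only names from the hunks above (illustrative, not a drop-in):

    /* before: error info saved separately from handling */
    save_error_info(sb, error, ino, block, func, line);
    ext4_handle_error(sb, force_ro);

    /* after: one call; ext4_handle_error() records the error itself,
     * then either queues s_error_work (journalled write-out via jbd2)
     * if the fs keeps running, or calls ext4_commit_super() directly. */
    ext4_handle_error(sb, force_ro, error, ino, block, func, line);
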
index 4e3b1f8c2e81eaaf5489ee96f2c63a5d6e8681da..372208500f4e766f0167ce4597f777dafc6891f2 100644 (file)
@@ -792,8 +792,11 @@ static void ext4_xattr_update_super_block(handle_t *handle,
 
        BUFFER_TRACE(EXT4_SB(sb)->s_sbh, "get_write_access");
        if (ext4_journal_get_write_access(handle, EXT4_SB(sb)->s_sbh) == 0) {
+               lock_buffer(EXT4_SB(sb)->s_sbh);
                ext4_set_feature_xattr(sb);
-               ext4_handle_dirty_super(handle, sb);
+               ext4_superblock_csum_set(sb);
+               unlock_buffer(EXT4_SB(sb)->s_sbh);
+               ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh);
        }
 }
 
index 1e899298f7f00988d3400cff3fc9657f98c488ab..b5d702df7111a919a79f903ffe5462845deb4bd3 100644 (file)
                                 * fsck() must be run to repair
                                 */
 #define        FM_EXTENDFS 0x00000008  /* file system extendfs() in progress */
+#define        FM_STATE_MAX 0x0000000f /* max value of s_state */
 
 #endif                         /* _H_JFS_FILSYS */
index 2935d4c776ec75e87980d6603b17181a2ab6f6e4..5d7d7170c03c0241b86c240a6f507d2a2b14b1e9 100644 (file)
@@ -37,6 +37,7 @@
 #include <linux/fs.h>
 #include <linux/buffer_head.h>
 #include <linux/blkdev.h>
+#include <linux/log2.h>
 
 #include "jfs_incore.h"
 #include "jfs_filsys.h"
@@ -366,6 +367,15 @@ static int chkSuper(struct super_block *sb)
        sbi->bsize = bsize;
        sbi->l2bsize = le16_to_cpu(j_sb->s_l2bsize);
 
+       /* check some fields for possible corruption */
+       if (sbi->l2bsize != ilog2((u32)bsize) ||
+           j_sb->pad != 0 ||
+           le32_to_cpu(j_sb->s_state) > FM_STATE_MAX) {
+               rc = -EINVAL;
+               jfs_err("jfs_mount: Mount Failure: superblock is corrupt!");
+               goto out;
+       }
+
        /*
         * For now, ignore s_pbsize, l2bfactor.  All I/O going through buffer
         * cache.
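
The chkSuper() hunk above rejects superblocks whose stored log2 block size
disagrees with the block size, whose pad field is nonzero, or whose s_state
exceeds FM_STATE_MAX. A slightly stricter stand-alone restatement of that
test (a sketch: it additionally insists bsize is a power of two, and uses a
compiler builtin in place of the kernel's ilog2()):

    #include <stdbool.h>
    #include <stdint.h>

    #define FM_STATE_MAX 0x0000000f     /* mirrors the jfs_filsys.h hunk */

    static bool jfs_super_fields_sane(uint32_t bsize, uint16_t l2bsize,
                                      uint32_t pad, uint32_t s_state)
    {
            /* bsize must be a power of two whose log2 matches l2bsize */
            if (bsize == 0 || (bsize & (bsize - 1)) != 0)
                    return false;
            if ((uint32_t)__builtin_ctz(bsize) != l2bsize)
                    return false;
            return pad == 0 && s_state <= FM_STATE_MAX;
    }
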
index ce6c376e0bc2d9378af90d141d12bf387d9c11b9..84d176c0b37f059936a658297e1f7e7913de299b 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/poll.h>
 #include <linux/ns_common.h>
 #include <linux/fs_pin.h>
+#include <linux/watch_queue.h>
 
 struct mnt_namespace {
        struct ns_common        ns;
@@ -77,6 +78,9 @@ struct mount {
        int mnt_expiry_mark;            /* true if marked for expiry */
        struct hlist_head mnt_pins;
        struct hlist_head mnt_stuck_children;
+#ifdef CONFIG_MOUNT_NOTIFICATIONS
+       struct watch_list *mnt_watchers; /* Watches on dentries within this mount */
+#endif
 } __randomize_layout;
 
 #define MNT_NS_INTERNAL ERR_PTR(-EINVAL) /* distinct from any mnt_namespace */
@@ -158,3 +162,17 @@ static inline bool is_anon_ns(struct mnt_namespace *ns)
 }
 
 extern void mnt_cursor_del(struct mnt_namespace *ns, struct mount *cursor);
+
+#ifdef CONFIG_MOUNT_NOTIFICATIONS
+extern void notify_mount(struct mount *triggered,
+                        struct mount *aux,
+                        enum mount_notification_subtype subtype,
+                        u32 info_flags);
+#else
+static inline void notify_mount(struct mount *triggered,
+                               struct mount *aux,
+                               enum mount_notification_subtype subtype,
+                               u32 info_flags)
+{
+}
+#endif
diff --git a/fs/mount_notify.c b/fs/mount_notify.c
new file mode 100644 (file)
index 0000000..44f570e
--- /dev/null
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Provide mount topology/attribute change notifications.
+ *
+ * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#include <linux/fs.h>
+#include <linux/namei.h>
+#include <linux/syscalls.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+#include "mount.h"
+
+/*
+ * Post mount notifications to all watches going rootwards along the tree.
+ *
+ * Must be called with the mount_lock held.
+ */
+static void post_mount_notification(struct mount *changed,
+                                   struct mount_notification *notify)
+{
+       const struct cred *cred = current_cred();
+       struct path cursor;
+       struct mount *mnt;
+       unsigned seq;
+
+       seq = 0;
+       rcu_read_lock();
+restart:
+       cursor.mnt = &changed->mnt;
+       cursor.dentry = changed->mnt.mnt_root;
+       mnt = real_mount(cursor.mnt);
+       notify->watch.info &= ~NOTIFY_MOUNT_IN_SUBTREE;
+
+       read_seqbegin_or_lock(&rename_lock, &seq);
+       for (;;) {
+               if (mnt->mnt_watchers &&
+                   !hlist_empty(&mnt->mnt_watchers->watchers)) {
+                       if (cursor.dentry->d_flags & DCACHE_MOUNT_WATCH)
+                               post_watch_notification(mnt->mnt_watchers,
+                                                       &notify->watch, cred,
+                                                       (unsigned long)cursor.dentry);
+               } else {
+                       cursor.dentry = mnt->mnt.mnt_root;
+               }
+               notify->watch.info |= NOTIFY_MOUNT_IN_SUBTREE;
+
+               if (cursor.dentry == cursor.mnt->mnt_root ||
+                   IS_ROOT(cursor.dentry)) {
+                       struct mount *parent = READ_ONCE(mnt->mnt_parent);
+
+                       /* Escaped? */
+                       if (cursor.dentry != cursor.mnt->mnt_root)
+                               break;
+
+                       /* Global root? */
+                       if (mnt == parent)
+                               break;
+
+                       cursor.dentry = READ_ONCE(mnt->mnt_mountpoint);
+                       mnt = parent;
+                       cursor.mnt = &mnt->mnt;
+               } else {
+                       cursor.dentry = cursor.dentry->d_parent;
+               }
+       }
+
+       if (need_seqretry(&rename_lock, seq)) {
+               seq = 1;
+               goto restart;
+       }
+
+       done_seqretry(&rename_lock, seq);
+       rcu_read_unlock();
+}
+
+/*
+ * Generate a mount notification.
+ */
+void notify_mount(struct mount *trigger,
+                 struct mount *aux,
+                 enum mount_notification_subtype subtype,
+                 u32 info_flags)
+{
+       struct mount_notification n;
+
+       memset(&n, 0, sizeof(n));
+       n.watch.type    = WATCH_TYPE_MOUNT_NOTIFY;
+       n.watch.subtype = subtype;
+       n.watch.info    = info_flags | watch_sizeof(n);
+       n.triggered_on  = trigger->mnt_id;
+
+       switch (subtype) {
+       case NOTIFY_MOUNT_EXPIRY:
+       case NOTIFY_MOUNT_READONLY:
+       case NOTIFY_MOUNT_SETATTR:
+               break;
+
+       case NOTIFY_MOUNT_NEW_MOUNT:
+       case NOTIFY_MOUNT_UNMOUNT:
+       case NOTIFY_MOUNT_MOVE_FROM:
+       case NOTIFY_MOUNT_MOVE_TO:
+               n.auxiliary_mount       = aux->mnt_id;
+               break;
+
+       default:
+               BUG();
+       }
+
+       post_mount_notification(trigger, &n);
+}
+
+static void release_mount_watch(struct watch *watch)
+{
+       struct dentry *dentry = (struct dentry *)(unsigned long)watch->id;
+
+       dput(dentry);
+}
+
+/**
+ * sys_watch_mount - Watch for mount topology/attribute changes
+ * @dfd: Base directory to pathwalk from, or an fd referring to a mount.
+ * @filename: Path to the mount on which to place the watch.
+ * @at_flags: Pathwalk control flags.
+ * @watch_fd: The watch queue to send notifications to.
+ * @watch_id: The watch ID to be placed in the notification (-1 to remove the watch).
+ */
+SYSCALL_DEFINE5(watch_mount,
+               int, dfd,
+               const char __user *, filename,
+               unsigned int, at_flags,
+               int, watch_fd,
+               int, watch_id)
+{
+       struct watch_queue *wqueue;
+       struct watch_list *wlist = NULL;
+       struct watch *watch = NULL;
+       struct mount *m;
+       struct path path;
+       unsigned int lookup_flags =
+               LOOKUP_DIRECTORY | LOOKUP_FOLLOW | LOOKUP_AUTOMOUNT;
+       int ret;
+
+       if (watch_id < -1 || watch_id > 0xff)
+               return -EINVAL;
+       if ((at_flags & ~(AT_NO_AUTOMOUNT | AT_EMPTY_PATH)) != 0)
+               return -EINVAL;
+       if (at_flags & AT_NO_AUTOMOUNT)
+               lookup_flags &= ~LOOKUP_AUTOMOUNT;
+       if (at_flags & AT_EMPTY_PATH)
+               lookup_flags |= LOOKUP_EMPTY;
+
+       ret = user_path_at(dfd, filename, lookup_flags, &path);
+       if (ret)
+               return ret;
+
+       ret = inode_permission(path.dentry->d_inode, MAY_EXEC);
+       if (ret)
+               goto err_path;
+
+       wqueue = get_watch_queue(watch_fd);
+       if (IS_ERR(wqueue)) {
+               ret = PTR_ERR(wqueue);
+               goto err_path;
+       }
+
+       m = real_mount(path.mnt);
+
+       if (watch_id >= 0) {
+               ret = -ENOMEM;
+               if (!READ_ONCE(m->mnt_watchers)) {
+                       wlist = kzalloc(sizeof(*wlist), GFP_KERNEL);
+                       if (!wlist)
+                               goto err_wqueue;
+                       init_watch_list(wlist, release_mount_watch);
+               }
+
+               watch = kzalloc(sizeof(*watch), GFP_KERNEL);
+               if (!watch)
+                       goto err_wlist;
+
+               init_watch(watch, wqueue);
+               watch->id       = (unsigned long)path.dentry;
+               watch->info_id  = (u32)watch_id << WATCH_INFO_ID__SHIFT;
+
+               ret = security_watch_mount(watch, &path);
+               if (ret < 0)
+                       goto err_watch;
+
+               down_write(&m->mnt.mnt_sb->s_umount);
+               if (!m->mnt_watchers) {
+                       m->mnt_watchers = wlist;
+                       wlist = NULL;
+               }
+
+               ret = add_watch_to_object(watch, m->mnt_watchers);
+               if (ret == 0) {
+                       spin_lock(&path.dentry->d_lock);
+                       path.dentry->d_flags |= DCACHE_MOUNT_WATCH;
+                       spin_unlock(&path.dentry->d_lock);
+                       dget(path.dentry);
+                       watch = NULL;
+               }
+               up_write(&m->mnt.mnt_sb->s_umount);
+       } else {
+               down_write(&m->mnt.mnt_sb->s_umount);
+               ret = remove_watch_from_object(m->mnt_watchers, wqueue,
+                                              (unsigned long)path.dentry,
+                                              false);
+               up_write(&m->mnt.mnt_sb->s_umount);
+       }
+
+err_watch:
+       kfree(watch);
+err_wlist:
+       kfree(wlist);
+err_wqueue:
+       put_watch_queue(wqueue);
+err_path:
+       path_put(&path);
+       return ret;
+}
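
A hypothetical userspace caller of the new watch_mount() syscall, pieced
together from the hunks in this patch set: the watch queue is a pipe opened
with O_NOTIFICATION_PIPE (defined in include/uapi/linux/watch_queue.h), and
syscall number 442 comes from the asm-generic unistd.h hunk below, so it is
specific to this tree rather than mainline. Error handling is minimal:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/syscall.h>
    #include <linux/watch_queue.h>

    #ifndef __NR_watch_mount
    #define __NR_watch_mount 442            /* per this patch set */
    #endif

    int main(void)
    {
            int fds[2];

            /* A watch queue is a pipe opened with O_NOTIFICATION_PIPE. */
            if (pipe2(fds, O_NOTIFICATION_PIPE) == -1)
                    return 1;
            ioctl(fds[0], IOC_WATCH_QUEUE_SET_SIZE, 256);

            /* Watch the subtree at /mnt; watch_id 0x01 tags the events. */
            if (syscall(__NR_watch_mount, AT_FDCWD, "/mnt", 0, fds[0], 0x01) == -1)
                    return 1;

            /* ... read(fds[0], ...) and parse struct watch_notification
             * records; see the uapi watch_queue.h hunk below ... */
            return 0;
    }
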
index d2db7dfe232b3ecd299187ed477ad2fd7a0fd1a5..4c9ea37181d76a2b1ec7af7df093206bf5e4367c 100644 (file)
@@ -498,6 +498,9 @@ static int mnt_make_readonly(struct mount *mnt)
        smp_wmb();
        mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
        unlock_mount_hash();
+       if (ret == 0)
+               notify_mount(mnt, NULL, NOTIFY_MOUNT_READONLY,
+                            NOTIFY_MOUNT_IS_NOW_RO);
        return ret;
 }
 
@@ -506,6 +509,7 @@ static int __mnt_unmake_readonly(struct mount *mnt)
        lock_mount_hash();
        mnt->mnt.mnt_flags &= ~MNT_READONLY;
        unlock_mount_hash();
+       notify_mount(mnt, NULL, NOTIFY_MOUNT_READONLY, 0);
        return 0;
 }
 
@@ -835,6 +839,7 @@ static struct mountpoint *unhash_mnt(struct mount *mnt)
  */
 static void umount_mnt(struct mount *mnt)
 {
+       notify_mount(mnt->mnt_parent, mnt, NOTIFY_MOUNT_UNMOUNT, 0);
        put_mountpoint(unhash_mnt(mnt));
 }
 
@@ -1178,6 +1183,11 @@ static void mntput_no_expire(struct mount *mnt)
        mnt->mnt.mnt_flags |= MNT_DOOMED;
        rcu_read_unlock();
 
+#ifdef CONFIG_MOUNT_NOTIFICATIONS
+       if (mnt->mnt_watchers)
+               remove_watch_list(mnt->mnt_watchers, mnt->mnt_id);
+#endif
+
        list_del(&mnt->mnt_instance);
 
        if (unlikely(!list_empty(&mnt->mnt_mounts))) {
@@ -1506,6 +1516,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
                p = list_first_entry(&tmp_list, struct mount, mnt_list);
                list_del_init(&p->mnt_expire);
                list_del_init(&p->mnt_list);
+
                ns = p->mnt_ns;
                if (ns) {
                        ns->mounts--;
@@ -2140,7 +2151,10 @@ static int attach_recursive_mnt(struct mount *source_mnt,
        }
        if (moving) {
                unhash_mnt(source_mnt);
+               notify_mount(source_mnt->mnt_parent, source_mnt,
+                            NOTIFY_MOUNT_MOVE_FROM, 0);
                attach_mnt(source_mnt, dest_mnt, dest_mp);
+               notify_mount(dest_mnt, source_mnt, NOTIFY_MOUNT_MOVE_TO, 0);
                touch_mnt_namespace(source_mnt->mnt_ns);
        } else {
                if (source_mnt->mnt_ns) {
@@ -2149,6 +2163,11 @@ static int attach_recursive_mnt(struct mount *source_mnt,
                }
                mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt);
                commit_tree(source_mnt);
+               notify_mount(dest_mnt, source_mnt, NOTIFY_MOUNT_NEW_MOUNT,
+                            (source_mnt->mnt.mnt_sb->s_flags & SB_RDONLY ?
+                             NOTIFY_MOUNT_IS_NOW_RO : 0) |
+                            (source_mnt->mnt.mnt_sb->s_flags & SB_SUBMOUNT ?
+                             NOTIFY_MOUNT_IS_SUBMOUNT : 0));
        }
 
        hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) {
@@ -2525,6 +2544,8 @@ static void set_mount_attributes(struct mount *mnt, unsigned int mnt_flags)
        mnt->mnt.mnt_flags = mnt_flags;
        touch_mnt_namespace(mnt->mnt_ns);
        unlock_mount_hash();
+       notify_mount(mnt, NULL, NOTIFY_MOUNT_SETATTR,
+                    (mnt_flags & SB_RDONLY ? NOTIFY_MOUNT_IS_NOW_RO : 0));
 }
 
 static void mnt_warn_timestamp_expiry(struct path *mountpoint, struct vfsmount *mnt)
@@ -2995,6 +3016,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
                        propagate_mount_busy(mnt, 1))
                        continue;
                list_move(&mnt->mnt_expire, &graveyard);
+               notify_mount(mnt, NULL, NOTIFY_MOUNT_EXPIRY, 0);
        }
        while (!list_empty(&graveyard)) {
                mnt = list_first_entry(&graveyard, struct mount, mnt_expire);
index 4727b7f03c5bb24a260465b70788db5e2e1baee6..8d6d2678abade46ac06ba69a8a84700335c424de 100644 (file)
 #include "pnfs.h"
 #include "trace.h"
 
+static bool inter_copy_offload_enable;
+module_param(inter_copy_offload_enable, bool, 0644);
+MODULE_PARM_DESC(inter_copy_offload_enable,
+                "Enable inter server to server copy offload. Default: false");
+
 #ifdef CONFIG_NFSD_V4_SECURITY_LABEL
 #include <linux/security.h>
 
index 45ee6b12ce5b70d327d08bb9a10745d659383a90..eaaa1605b5b5f38a51065a8bcc21ebc76d5a0190 100644 (file)
@@ -147,6 +147,25 @@ svcxdr_dupstr(struct nfsd4_compoundargs *argp, void *buf, u32 len)
        return p;
 }
 
+static void *
+svcxdr_savemem(struct nfsd4_compoundargs *argp, __be32 *p, u32 len)
+{
+       __be32 *tmp;
+
+       /*
+        * The location of the decoded data item is stable,
+        * so @p is OK to use. This is the common case.
+        */
+       if (p != argp->xdr->scratch.iov_base)
+               return p;
+
+       tmp = svcxdr_tmpalloc(argp, len);
+       if (!tmp)
+               return NULL;
+       memcpy(tmp, p, len);
+       return tmp;
+}
+
 /*
  * NFSv4 basic data type decoders
  */
@@ -183,11 +202,10 @@ nfsd4_decode_opaque(struct nfsd4_compoundargs *argp, struct xdr_netobj *o)
        p = xdr_inline_decode(argp->xdr, len);
        if (!p)
                return nfserr_bad_xdr;
-       o->data = svcxdr_tmpalloc(argp, len);
+       o->data = svcxdr_savemem(argp, p, len);
        if (!o->data)
                return nfserr_jukebox;
        o->len = len;
-       memcpy(o->data, p, len);
 
        return nfs_ok;
 }
@@ -205,10 +223,9 @@ nfsd4_decode_component4(struct nfsd4_compoundargs *argp, char **namp, u32 *lenp)
        status = check_filename((char *)p, *lenp);
        if (status)
                return status;
-       *namp = svcxdr_tmpalloc(argp, *lenp);
+       *namp = svcxdr_savemem(argp, p, *lenp);
        if (!*namp)
                return nfserr_jukebox;
-       memcpy(*namp, p, *lenp);
 
        return nfs_ok;
 }
@@ -1200,10 +1217,9 @@ nfsd4_decode_putfh(struct nfsd4_compoundargs *argp, struct nfsd4_putfh *putfh)
        p = xdr_inline_decode(argp->xdr, putfh->pf_fhlen);
        if (!p)
                return nfserr_bad_xdr;
-       putfh->pf_fhval = svcxdr_tmpalloc(argp, putfh->pf_fhlen);
+       putfh->pf_fhval = svcxdr_savemem(argp, p, putfh->pf_fhlen);
        if (!putfh->pf_fhval)
                return nfserr_jukebox;
-       memcpy(putfh->pf_fhval, p, putfh->pf_fhlen);
 
        return nfs_ok;
 }
@@ -1318,24 +1334,20 @@ nfsd4_decode_setclientid(struct nfsd4_compoundargs *argp, struct nfsd4_setclient
        p = xdr_inline_decode(argp->xdr, setclientid->se_callback_netid_len);
        if (!p)
                return nfserr_bad_xdr;
-       setclientid->se_callback_netid_val = svcxdr_tmpalloc(argp,
+       setclientid->se_callback_netid_val = svcxdr_savemem(argp, p,
                                                setclientid->se_callback_netid_len);
        if (!setclientid->se_callback_netid_val)
                return nfserr_jukebox;
-       memcpy(setclientid->se_callback_netid_val, p,
-              setclientid->se_callback_netid_len);
 
        if (xdr_stream_decode_u32(argp->xdr, &setclientid->se_callback_addr_len) < 0)
                return nfserr_bad_xdr;
        p = xdr_inline_decode(argp->xdr, setclientid->se_callback_addr_len);
        if (!p)
                return nfserr_bad_xdr;
-       setclientid->se_callback_addr_val = svcxdr_tmpalloc(argp,
+       setclientid->se_callback_addr_val = svcxdr_savemem(argp, p,
                                                setclientid->se_callback_addr_len);
        if (!setclientid->se_callback_addr_val)
                return nfserr_jukebox;
-       memcpy(setclientid->se_callback_addr_val, p,
-              setclientid->se_callback_addr_len);
        if (xdr_stream_decode_u32(argp->xdr, &setclientid->se_callback_ident) < 0)
                return nfserr_bad_xdr;
 
@@ -1375,10 +1387,9 @@ nfsd4_decode_verify(struct nfsd4_compoundargs *argp, struct nfsd4_verify *verify
        p = xdr_inline_decode(argp->xdr, verify->ve_attrlen);
        if (!p)
                return nfserr_bad_xdr;
-       verify->ve_attrval = svcxdr_tmpalloc(argp, verify->ve_attrlen);
+       verify->ve_attrval = svcxdr_savemem(argp, p, verify->ve_attrlen);
        if (!verify->ve_attrval)
                return nfserr_jukebox;
-       memcpy(verify->ve_attrval, p, verify->ve_attrlen);
 
        return nfs_ok;
 }
@@ -2333,10 +2344,9 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
                p = xdr_inline_decode(argp->xdr, argp->taglen);
                if (!p)
                        return 0;
-               argp->tag = svcxdr_tmpalloc(argp, argp->taglen);
+               argp->tag = svcxdr_savemem(argp, p, argp->taglen);
                if (!argp->tag)
                        return 0;
-               memcpy(argp->tag, p, argp->taglen);
                max_reply += xdr_align_size(argp->taglen);
        }
 
@@ -4756,6 +4766,7 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
                            resp->rqstp->rq_vec, read->rd_vlen, maxcount, eof);
        if (nfserr)
                return nfserr;
+       xdr_truncate_encode(xdr, starting_len + 16 + xdr_align_size(*maxcount));
 
        tmp = htonl(NFS4_CONTENT_DATA);
        write_bytes_to_xdr_buf(xdr->buf, starting_len,      &tmp,   4);
@@ -4763,6 +4774,10 @@ nfsd4_encode_read_plus_data(struct nfsd4_compoundres *resp,
        write_bytes_to_xdr_buf(xdr->buf, starting_len + 4,  &tmp64, 8);
        tmp = htonl(*maxcount);
        write_bytes_to_xdr_buf(xdr->buf, starting_len + 12, &tmp,   4);
+
+       tmp = xdr_zero;
+       write_bytes_to_xdr_buf(xdr->buf, starting_len + 16 + *maxcount, &tmp,
+                              xdr_pad_size(*maxcount));
        return nfs_ok;
 }
 
@@ -4855,14 +4870,15 @@ out:
        if (nfserr && segments == 0)
                xdr_truncate_encode(xdr, starting_len);
        else {
-               tmp = htonl(eof);
-               write_bytes_to_xdr_buf(xdr->buf, starting_len,     &tmp, 4);
-               tmp = htonl(segments);
-               write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
                if (nfserr) {
                        xdr_truncate_encode(xdr, last_segment);
                        nfserr = nfs_ok;
+                       eof = 0;
                }
+               tmp = htonl(eof);
+               write_bytes_to_xdr_buf(xdr->buf, starting_len,     &tmp, 4);
+               tmp = htonl(segments);
+               write_bytes_to_xdr_buf(xdr->buf, starting_len + 4, &tmp, 4);
        }
 
        return nfserr;
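
The svcxdr_savemem() helper introduced at the top of this file's hunks
replaces an unconditional svcxdr_tmpalloc()+memcpy() pair: when
xdr_inline_decode() returned a pointer into stable page data, the pointer is
used as-is, and a copy is made only when it points at the XDR scratch
buffer. A generic restatement of the idiom (a sketch, not nfsd code; the
allocator is passed in):

    #include <string.h>

    static void *save_or_dup(void *p, const void *scratch_base, size_t len,
                             void *(*tmpalloc)(size_t))
    {
            void *tmp;

            if (p != scratch_base)
                    return p;               /* stable data: common case, no copy */
            tmp = tmpalloc(len);            /* scratch buffer: copy it out */
            if (tmp)
                    memcpy(tmp, p, len);
            return tmp;
    }
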
index 00384c332f9bb657a41654c99628d59cddb3b3df..f9c9f4c63cc77db4b67700e66cf521caa11a7017 100644 (file)
 
 #define NFSDDBG_FACILITY       NFSDDBG_SVC
 
-bool inter_copy_offload_enable;
-EXPORT_SYMBOL_GPL(inter_copy_offload_enable);
-module_param(inter_copy_offload_enable, bool, 0644);
-MODULE_PARM_DESC(inter_copy_offload_enable,
-                "Enable inter server to server copy offload. Default: false");
-
 extern struct svc_program      nfsd_program;
 static int                     nfsd(void *vrqstp);
 #if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
index a60ff5ce1a375732df4552d3bb1d52b49f9c15bf..c300885ae75ddee7866e519c0d71cebadfa5d6da 100644 (file)
@@ -568,7 +568,6 @@ struct nfsd4_copy {
        struct nfs_fh           c_fh;
        nfs4_stateid            stateid;
 };
-extern bool inter_copy_offload_enable;
 
 struct nfsd4_seek {
        /* request */
index 948c5203ca9c671ee94c1e05dc84034f2fe381ef..47accec68cb0fb89498d9c7174b371fa6f0f5e7e 100644 (file)
@@ -12,7 +12,6 @@
 
 #include <linux/keyctl.h>
 #include <linux/oid_registry.h>
-#include <crypto/akcipher.h>
 
 /*
  * Cryptographic data for the public-key subtype of the asymmetric key type.
index 38afb341c3f2b8d80ec6a0d0fb7354ca798717fd..abfcbe02001a0ba9410da0dc6cd1e31ceeec4171 100644 (file)
@@ -2,7 +2,7 @@
 /*
  * Copyright (C) 2010 IBM Corporation
  * Copyright (C) 2010 Politecnico di Torino, Italy
- *                    TORSEC group -- http://security.polito.it
+ *                    TORSEC group -- https://security.polito.it
  *
  * Authors:
  * Mimi Zohar <zohar@us.ibm.com>
index 47b021952ac753d92ad2137080ce771b2bbe46e6..d705b174d346ac54d344b732a6e6c1c327768c36 100644 (file)
@@ -447,8 +447,8 @@ enum {
        BLK_MQ_REQ_NOWAIT       = (__force blk_mq_req_flags_t)(1 << 0),
        /* allocate from reserved pool */
        BLK_MQ_REQ_RESERVED     = (__force blk_mq_req_flags_t)(1 << 1),
-       /* set RQF_PREEMPT */
-       BLK_MQ_REQ_PREEMPT      = (__force blk_mq_req_flags_t)(1 << 3),
+       /* set RQF_PM */
+       BLK_MQ_REQ_PM           = (__force blk_mq_req_flags_t)(1 << 2),
 };
 
 struct request *blk_mq_alloc_request(struct request_queue *q, unsigned int op,
index 070de09425ada7233f8899eecab0c404b30fb40b..f94ee3089e015ebe2424d899e70a3f824889c299 100644 (file)
@@ -79,9 +79,6 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_MQ_INFLIGHT                ((__force req_flags_t)(1 << 6))
 /* don't call prep for this one */
 #define RQF_DONTPREP           ((__force req_flags_t)(1 << 7))
-/* set for "ide_preempt" requests and also for requests for which the SCSI
-   "quiesce" state must be ignored. */
-#define RQF_PREEMPT            ((__force req_flags_t)(1 << 8))
 /* vaguely specified driver internal error.  Ignored by the block layer */
 #define RQF_FAILED             ((__force req_flags_t)(1 << 10))
 /* don't warn about errors */
@@ -430,8 +427,7 @@ struct request_queue {
        unsigned long           queue_flags;
        /*
         * Number of contexts that have called blk_set_pm_only(). If this
-        * counter is above zero then only RQF_PM and RQF_PREEMPT requests are
-        * processed.
+        * counter is above zero then only RQF_PM requests are processed.
         */
        atomic_t                pm_only;
 
@@ -696,6 +692,18 @@ static inline bool queue_is_mq(struct request_queue *q)
        return q->mq_ops;
 }
 
+#ifdef CONFIG_PM
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
+{
+       return q->rpm_status;
+}
+#else
+static inline enum rpm_status queue_rpm_status(struct request_queue *q)
+{
+       return RPM_ACTIVE;
+}
+#endif
+
 static inline enum blk_zoned_model
 blk_queue_zoned_model(struct request_queue *q)
 {
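
The blk-mq.h and blkdev.h hunks above retire RQF_PREEMPT: while
blk_set_pm_only() is in effect only RQF_PM requests are processed, and the
new queue_rpm_status() helper exposes the queue's runtime-PM state (always
RPM_ACTIVE without CONFIG_PM). A sketch of the kind of gate this enables;
queue_rpm_status(), RQF_PM and the RPM_* states are real, but the function
itself is illustrative rather than taken from this patch:

    /* Only requests driving the PM transition itself may pass while
     * the queue is not fully active. (Kernel context assumed.) */
    static bool pm_allow_request(struct request *rq)
    {
            switch (queue_rpm_status(rq->q)) {
            case RPM_SUSPENDED:
            case RPM_SUSPENDING:
            case RPM_RESUMING:
                    return rq->rq_flags & RQF_PM;
            default:
                    return true;
            }
    }
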
index b2a3f4f641a70745d94e6b84acbc2261d8e7a03d..ea5e04e75845c8e24612221dd77d5db062e36e2f 100644 (file)
  */
 #define __used                          __attribute__((__used__))
 
+/*
+ *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-warn_005funused_005fresult-function-attribute
+ * clang: https://clang.llvm.org/docs/AttributeReference.html#nodiscard-warn-unused-result
+ */
+#define __must_check                    __attribute__((__warn_unused_result__))
+
 /*
  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Function-Attributes.html#index-weak-function-attribute
  *   gcc: https://gcc.gnu.org/onlinedocs/gcc/Common-Variable-Attributes.html#index-weak-variable-attribute
index bbaa39e98f9fa916111490aee6ebea7420bbb3ef..e5dd5a4ae94608b9b5d0950a81ba272f0dde8997 100644 (file)
@@ -121,12 +121,6 @@ struct ftrace_likely_data {
        unsigned long                   constant;
 };
 
-#ifdef CONFIG_ENABLE_MUST_CHECK
-#define __must_check           __attribute__((__warn_unused_result__))
-#else
-#define __must_check
-#endif
-
 #if defined(CC_USING_HOTPATCH)
 #define notrace                        __attribute__((hotpatch(0, 0)))
 #elif defined(CC_USING_PATCHABLE_FUNCTION_ENTRY)
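
Taken together, the two hunks above make __must_check unconditional: the
definition moves from compiler_types.h, where it was gated on
CONFIG_ENABLE_MUST_CHECK, into compiler_attributes.h. Illustrative effect
on a hypothetical declaration:

    /* Ignoring this return value now warns (-Wunused-result) on every
     * build, regardless of the removed config option. */
    static __must_check int grab_resource(void);
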
index 0042ef362511d431ff74d322373db1abe0c6726a..d3e26dc8149462949ff38c38c67863630907de67 100644 (file)
@@ -185,6 +185,7 @@ enum cpuhp_state {
        CPUHP_AP_PERF_POWERPC_TRACE_IMC_ONLINE,
        CPUHP_AP_PERF_POWERPC_HV_24x7_ONLINE,
        CPUHP_AP_PERF_POWERPC_HV_GPCI_ONLINE,
+       CPUHP_AP_PERF_CSKY_ONLINE,
        CPUHP_AP_WATCHDOG_ONLINE,
        CPUHP_AP_WORKQUEUE_ONLINE,
        CPUHP_AP_RCUTREE_ONLINE,
index d7b369fc15d36bb34d58f213765ccb4228d819db..81033567f25064a91e27c1a76cc3b047bc3843b5 100644 (file)
@@ -220,6 +220,7 @@ struct dentry_operations {
 #define DCACHE_PAR_LOOKUP              0x10000000 /* being looked up (with parent locked shared) */
 #define DCACHE_DENTRY_CURSOR           0x20000000
 #define DCACHE_NORCU                   0x40000000 /* No RCU delay for freeing */
+#define DCACHE_MOUNT_WATCH             0x80000000 /* There's a mount watch here */
 
 extern seqlock_t rename_lock;
 
index 2c300689a51a5ccf11df4ebdb0627b5cf362d83a..6bc6ba57b505f2c69114f5e306584a1248de169d 100644 (file)
@@ -1392,13 +1392,7 @@ int ide_pci_init_two(struct pci_dev *, struct pci_dev *,
                     const struct ide_port_info *, void *);
 void ide_pci_remove(struct pci_dev *);
 
-#ifdef CONFIG_PM
-int ide_pci_suspend(struct pci_dev *, pm_message_t);
-int ide_pci_resume(struct pci_dev *);
-#else
-#define ide_pci_suspend NULL
-#define ide_pci_resume NULL
-#endif
+extern const struct dev_pm_ops ide_pci_pm_ops;
 
 void ide_map_sg(ide_drive_t *, struct ide_cmd *);
 void ide_init_sg_cmd(struct ide_cmd *, unsigned int);
index 0f2e24f13c2bdff3c4136b5f2f914e394b1bcb15..7febc4881363ce87c8bfda9e07dc4df877d1c51a 100644 (file)
@@ -289,6 +289,7 @@ extern struct key *key_alloc(struct key_type *type,
 #define KEY_ALLOC_BUILT_IN             0x0004  /* Key is built into kernel */
 #define KEY_ALLOC_BYPASS_RESTRICTION   0x0008  /* Override the check on restricted keyrings */
 #define KEY_ALLOC_UID_KEYRING          0x0010  /* allocating a user or user session keyring */
+#define KEY_ALLOC_SET_KEEP             0x0020  /* Set the KEEP flag on the key/keyring */
 
 extern void key_revoke(struct key *key);
 extern void key_invalidate(struct key *key);
@@ -360,7 +361,7 @@ static inline struct key *request_key(struct key_type *type,
  * completion of keys undergoing construction with a non-interruptible wait.
  */
 #define request_key_net(type, description, net, callout_info) \
-       request_key_tag(type, description, net->key_domain, callout_info);
+       request_key_tag(type, description, net->key_domain, callout_info)
 
 /**
  * request_key_net_rcu - Request a key for a net namespace under RCU conditions
@@ -372,7 +373,7 @@ static inline struct key *request_key(struct key_type *type,
  * network namespace are used.
  */
 #define request_key_net_rcu(type, description, net) \
-       request_key_rcu(type, description, net->key_domain);
+       request_key_rcu(type, description, net->key_domain)
 #endif /* CONFIG_NET */
 
 extern int wait_for_key_construction(struct key *key, bool intr);
index 7aaa753b860816ec86bb11e9311be9e515f4255b..cf5d1149fa7ac192e72d8c332ce23f39efefabb2 100644 (file)
@@ -266,6 +266,9 @@ LSM_HOOK(int, 0, post_notification, const struct cred *w_cred,
 #if defined(CONFIG_SECURITY) && defined(CONFIG_KEY_NOTIFICATIONS)
 LSM_HOOK(int, 0, watch_key, struct key *key)
 #endif /* CONFIG_SECURITY && CONFIG_KEY_NOTIFICATIONS */
+#ifdef CONFIG_MOUNT_NOTIFICATIONS
+LSM_HOOK(int, 0, watch_mount, struct watch *watch, struct path *path)
+#endif
 
 #ifdef CONFIG_SECURITY_NETWORK
 LSM_HOOK(int, 0, unix_stream_connect, struct sock *sock, struct sock *other,
index a19adef1f088809919e49276c29a2fafdd6f5341..2e70d9f532bfc4b276bcc04cacccc2a1d0bd8b6b 100644 (file)
  *     from a key or keyring.
  *     @key: The key to watch.
  *
+ * @watch_mount:
+ *     Check to see if a process is allowed to watch for mount topology change
+ *     notifications on a mount subtree.
+ *     @watch: The watch object.
+ *     @path: The root of the subtree to watch.
+ *
  * Security hooks for using the eBPF maps and programs functionalities through
  * eBPF syscalls.
  *
index de08264113111e0b43c531838489a6e3085f5326..fd02c5fa60cb1de2f18a6b6688ab1bc1471c39a7 100644 (file)
@@ -86,6 +86,12 @@ void rcu_sched_clock_irq(int user);
 void rcu_report_dead(unsigned int cpu);
 void rcutree_migrate_callbacks(int cpu);
 
+#ifdef CONFIG_TASKS_RCU_GENERIC
+void rcu_init_tasks_generic(void);
+#else
+static inline void rcu_init_tasks_generic(void) { }
+#endif
+
 #ifdef CONFIG_RCU_STALL_COMMON
 void rcu_sysrq_start(void);
 void rcu_sysrq_end(void);
index c35ea0ffccd9505a50d9e30643ce44fadf9c33df..7f905d07d231404dc054f252a8ba6051f8efaad9 100644 (file)
@@ -1328,6 +1328,14 @@ static inline int security_watch_key(struct key *key)
        return 0;
 }
 #endif
+#if defined(CONFIG_SECURITY) && defined(CONFIG_MOUNT_NOTIFICATIONS)
+int security_watch_mount(struct watch *watch, struct path *path);
+#else
+static inline int security_watch_mount(struct watch *watch, struct path *path)
+{
+       return 0;
+}
+#endif
 
 #ifdef CONFIG_SECURITY_NETWORK
 
index f3929aff39cf272fb73cd454516accc1146b9f4e..e2828e03fd98d120ace0e99a11e1faac97261507 100644 (file)
@@ -1013,6 +1013,8 @@ asmlinkage long sys_pidfd_send_signal(int pidfd, int sig,
                                       siginfo_t __user *info,
                                       unsigned int flags);
 asmlinkage long sys_pidfd_getfd(int pidfd, int fd, unsigned int flags);
+asmlinkage long sys_watch_mount(int dfd, const char __user *path,
+                               unsigned int at_flags, int watch_fd, int watch_id);
 
 /*
  * Architecture-specific system calls
index 911ab7c2b1ab39133f2e993eeaf818847912204c..a655923335aebd587b80c5da9db979958389ca84 100644 (file)
@@ -8,6 +8,8 @@
 #ifndef _LINUX_VERIFICATION_H
 #define _LINUX_VERIFICATION_H
 
+#include <linux/types.h>
+
 /*
  * Indicate that both builtin trusted keys and secondary trusted keys
  * should be used.
index c994d1b2cdbaa2abb313170749d0ee396a807a90..f1086d12cd03f66a3c288761cc2023672a5d9d5a 100644 (file)
@@ -120,7 +120,12 @@ static inline void remove_watch_list(struct watch_list *wlist, u64 id)
  * watch_sizeof - Calculate the information part of the size of a watch record,
  * given the structure size.
  */
-#define watch_sizeof(STRUCT) (sizeof(STRUCT) << WATCH_INFO_LENGTH__SHIFT)
+#define watch_sizeof(STRUCT) \
+       ({                                                              \
+               size_t max = WATCH_INFO_LENGTH >> WATCH_INFO_LENGTH__SHIFT; \
+               BUILD_BUG_ON(sizeof(STRUCT) > max);                     \
+               sizeof(STRUCT) << WATCH_INFO_LENGTH__SHIFT;             \
+       })
 
 #else
 static inline int watch_queue_init(struct pipe_inode_info *pipe)
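
The watch_sizeof() change above turns an oversized notification record into
a build failure: the record size must now fit in the WATCH_INFO_LENGTH bits
of watch.info. Usage is unchanged, as in the fs/mount_notify.c hunk earlier
(fragment; info_flags stands for whatever per-event flags the caller
passes):

    struct mount_notification n;

    memset(&n, 0, sizeof(n));
    n.watch.type = WATCH_TYPE_MOUNT_NOTIFY;
    n.watch.info = info_flags | watch_sizeof(n); /* BUILD_BUG_ON if too big */
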
index 4f4e93bf814c3e66b392f4bca8dd6e565685f0bc..cc17bc957548257602a5d9a6cdd59bb704f4e278 100644 (file)
@@ -58,10 +58,6 @@ struct xdp_sock {
 
        struct xsk_queue *tx ____cacheline_aligned_in_smp;
        struct list_head tx_list;
-       /* Mutual exclusion of NAPI TX thread and sendmsg error paths
-        * in the SKB destructor callback.
-        */
-       spinlock_t tx_completion_lock;
        /* Protects generic receive. */
        spinlock_t rx_lock;
 
index 01755b838c745079c53b6daefd7a44322ef1d7c3..eaa8386dbc630b3d30c9b8b1f76d4e18a538d9f5 100644 (file)
@@ -73,6 +73,11 @@ struct xsk_buff_pool {
        bool dma_need_sync;
        bool unaligned;
        void *addrs;
+       /* Mutual exclusion of the completion ring in SKB mode. Two cases to
+        * protect: the NAPI TX thread racing sendmsg error paths in the SKB
+        * destructor callback, and sockets sharing a single cq when bound to
+        * the same netdev and queue id.
+        */
+       spinlock_t cq_lock;
        struct xdp_buff_xsk *free_heads[];
 };
 
index ecd24c719de4d3e2c24230941038d2ed20b86977..74b466dc20ac4352ccf4d7c1b4609abc7210c5a8 100644 (file)
@@ -99,7 +99,8 @@ struct btrfs_space_info;
        EM( ALLOC_CHUNK,                "ALLOC_CHUNK")                  \
        EM( ALLOC_CHUNK_FORCE,          "ALLOC_CHUNK_FORCE")            \
        EM( RUN_DELAYED_IPUTS,          "RUN_DELAYED_IPUTS")            \
-       EMe(COMMIT_TRANS,               "COMMIT_TRANS")
+       EM(COMMIT_TRANS,                "COMMIT_TRANS")                 \
+       EMe(FORCE_COMMIT_TRANS,         "FORCE_COMMIT_TRANS")
 
 /*
  * First define the enums in the above macros to be exported to userspace via
@@ -1111,15 +1112,16 @@ TRACE_EVENT(btrfs_trigger_flush,
 TRACE_EVENT(btrfs_flush_space,
 
        TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 num_bytes,
-                int state, int ret),
+                int state, int ret, int for_preempt),
 
-       TP_ARGS(fs_info, flags, num_bytes, state, ret),
+       TP_ARGS(fs_info, flags, num_bytes, state, ret, for_preempt),
 
        TP_STRUCT__entry_btrfs(
                __field(        u64,    flags                   )
                __field(        u64,    num_bytes               )
                __field(        int,    state                   )
                __field(        int,    ret                     )
+               __field(        int,    for_preempt             )
        ),
 
        TP_fast_assign_btrfs(fs_info,
@@ -1127,15 +1129,16 @@ TRACE_EVENT(btrfs_flush_space,
                __entry->num_bytes      =       num_bytes;
                __entry->state          =       state;
                __entry->ret            =       ret;
+               __entry->for_preempt    =       for_preempt;
        ),
 
-       TP_printk_btrfs("state=%d(%s) flags=%llu(%s) num_bytes=%llu ret=%d",
+       TP_printk_btrfs("state=%d(%s) flags=%llu(%s) num_bytes=%llu ret=%d for_preempt=%d",
                  __entry->state,
                  __print_symbolic(__entry->state, FLUSH_STATES),
                  __entry->flags,
                  __print_flags((unsigned long)__entry->flags, "|",
                                BTRFS_GROUP_FLAGS),
-                 __entry->num_bytes, __entry->ret)
+                 __entry->num_bytes, __entry->ret, __entry->for_preempt)
 );
 
 DECLARE_EVENT_CLASS(btrfs__reserved_extent,
@@ -2025,6 +2028,97 @@ TRACE_EVENT(btrfs_convert_extent_bit,
                  __print_flags(__entry->clear_bits, "|", EXTENT_FLAGS))
 );
 
+DECLARE_EVENT_CLASS(btrfs_dump_space_info,
+       TP_PROTO(const struct btrfs_fs_info *fs_info,
+                const struct btrfs_space_info *sinfo),
+
+       TP_ARGS(fs_info, sinfo),
+
+       TP_STRUCT__entry_btrfs(
+               __field(        u64,    flags                   )
+               __field(        u64,    total_bytes             )
+               __field(        u64,    bytes_used              )
+               __field(        u64,    bytes_pinned            )
+               __field(        u64,    bytes_reserved          )
+               __field(        u64,    bytes_may_use           )
+               __field(        u64,    bytes_readonly          )
+               __field(        u64,    reclaim_size            )
+               __field(        int,    clamp                   )
+               __field(        u64,    global_reserved         )
+               __field(        u64,    trans_reserved          )
+               __field(        u64,    delayed_refs_reserved   )
+               __field(        u64,    delayed_reserved        )
+               __field(        u64,    free_chunk_space        )
+       ),
+
+       TP_fast_assign_btrfs(fs_info,
+               __entry->flags                  =       sinfo->flags;
+               __entry->total_bytes            =       sinfo->total_bytes;
+               __entry->bytes_used             =       sinfo->bytes_used;
+               __entry->bytes_pinned           =       sinfo->bytes_pinned;
+               __entry->bytes_reserved         =       sinfo->bytes_reserved;
+               __entry->bytes_may_use          =       sinfo->bytes_may_use;
+               __entry->bytes_readonly         =       sinfo->bytes_readonly;
+               __entry->reclaim_size           =       sinfo->reclaim_size;
+               __entry->clamp                  =       sinfo->clamp;
+               __entry->global_reserved        =       fs_info->global_block_rsv.reserved;
+               __entry->trans_reserved         =       fs_info->trans_block_rsv.reserved;
+               __entry->delayed_refs_reserved  =       fs_info->delayed_refs_rsv.reserved;
+               __entry->delayed_reserved       =       fs_info->delayed_block_rsv.reserved;
+               __entry->free_chunk_space       =       atomic64_read(&fs_info->free_chunk_space);
+       ),
+
+       TP_printk_btrfs("flags=%s total_bytes=%llu bytes_used=%llu "
+                       "bytes_pinned=%llu bytes_reserved=%llu "
+                       "bytes_may_use=%llu bytes_readonly=%llu "
+                       "reclaim_size=%llu clamp=%d global_reserved=%llu "
+                       "trans_reserved=%llu delayed_refs_reserved=%llu "
+                       "delayed_reserved=%llu chunk_free_space=%llu",
+                       __print_flags(__entry->flags, "|", BTRFS_GROUP_FLAGS),
+                       __entry->total_bytes, __entry->bytes_used,
+                       __entry->bytes_pinned, __entry->bytes_reserved,
+                       __entry->bytes_may_use, __entry->bytes_readonly,
+                       __entry->reclaim_size, __entry->clamp,
+                       __entry->global_reserved, __entry->trans_reserved,
+                       __entry->delayed_refs_reserved,
+                       __entry->delayed_reserved, __entry->free_chunk_space)
+);
+
+DEFINE_EVENT(btrfs_dump_space_info, btrfs_done_preemptive_reclaim,
+       TP_PROTO(const struct btrfs_fs_info *fs_info,
+                const struct btrfs_space_info *sinfo),
+       TP_ARGS(fs_info, sinfo)
+);
+
+TRACE_EVENT(btrfs_reserve_ticket,
+       TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 bytes,
+                u64 start_ns, int flush, int error),
+
+       TP_ARGS(fs_info, flags, bytes, start_ns, flush, error),
+
+       TP_STRUCT__entry_btrfs(
+               __field(        u64,    flags           )
+               __field(        u64,    bytes           )
+               __field(        u64,    start_ns        )
+               __field(        int,    flush           )
+               __field(        int,    error           )
+       ),
+
+       TP_fast_assign_btrfs(fs_info,
+               __entry->flags          = flags;
+               __entry->bytes          = bytes;
+               __entry->start_ns       = start_ns;
+               __entry->flush          = flush;
+               __entry->error          = error;
+       ),
+
+       TP_printk_btrfs("flags=%s bytes=%llu start_ns=%llu flush=%s error=%d",
+                       __print_flags(__entry->flags, "|", BTRFS_GROUP_FLAGS),
+                       __entry->bytes, __entry->start_ns,
+                       __print_symbolic(__entry->flush, FLUSH_ACTIONS),
+                       __entry->error)
+);
+
 DECLARE_EVENT_CLASS(btrfs_sleep_tree_lock,
        TP_PROTO(const struct extent_buffer *eb, u64 start_ns),
 
index 728752917785efe99cc6a325edbb488a58ff4674..ad58f661f4aae4df115aa2c2925200bfe71d546b 100644 (file)
@@ -861,9 +861,11 @@ __SYSCALL(__NR_faccessat2, sys_faccessat2)
 __SYSCALL(__NR_process_madvise, sys_process_madvise)
 #define __NR_epoll_pwait2 441
 __SC_COMP(__NR_epoll_pwait2, sys_epoll_pwait2, compat_sys_epoll_pwait2)
+#define __NR_watch_mount 442
+__SYSCALL(__NR_watch_mount, sys_watch_mount)
 
 #undef __NR_syscalls
-#define __NR_syscalls 442
+#define __NR_syscalls 443
 
 /*
  * 32 bit systems traditionally used different
index 8dbecb3ad03684f69b14b4b2b7637a3d73e9f690..1cc5ce0ae062ec179d7badb3c9006dbef689cb21 100644 (file)
@@ -116,7 +116,7 @@ struct pppol2tp_ioc_stats {
 #define PPPIOCGCHAN    _IOR('t', 55, int)      /* get ppp channel number */
 #define PPPIOCGL2TPSTATS _IOR('t', 54, struct pppol2tp_ioc_stats)
 #define PPPIOCBRIDGECHAN _IOW('t', 53, int)    /* bridge one channel to another */
-#define PPPIOCUNBRIDGECHAN _IO('t', 54)        /* unbridge channel */
+#define PPPIOCUNBRIDGECHAN _IO('t', 52)        /* unbridge channel */
 
 #define SIOCGPPPSTATS   (SIOCDEVPRIVATE + 0)
 #define SIOCGPPPVER     (SIOCDEVPRIVATE + 1)   /* NEVER change this!! */
index c3d8320b5d3a6a2f7bc25b178391dcaeabaef6ff..83b11242c10e8defe0152e3ed5526ea45caabfd3 100644 (file)
@@ -14,7 +14,8 @@
 enum watch_notification_type {
        WATCH_TYPE_META         = 0,    /* Special record */
        WATCH_TYPE_KEY_NOTIFY   = 1,    /* Key change event notification */
-       WATCH_TYPE__NR          = 2
+       WATCH_TYPE_MOUNT_NOTIFY = 2,    /* Mount topology change notification */
+       WATCH_TYPE__NR          = 3
 };
 
 enum watch_meta_notification_subtype {
@@ -101,4 +102,32 @@ struct key_notification {
        __u32   aux;            /* Per-type auxiliary data */
 };
 
+/*
+ * Type of mount topology change notification.
+ */
+enum mount_notification_subtype {
+       NOTIFY_MOUNT_NEW_MOUNT  = 0, /* New mount added */
+       NOTIFY_MOUNT_UNMOUNT    = 1, /* Mount removed manually */
+       NOTIFY_MOUNT_EXPIRY     = 2, /* Automount expired */
+       NOTIFY_MOUNT_READONLY   = 3, /* Mount R/O state changed */
+       NOTIFY_MOUNT_SETATTR    = 4, /* Mount attributes changed */
+       NOTIFY_MOUNT_MOVE_FROM  = 5, /* Mount moved from here */
+       NOTIFY_MOUNT_MOVE_TO    = 6, /* Mount moved to here (compare op_id) */
+};
+
+#define NOTIFY_MOUNT_IN_SUBTREE                WATCH_INFO_FLAG_0 /* Event not actually at watched dentry */
+#define NOTIFY_MOUNT_IS_NOW_RO         WATCH_INFO_FLAG_1 /* Mount changed to R/O */
+#define NOTIFY_MOUNT_IS_SUBMOUNT       WATCH_INFO_FLAG_2 /* New mount is submount */
+
+/*
+ * Mount topology/configuration change notification record.
+ * - watch.type = WATCH_TYPE_MOUNT_NOTIFY
+ * - watch.subtype = enum mount_notification_subtype
+ */
+struct mount_notification {
+       struct watch_notification watch; /* WATCH_TYPE_MOUNT_NOTIFY */
+       __u64   triggered_on;           /* The mount that triggered the notification */
+       __u64   auxiliary_mount;        /* Added/moved/removed mount or 0 */
+};
+
 #endif /* _UAPI_LINUX_WATCH_QUEUE_H */
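
A hedged consumer-side sketch of the new record type, using only the definitions added above plus the type/subtype/info fields of struct watch_notification from earlier in this header; the printing itself is illustrative, not part of the patch:

	#include <stdio.h>
	#include <linux/watch_queue.h>

	static void show_mount_event(const struct watch_notification *n)
	{
		const struct mount_notification *m =
			(const struct mount_notification *)n;

		/* WATCH_INFO_FLAG_* bits arrive in watch.info */
		printf("mount %llx: subtype=%u%s\n",
		       (unsigned long long)m->triggered_on, n->subtype,
		       (n->info & NOTIFY_MOUNT_IN_SUBTREE) ?
				" (below the watched point)" : "");
		if (n->subtype == NOTIFY_MOUNT_READONLY)
			printf("  now %s\n",
			       (n->info & NOTIFY_MOUNT_IS_NOW_RO) ? "R/O" : "R/W");
	}
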
index 848f73ddaf821cabc469faf06fda173660e80d2a..04341ce7526dc4154cbffd4ec71f6349bef7d132 100644 (file)
@@ -1521,6 +1521,7 @@ static noinline void __init kernel_init_freeable(void)
 
        init_mm_internals();
 
+       rcu_init_tasks_generic();
        do_pre_smp_initcalls();
        lockup_detector_init();
 
index 7e848200cd268a0f9ed063f0b641d3c355787013..c1ac7f964bc997925fd427f5192168829d812e5d 100644 (file)
@@ -152,6 +152,7 @@ static void htab_init_buckets(struct bpf_htab *htab)
                        lockdep_set_class(&htab->buckets[i].lock,
                                          &htab->lockdep_key);
                }
+               cond_resched();
        }
 }
 
index 4caf06fe41524f81d204ba650f4791c1bac86156..c3bb03c8371fc7aabc60245780582a6fed05b12a 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/fs.h>
 #include <linux/license.h>
 #include <linux/filter.h>
-#include <linux/version.h>
 #include <linux/kernel.h>
 #include <linux/idr.h>
 #include <linux/cred.h>
index b1496e744c687f399cdd02a3e62d86bebe8c0f2c..e0d40be7820359d65b86ba9406104334274051bb 100644 (file)
@@ -214,6 +214,15 @@ static long map_benchmark_ioctl(struct file *file, unsigned int cmd,
                        return -EINVAL;
                }
 
+               if (map->bparam.dma_bits < 20 ||
+                   map->bparam.dma_bits > 64) {
+                       pr_err("invalid dma_bits\n");
+                       return -EINVAL;
+               }
+
                if (map->bparam.node != NUMA_NO_NODE &&
                    !node_possible(map->bparam.node)) {
                        pr_err("invalid numa node\n");
index 2c0c4d6d0f83afcc0d70b696e4b216ed06086106..dc0e2d7fbdfd927c98aade002389549f89477a0c 100644 (file)
@@ -402,7 +402,7 @@ int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
        struct msi_domain_ops *ops = info->ops;
        struct irq_data *irq_data;
        struct msi_desc *desc;
-       msi_alloc_info_t arg;
+       msi_alloc_info_t arg = { };
        int i, ret, virq;
        bool can_reserve;
 
index 35bdcfd84d42827dc95cdc57e75994ed1fe483d9..36607551f96652048ada29dfd8c70bd0482da55a 100644 (file)
@@ -241,7 +241,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
        }
 }
 
-/* Spawn RCU-tasks grace-period kthread, e.g., at core_initcall() time. */
+/* Spawn RCU-tasks grace-period kthread. */
 static void __init rcu_spawn_tasks_kthread_generic(struct rcu_tasks *rtp)
 {
        struct task_struct *t;
@@ -564,7 +564,6 @@ static int __init rcu_spawn_tasks_kthread(void)
        rcu_spawn_tasks_kthread_generic(&rcu_tasks);
        return 0;
 }
-core_initcall(rcu_spawn_tasks_kthread);
 
 #if !defined(CONFIG_TINY_RCU)
 void show_rcu_tasks_classic_gp_kthread(void)
@@ -692,7 +691,6 @@ static int __init rcu_spawn_tasks_rude_kthread(void)
        rcu_spawn_tasks_kthread_generic(&rcu_tasks_rude);
        return 0;
 }
-core_initcall(rcu_spawn_tasks_rude_kthread);
 
 #if !defined(CONFIG_TINY_RCU)
 void show_rcu_tasks_rude_gp_kthread(void)
@@ -968,6 +966,11 @@ static void rcu_tasks_trace_pregp_step(void)
 static void rcu_tasks_trace_pertask(struct task_struct *t,
                                    struct list_head *hop)
 {
+       // During early boot, when only the boot CPU is running, there is
+       // no idle task yet for the other CPUs. Just return.
+       if (unlikely(t == NULL))
+               return;
+
        WRITE_ONCE(t->trc_reader_special.b.need_qs, false);
        WRITE_ONCE(t->trc_reader_checked, false);
        t->trc_ipi_to_cpu = -1;
@@ -1193,7 +1196,6 @@ static int __init rcu_spawn_tasks_trace_kthread(void)
        rcu_spawn_tasks_kthread_generic(&rcu_tasks_trace);
        return 0;
 }
-core_initcall(rcu_spawn_tasks_trace_kthread);
 
 #if !defined(CONFIG_TINY_RCU)
 void show_rcu_tasks_trace_gp_kthread(void)
@@ -1222,6 +1224,21 @@ void show_rcu_tasks_gp_kthreads(void)
 }
 #endif /* #ifndef CONFIG_TINY_RCU */
 
+void __init rcu_init_tasks_generic(void)
+{
+#ifdef CONFIG_TASKS_RCU
+       rcu_spawn_tasks_kthread();
+#endif
+
+#ifdef CONFIG_TASKS_RUDE_RCU
+       rcu_spawn_tasks_rude_kthread();
+#endif
+
+#ifdef CONFIG_TASKS_TRACE_RCU
+       rcu_spawn_tasks_trace_kthread();
+#endif
+}
+
 #else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
 static inline void rcu_tasks_bootup_oddness(void) {}
 void show_rcu_tasks_gp_kthreads(void) {}
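
kernel_init_freeable() above calls rcu_init_tasks_generic() unconditionally, so a declaration plus a !CONFIG_TASKS_RCU_GENERIC stub, in the style of the inline stubs just above, is presumably added to a header; that hunk is not part of this digest, so the following is an assumption:

	#ifdef CONFIG_TASKS_RCU_GENERIC
	void rcu_init_tasks_generic(void);
	#else /* #ifdef CONFIG_TASKS_RCU_GENERIC */
	static inline void rcu_init_tasks_generic(void) { }
	#endif /* #else #ifdef CONFIG_TASKS_RCU_GENERIC */
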
index 19aa806890d52f62aff18e151ec659448fae0ca9..769ad6225ab14234265d47b5f9248cfb35432fa7 100644 (file)
@@ -87,6 +87,9 @@ COND_SYSCALL(ioprio_get);
 /* fs/locks.c */
 COND_SYSCALL(flock);
 
+/* fs/mount_notify.c */
+COND_SYSCALL(watch_mount);
+
 /* fs/namei.c */
 
 /* fs/namespace.c */
index e96f4baedd28dee4e1704e1f00d98415011be8e7..78361f0abe3acbd441fd47ea96e9016c50f9c538 100644 (file)
@@ -295,14 +295,6 @@ config GDB_SCRIPTS
 
 endif # DEBUG_INFO
 
-config ENABLE_MUST_CHECK
-       bool "Enable __must_check logic"
-       default y
-       help
-         Enable the __must_check logic in the kernel build.  Disable this to
-         suppress the "warning: ignoring return value of 'foo', declared with
-         attribute warn_unused_result" messages.
-
 config FRAME_WARN
        int "Warn for stack frames larger than"
        range 0 8192
index c3e59caf7ffa1004c2bbd1d3d04fefc17eece90e..9c9f40bd2b3d9b04e5ecc76825cdc1004f2dbc2a 100644 (file)
@@ -21,7 +21,6 @@ static inline unsigned long ex_to_insn(const struct exception_table_entry *x)
 }
 #endif
 
-#ifndef ARCH_HAS_SORT_EXTABLE
 #ifndef ARCH_HAS_RELATIVE_EXTABLE
 #define swap_ex                NULL
 #else
@@ -88,9 +87,6 @@ void trim_init_extable(struct module *m)
                m->num_exentries--;
 }
 #endif /* CONFIG_MODULES */
-#endif /* !ARCH_HAS_SORT_EXTABLE */
-
-#ifndef ARCH_HAS_SEARCH_EXTABLE
 
 static int cmp_ex_search(const void *key, const void *elt)
 {
@@ -120,4 +116,3 @@ search_extable(const struct exception_table_entry *base,
        return bsearch(&value, base, num,
                       sizeof(struct exception_table_entry), cmp_ex_search);
 }
-#endif
index 8294f43f498164c7e815593790a732924ad98e0b..8b1c318189ce801a0935133b4882556fede5bfd2 100644 (file)
@@ -1530,24 +1530,24 @@ static noinline void check_store_range(struct xarray *xa)
 
 #ifdef CONFIG_XARRAY_MULTI
 static void check_split_1(struct xarray *xa, unsigned long index,
-                                                       unsigned int order)
+                               unsigned int order, unsigned int new_order)
 {
-       XA_STATE(xas, xa, index);
-       void *entry;
-       unsigned int i = 0;
+       XA_STATE_ORDER(xas, xa, index, new_order);
+       unsigned int i;
 
        xa_store_order(xa, index, order, xa, GFP_KERNEL);
 
        xas_split_alloc(&xas, xa, order, GFP_KERNEL);
        xas_lock(&xas);
        xas_split(&xas, xa, order);
+       for (i = 0; i < (1 << order); i += (1 << new_order))
+               __xa_store(xa, index + i, xa_mk_index(index + i), 0);
        xas_unlock(&xas);
 
-       xa_for_each(xa, index, entry) {
-               XA_BUG_ON(xa, entry != xa);
-               i++;
+       for (i = 0; i < (1 << order); i++) {
+               unsigned int val = index + (i & ~((1 << new_order) - 1));
+               XA_BUG_ON(xa, xa_load(xa, index + i) != xa_mk_index(val));
        }
-       XA_BUG_ON(xa, i != 1 << order);
 
        xa_set_mark(xa, index, XA_MARK_0);
        XA_BUG_ON(xa, !xa_get_mark(xa, index, XA_MARK_0));
@@ -1557,14 +1557,16 @@ static void check_split_1(struct xarray *xa, unsigned long index,
 
 static noinline void check_split(struct xarray *xa)
 {
-       unsigned int order;
+       unsigned int order, new_order;
 
        XA_BUG_ON(xa, !xa_empty(xa));
 
        for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) {
-               check_split_1(xa, 0, order);
-               check_split_1(xa, 1UL << order, order);
-               check_split_1(xa, 3UL << order, order);
+               for (new_order = 0; new_order < order; new_order++) {
+                       check_split_1(xa, 0, order, new_order);
+                       check_split_1(xa, 1UL << order, order, new_order);
+                       check_split_1(xa, 3UL << order, order, new_order);
+               }
        }
 }
 #else
index 5fa51614802ada34af73623a7cab609e236af856..f5d8f54907b4f87e649211a60d1e0bf3cb170a16 100644 (file)
@@ -987,7 +987,7 @@ static void node_set_marks(struct xa_node *node, unsigned int offset,
  * xas_split_alloc() - Allocate memory for splitting an entry.
  * @xas: XArray operation state.
  * @entry: New entry which will be stored in the array.
- * @order: New entry order.
+ * @order: Current entry order.
  * @gfp: Memory allocation flags.
  *
  * This function should be called before calling xas_split().
@@ -1011,7 +1011,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
 
        do {
                unsigned int i;
-               void *sibling;
+               void *sibling = NULL;
                struct xa_node *node;
 
                node = kmem_cache_alloc(radix_tree_node_cachep, gfp);
@@ -1021,7 +1021,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
                for (i = 0; i < XA_CHUNK_SIZE; i++) {
                        if ((i & mask) == 0) {
                                RCU_INIT_POINTER(node->slots[i], entry);
-                               sibling = xa_mk_sibling(0);
+                               sibling = xa_mk_sibling(i);
                        } else {
                                RCU_INIT_POINTER(node->slots[i], sibling);
                        }
@@ -1041,9 +1041,10 @@ EXPORT_SYMBOL_GPL(xas_split_alloc);
  * xas_split() - Split a multi-index entry into smaller entries.
  * @xas: XArray operation state.
  * @entry: New entry to store in the array.
- * @order: New entry order.
+ * @order: Current entry order.
  *
- * The value in the entry is copied to all the replacement entries.
+ * The size of the new entries is set in @xas.  The value in @entry is
+ * copied to all the replacement entries.
  *
  * Context: Any context.  The caller should hold the xa_lock.
  */
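
Condensed from check_split_1() above, the calling pattern under the new semantics: @order names the entry's current order, while the target order of the pieces rides in the xa_state (a sketch, error handling elided):

	XA_STATE_ORDER(xas, xa, index, new_order);	/* size of the pieces */

	xas_split_alloc(&xas, entry, order, GFP_KERNEL);  /* pre-allocate nodes */
	xas_lock(&xas);
	xas_split(&xas, entry, order);		/* @order = current order */
	xas_unlock(&xas);
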
index ad7a37ee74ef5f2ed8ead98c966a08f7d4e2384a..f53b89b793234690dbdf391abc3f873936c88d54 100644 (file)
@@ -2669,6 +2669,7 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
 {
        static int group_map[NR_CPUS] __initdata;
        static int group_cnt[NR_CPUS] __initdata;
+       static struct cpumask mask __initdata;
        const size_t static_size = __per_cpu_end - __per_cpu_start;
        int nr_groups = 1, nr_units = 0;
        size_t size_sum, min_unit_size, alloc_size;
@@ -2702,24 +2703,27 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
                upa--;
        max_upa = upa;
 
+       cpumask_copy(&mask, cpu_possible_mask);
+
        /* group cpus according to their proximity */
-       for_each_possible_cpu(cpu) {
-               group = 0;
-       next_group:
-               for_each_possible_cpu(tcpu) {
-                       if (cpu == tcpu)
-                               break;
-                       if (group_map[tcpu] == group && cpu_distance_fn &&
-                           (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE ||
-                            cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) {
-                               group++;
-                               nr_groups = max(nr_groups, group + 1);
-                               goto next_group;
-                       }
-               }
+       for (group = 0; !cpumask_empty(&mask); group++) {
+               /* pop the group's first cpu */
+               cpu = cpumask_first(&mask);
                group_map[cpu] = group;
                group_cnt[group]++;
+               cpumask_clear_cpu(cpu, &mask);
+
+               for_each_cpu(tcpu, &mask) {
+                       if (!cpu_distance_fn ||
+                           (cpu_distance_fn(cpu, tcpu) == LOCAL_DISTANCE &&
+                            cpu_distance_fn(tcpu, cpu) == LOCAL_DISTANCE)) {
+                               group_map[tcpu] = group;
+                               group_cnt[group]++;
+                               cpumask_clear_cpu(tcpu, &mask);
+                       }
+               }
        }
+       nr_groups = group;
 
        /*
         * Wasted space is caused by a ratio imbalance of upa to group_cnt.
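
The rewritten pass replaces the restart-on-mismatch scan with one sweep per group: pop the first remaining CPU, then claim everything local to it in a single pass. A userspace model of the same idea (the locality predicate is invented for illustration; the kernel tests cpu_distance_fn against LOCAL_DISTANCE in both directions):

	#include <stdio.h>
	#include <stdbool.h>

	#define N 8

	/* invented locality: cpus in the same block of four are "local" */
	static bool local(int a, int b) { return a / 4 == b / 4; }

	int main(void)
	{
		bool mask[N];
		int group_map[N], group = 0;

		for (int i = 0; i < N; i++)
			mask[i] = true;

		for (int cpu = 0; cpu < N; cpu++) {
			if (!mask[cpu])
				continue;
			/* pop the group's first cpu, then sweep the mask once */
			mask[cpu] = false;
			group_map[cpu] = group;
			for (int tcpu = cpu + 1; tcpu < N; tcpu++) {
				if (mask[tcpu] && local(cpu, tcpu)) {
					mask[tcpu] = false;
					group_map[tcpu] = group;
				}
			}
			group++;
		}

		for (int i = 0; i < N; i++)
			printf("cpu%d -> group %d\n", i, group_map[i]);
		return 0;
	}
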
index 4f1cd8063e720afe25df76e1e252a22b60b68c3c..23c0d77ea7370ccd833fe33d23c41c3602c9f619 100644 (file)
@@ -203,6 +203,23 @@ static void hci_acl_create_connection(struct hci_conn *conn)
 
        BT_DBG("hcon %p", conn);
 
+       /* Many controllers disallow HCI Create Connection while they are
+        * doing HCI Inquiry, so cancel the Inquiry first before issuing HCI
+        * Create Connection. This may cause the MGMT discovering state to
+        * become false without user space's request, but that is okay since
+        * the MGMT Discovery APIs do not promise that discovery will run
+        * forever. Instead, user space monitors the MGMT discovering status
+        * and may request discovery again when this flag becomes false.
+        */
+       if (test_bit(HCI_INQUIRY, &hdev->flags)) {
+               /* Put this connection to "pending" state so that it will be
+                * executed after the inquiry cancel command complete event.
+                */
+               conn->state = BT_CONNECT2;
+               hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
+               return;
+       }
+
        conn->state = BT_CONNECT;
        conn->out = true;
        conn->role = HCI_ROLE_MASTER;
index 71bffd7454720436dbf0bf967d62b93ac36959ec..5aa7bd5030a218c1099568f615b2df8b8fd68f79 100644 (file)
@@ -1087,6 +1087,8 @@ void hci_req_add_le_passive_scan(struct hci_request *req)
        if (hdev->suspended) {
                window = hdev->le_scan_window_suspend;
                interval = hdev->le_scan_int_suspend;
+
+               set_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
        } else if (hci_is_le_conn_scanning(hdev)) {
                window = hdev->le_scan_window_connect;
                interval = hdev->le_scan_int_connect;
@@ -1170,19 +1172,6 @@ static void hci_req_set_event_filter(struct hci_request *req)
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 }
 
-static void hci_req_config_le_suspend_scan(struct hci_request *req)
-{
-       /* Before changing params disable scan if enabled */
-       if (hci_dev_test_flag(req->hdev, HCI_LE_SCAN))
-               hci_req_add_le_scan_disable(req, false);
-
-       /* Configure params and enable scanning */
-       hci_req_add_le_passive_scan(req);
-
-       /* Block suspend notifier on response */
-       set_bit(SUSPEND_SCAN_ENABLE, req->hdev->suspend_tasks);
-}
-
 static void cancel_adv_timeout(struct hci_dev *hdev)
 {
        if (hdev->adv_instance_timeout) {
@@ -1245,8 +1234,10 @@ static void suspend_req_complete(struct hci_dev *hdev, u8 status, u16 opcode)
 {
        bt_dev_dbg(hdev, "Request complete opcode=0x%x, status=0x%x", opcode,
                   status);
-       if (test_and_clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
-           test_and_clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
+       if (test_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks) ||
+           test_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks)) {
+               clear_bit(SUSPEND_SCAN_ENABLE, hdev->suspend_tasks);
+               clear_bit(SUSPEND_SCAN_DISABLE, hdev->suspend_tasks);
                wake_up(&hdev->suspend_wait_q);
        }
 }
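
The hunk above fixes a short-circuit bug: with ||, the second test_and_clear_bit() never ran once the first returned true, so one of the two bits could stay set. A tiny userspace model of the pitfall (the bit helper is a stand-in for the kernel's):

	#include <stdio.h>

	static int test_and_clear(unsigned long *w, int bit)
	{
		int old = (*w >> bit) & 1;

		*w &= ~(1UL << bit);
		return old;
	}

	int main(void)
	{
		unsigned long tasks = 0x3;	/* ENABLE and DISABLE both set */

		/* old logic: || short-circuits, bit 1 is never cleared */
		if (test_and_clear(&tasks, 0) || test_and_clear(&tasks, 1))
			;
		printf("left set: %#lx\n", tasks);	/* prints 0x2 */
		return 0;
	}
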
@@ -1336,7 +1327,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
                /* Enable event filter for paired devices */
                hci_req_set_event_filter(&req);
                /* Enable passive scan at lower duty cycle */
-               hci_req_config_le_suspend_scan(&req);
+               __hci_update_background_scan(&req);
                /* Pause scan changes again. */
                hdev->scanning_paused = true;
                hci_req_run(&req, suspend_req_complete);
@@ -1346,7 +1337,7 @@ void hci_req_prepare_suspend(struct hci_dev *hdev, enum suspended_state next)
 
                hci_req_clear_event_filter(&req);
                /* Reset passive/background scanning to normal */
-               hci_req_config_le_suspend_scan(&req);
+               __hci_update_background_scan(&req);
 
                /* Unpause directed advertising */
                hdev->advertising_paused = false;
index fa0f7a4a1d2fc8a5422a1407bc63407f6ccdb338..608dda5403b7327cd2845eddb3eb57012f77726d 100644 (file)
@@ -4798,6 +4798,14 @@ static int start_service_discovery(struct sock *sk, struct hci_dev *hdev,
                goto failed;
        }
 
+       if (hdev->discovery_paused) {
+               err = mgmt_cmd_complete(sk, hdev->id,
+                                       MGMT_OP_START_SERVICE_DISCOVERY,
+                                       MGMT_STATUS_BUSY, &cp->type,
+                                       sizeof(cp->type));
+               goto failed;
+       }
+
        uuid_count = __le16_to_cpu(cp->uuid_count);
        if (uuid_count > max_uuid_count) {
                bt_dev_err(hdev, "service_discovery: too big uuid_count value %u",
index 563b62b76a5f18aadafbd6dd7376e18d6285e43c..c576a63d09db1b5412becc51052441a7352f122f 100644 (file)
@@ -1379,7 +1379,7 @@ static int compat_get_entries(struct net *net,
        xt_compat_lock(NFPROTO_ARP);
        t = xt_find_table_lock(net, NFPROTO_ARP, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = t->private;
+               const struct xt_table_info *private = xt_table_get_private_protected(t);
                struct xt_table_info info;
 
                ret = compat_table_info(private, &info);
index 6e2851f8d3a3fa7c488c5e4894a9d0887e76b4cb..e8f6f9d86237635b26b37ef8d473a40a9f88c5b6 100644 (file)
@@ -1589,7 +1589,7 @@ compat_get_entries(struct net *net, struct compat_ipt_get_entries __user *uptr,
        xt_compat_lock(AF_INET);
        t = xt_find_table_lock(net, AF_INET, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = t->private;
+               const struct xt_table_info *private = xt_table_get_private_protected(t);
                struct xt_table_info info;
                ret = compat_table_info(private, &info);
                if (!ret && get.size == info.size)
index c4f532f4d311873391acc280746a3ce0e91398de..0d453fa9e327bde73da046a941361ce8a0052d35 100644 (file)
@@ -1598,7 +1598,7 @@ compat_get_entries(struct net *net, struct compat_ip6t_get_entries __user *uptr,
        xt_compat_lock(AF_INET6);
        t = xt_find_table_lock(net, AF_INET6, get.name);
        if (!IS_ERR(t)) {
-               const struct xt_table_info *private = t->private;
+               const struct xt_table_info *private = xt_table_get_private_protected(t);
                struct xt_table_info info;
                ret = compat_table_info(private, &info);
                if (!ret && get.size == info.size)
index 5f1208ad049eed33d54b6f201b2a17ce19dcbd54..6186358eac7c5a255e48eb9670311694f0d920cd 100644 (file)
@@ -141,20 +141,6 @@ htable_size(u8 hbits)
        return hsize * sizeof(struct hbucket *) + sizeof(struct htable);
 }
 
-/* Compute htable_bits from the user input parameter hashsize */
-static u8
-htable_bits(u32 hashsize)
-{
-       /* Assume that hashsize == 2^htable_bits */
-       u8 bits = fls(hashsize - 1);
-
-       if (jhash_size(bits) != hashsize)
-               /* Round up to the first 2^n value */
-               bits = fls(hashsize);
-
-       return bits;
-}
-
 #ifdef IP_SET_HASH_WITH_NETS
 #if IPSET_NET_COUNT > 1
 #define __CIDR(cidr, i)                (cidr[i])
@@ -640,7 +626,7 @@ mtype_resize(struct ip_set *set, bool retried)
        struct htype *h = set->data;
        struct htable *t, *orig;
        u8 htable_bits;
-       size_t dsize = set->dsize;
+       size_t hsize, dsize = set->dsize;
 #ifdef IP_SET_HASH_WITH_NETS
        u8 flags;
        struct mtype_elem *tmp;
@@ -664,14 +650,12 @@ mtype_resize(struct ip_set *set, bool retried)
 retry:
        ret = 0;
        htable_bits++;
-       if (!htable_bits) {
-               /* In case we have plenty of memory :-) */
-               pr_warn("Cannot increase the hashsize of set %s further\n",
-                       set->name);
-               ret = -IPSET_ERR_HASH_FULL;
-               goto out;
-       }
-       t = ip_set_alloc(htable_size(htable_bits));
+       if (!htable_bits)
+               goto hbwarn;
+       hsize = htable_size(htable_bits);
+       if (!hsize)
+               goto hbwarn;
+       t = ip_set_alloc(hsize);
        if (!t) {
                ret = -ENOMEM;
                goto out;
@@ -813,6 +797,12 @@ cleanup:
        if (ret == -EAGAIN)
                goto retry;
        goto out;
+
+hbwarn:
+       /* In case we have plenty of memory :-) */
+       pr_warn("Cannot increase the hashsize of set %s further\n", set->name);
+       ret = -IPSET_ERR_HASH_FULL;
+       goto out;
 }
 
 /* Get the current number of elements and ext_size in the set  */
@@ -1521,7 +1511,11 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
        if (!h)
                return -ENOMEM;
 
-       hbits = htable_bits(hashsize);
+       /* Compute htable_bits from the user input parameter hashsize.
+        * Assume that hashsize == 2^htable_bits,
+        * otherwise round up to the first 2^n value.
+        */
+       hbits = fls(hashsize - 1);
        hsize = htable_size(hbits);
        if (hsize == 0) {
                kfree(h);
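
The inlined computation relies on fls() rounding a non-power-of-two hashsize up to the next power of two, which is what the deleted two-step htable_bits() helper did. A quick userspace check (fls_u32() is a portable stand-in for the kernel's fls()):

	#include <stdio.h>

	static unsigned int fls_u32(unsigned int x)	/* stand-in for fls() */
	{
		unsigned int r = 0;

		while (x) {
			r++;
			x >>= 1;
		}
		return r;
	}

	int main(void)
	{
		unsigned int sizes[] = { 1000, 1024, 1025 };

		for (int i = 0; i < 3; i++) {
			unsigned int bits = fls_u32(sizes[i] - 1);

			printf("hashsize=%u -> hbits=%u (table=%u)\n",
			       sizes[i], bits, 1U << bits);
		}
		return 0;	/* 1000->10, 1024->10, 1025->11 */
	}
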
index 8d5aa0ac45f4d3a22cdc3926df3c21722d6e35b9..4186b1e52d584e591df80006d251271dc9214f50 100644 (file)
@@ -5254,8 +5254,8 @@ static int nft_set_elem_expr_clone(const struct nft_ctx *ctx,
        return 0;
 
 err_expr:
-       for (k = i - 1; k >= 0; k++)
-               nft_expr_destroy(ctx, expr_array[i]);
+       for (k = i - 1; k >= 0; k--)
+               nft_expr_destroy(ctx, expr_array[k]);
 
        return -ENOMEM;
 }
index de8e8dbbdeb8c476addbb1d480aeb1ac848812b1..6bbc7a4485938304bb04fb68356395f3259805fc 100644 (file)
@@ -4595,7 +4595,9 @@ static void packet_seq_stop(struct seq_file *seq, void *v)
 static int packet_seq_show(struct seq_file *seq, void *v)
 {
        if (v == SEQ_START_TOKEN)
-               seq_puts(seq, "sk       RefCnt Type Proto  Iface R Rmem   User   Inode\n");
+               seq_printf(seq,
+                          "%*sRefCnt Type Proto  Iface R Rmem   User   Inode\n",
+                          IS_ENABLED(CONFIG_64BIT) ? -17 : -9, "sk");
        else {
                struct sock *s = sk_entry(v);
                const struct packet_sock *po = pkt_sk(s);
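
The header row is now left-justified into a field as wide as the socket pointer column that follows on each data row (17 columns on 64-bit, 9 on 32-bit). A small illustration of the negative field width; the row contents here are made up:

	#include <stdio.h>

	int main(void)
	{
		printf("%*sRefCnt\n", -17, "sk");	/* the 64-bit case */
		printf("%016llx 3\n", 0xffff888012345678ULL);	/* sample row */
		return 0;
	}
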
index c74817ec9964b7eb744f972cca0777111a78f0f2..6f775275826a45a19c519fb06b80c53b304e61ce 100644 (file)
@@ -1605,8 +1605,9 @@ static void taprio_reset(struct Qdisc *sch)
 
        hrtimer_cancel(&q->advance_timer);
        if (q->qdiscs) {
-               for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
-                       qdisc_reset(q->qdiscs[i]);
+               for (i = 0; i < dev->num_tx_queues; i++)
+                       if (q->qdiscs[i])
+                               qdisc_reset(q->qdiscs[i]);
        }
        sch->qstats.backlog = 0;
        sch->q.qlen = 0;
@@ -1626,7 +1627,7 @@ static void taprio_destroy(struct Qdisc *sch)
        taprio_disable_offload(dev, q, NULL);
 
        if (q->qdiscs) {
-               for (i = 0; i < dev->num_tx_queues && q->qdiscs[i]; i++)
+               for (i = 0; i < dev->num_tx_queues; i++)
                        qdisc_put(q->qdiscs[i]);
 
                kfree(q->qdiscs);
index b248f2349437da03e1d2e7e5f4d44886779eef90..c9766d07eb81a1cdee4cad8bfc4a393114543e6f 100644 (file)
@@ -1062,6 +1062,90 @@ err_noclose:
        return 0;       /* record not complete */
 }
 
+static int svc_tcp_send_kvec(struct socket *sock, const struct kvec *vec,
+                             int flags)
+{
+       return kernel_sendpage(sock, virt_to_page(vec->iov_base),
+                              offset_in_page(vec->iov_base),
+                              vec->iov_len, flags);
+}
+
+/*
+ * kernel_sendpage() is used exclusively to reduce the number of
+ * copy operations in this path. Therefore the caller must ensure
+ * that the pages backing @xdr are unchanging.
+ *
+ * In addition, the logic assumes that .bv_len is never larger
+ * than PAGE_SIZE.
+ */
+static int svc_tcp_sendmsg(struct socket *sock, struct msghdr *msg,
+                          struct xdr_buf *xdr, rpc_fraghdr marker,
+                          unsigned int *sentp)
+{
+       const struct kvec *head = xdr->head;
+       const struct kvec *tail = xdr->tail;
+       struct kvec rm = {
+               .iov_base       = &marker,
+               .iov_len        = sizeof(marker),
+       };
+       int flags, ret;
+
+       *sentp = 0;
+       xdr_alloc_bvec(xdr, GFP_KERNEL);
+
+       msg->msg_flags = MSG_MORE;
+       ret = kernel_sendmsg(sock, msg, &rm, 1, rm.iov_len);
+       if (ret < 0)
+               return ret;
+       *sentp += ret;
+       if (ret != rm.iov_len)
+               return -EAGAIN;
+
+       flags = head->iov_len < xdr->len ? MSG_MORE | MSG_SENDPAGE_NOTLAST : 0;
+       ret = svc_tcp_send_kvec(sock, head, flags);
+       if (ret < 0)
+               return ret;
+       *sentp += ret;
+       if (ret != head->iov_len)
+               goto out;
+
+       if (xdr->page_len) {
+               unsigned int offset, len, remaining;
+               struct bio_vec *bvec;
+
+               bvec = xdr->bvec;
+               offset = xdr->page_base;
+               remaining = xdr->page_len;
+               flags = MSG_MORE | MSG_SENDPAGE_NOTLAST;
+               while (remaining > 0) {
+                       if (remaining <= PAGE_SIZE && tail->iov_len == 0)
+                               flags = 0;
+                       len = min(remaining, bvec->bv_len);
+                       ret = kernel_sendpage(sock, bvec->bv_page,
+                                             bvec->bv_offset + offset,
+                                             len, flags);
+                       if (ret < 0)
+                               return ret;
+                       *sentp += ret;
+                       if (ret != len)
+                               goto out;
+                       remaining -= len;
+                       offset = 0;
+                       bvec++;
+               }
+       }
+
+       if (tail->iov_len) {
+               ret = svc_tcp_send_kvec(sock, tail, 0);
+               if (ret < 0)
+                       return ret;
+               *sentp += ret;
+       }
+
+out:
+       return 0;
+}
+
 /**
  * svc_tcp_sendto - Send out a reply on a TCP socket
  * @rqstp: completed svc_rqst
@@ -1089,7 +1173,7 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
        mutex_lock(&xprt->xpt_mutex);
        if (svc_xprt_is_dead(xprt))
                goto out_notconn;
-       err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, marker, &sent);
+       err = svc_tcp_sendmsg(svsk->sk_sock, &msg, xdr, marker, &sent);
        xdr_free_bvec(xdr);
        trace_svcsock_tcp_send(xprt, err < 0 ? err : sent);
        if (err < 0 || sent != (xdr->len + sizeof(marker)))
index ac4a317038f1bb90f4c515e813a3c371b301eaaa..8037b04a9edd11cad8845d95a06b2d442c29f8f5 100644 (file)
@@ -423,9 +423,9 @@ static void xsk_destruct_skb(struct sk_buff *skb)
        struct xdp_sock *xs = xdp_sk(skb->sk);
        unsigned long flags;
 
-       spin_lock_irqsave(&xs->tx_completion_lock, flags);
+       spin_lock_irqsave(&xs->pool->cq_lock, flags);
        xskq_prod_submit_addr(xs->pool->cq, addr);
-       spin_unlock_irqrestore(&xs->tx_completion_lock, flags);
+       spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
 
        sock_wfree(skb);
 }
@@ -437,6 +437,7 @@ static int xsk_generic_xmit(struct sock *sk)
        bool sent_frame = false;
        struct xdp_desc desc;
        struct sk_buff *skb;
+       unsigned long flags;
        int err = 0;
 
        mutex_lock(&xs->mutex);
@@ -468,10 +469,13 @@ static int xsk_generic_xmit(struct sock *sk)
                 * if there is space in it. This avoids having to implement
                 * any buffering in the Tx path.
                 */
+               spin_lock_irqsave(&xs->pool->cq_lock, flags);
                if (unlikely(err) || xskq_prod_reserve(xs->pool->cq)) {
+                       spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
                        kfree_skb(skb);
                        goto out;
                }
+               spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
 
                skb->dev = xs->dev;
                skb->priority = sk->sk_priority;
@@ -483,6 +487,9 @@ static int xsk_generic_xmit(struct sock *sk)
                if  (err == NETDEV_TX_BUSY) {
                        /* Tell user-space to retry the send */
                        skb->destructor = sock_wfree;
+                       spin_lock_irqsave(&xs->pool->cq_lock, flags);
+                       xskq_prod_cancel(xs->pool->cq);
+                       spin_unlock_irqrestore(&xs->pool->cq_lock, flags);
                        /* Free skb without triggering the perf drop trace */
                        consume_skb(skb);
                        err = -EAGAIN;
@@ -878,6 +885,10 @@ static int xsk_bind(struct socket *sock, struct sockaddr *addr, int addr_len)
                }
        }
 
+       /* FQ and CQ are now owned by the buffer pool and cleaned up with it. */
+       xs->fq_tmp = NULL;
+       xs->cq_tmp = NULL;
+
        xs->dev = dev;
        xs->zc = xs->umem->zc;
        xs->queue_id = qid;
@@ -1299,7 +1310,6 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
        xs->state = XSK_READY;
        mutex_init(&xs->mutex);
        spin_lock_init(&xs->rx_lock);
-       spin_lock_init(&xs->tx_completion_lock);
 
        INIT_LIST_HEAD(&xs->map_list);
        spin_lock_init(&xs->map_list_lock);
index 67a4494d63b681b80b2bb92428925f2edf5972ee..20598eea658c472fbea46f8365f7ca369c4b435a 100644 (file)
@@ -71,12 +71,11 @@ struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->xsk_tx_list);
        spin_lock_init(&pool->xsk_tx_list_lock);
+       spin_lock_init(&pool->cq_lock);
        refcount_set(&pool->users, 1);
 
        pool->fq = xs->fq_tmp;
        pool->cq = xs->cq_tmp;
-       xs->fq_tmp = NULL;
-       xs->cq_tmp = NULL;
 
        for (i = 0; i < pool->free_heads_cnt; i++) {
                xskb = &pool->heads[i];
index 4a9663aa7afe6dc6c5e9ea3223263a42951ba9b3..2823b7c3302d0a24db4ce1f0720ca8094693bde7 100644 (file)
@@ -334,6 +334,11 @@ static inline bool xskq_prod_is_full(struct xsk_queue *q)
        return xskq_prod_nb_free(q, 1) ? false : true;
 }
 
+static inline void xskq_prod_cancel(struct xsk_queue *q)
+{
+       q->cached_prod--;
+}
+
 static inline int xskq_prod_reserve(struct xsk_queue *q)
 {
        if (xskq_prod_is_full(q))
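
xskq_prod_cancel() undoes a xskq_prod_reserve() made before the skb was handed to the driver; the NETDEV_TX_BUSY path in xsk_generic_xmit() above is the caller. A userspace model of the reserve/cancel pairing, with the ring bookkeeping simplified to two counters:

	#include <stdio.h>

	struct ring {
		unsigned int cached_prod;
		unsigned int cached_cons;
		unsigned int size;
	};

	static int prod_reserve(struct ring *r)
	{
		if (r->cached_prod - r->cached_cons == r->size)
			return -1;		/* full */
		r->cached_prod++;
		return 0;
	}

	static void prod_cancel(struct ring *r)
	{
		r->cached_prod--;		/* give the slot back */
	}

	int main(void)
	{
		struct ring cq = { .size = 4 };

		if (prod_reserve(&cq) == 0) {
			/* driver said TX_BUSY: undo the reservation */
			prod_cancel(&cq);
		}
		printf("outstanding: %u\n", cq.cached_prod - cq.cached_cons);
		return 0;
	}
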
index be6351e3f3cdfaa6c15d1b824907042d0365731d..1158cd0311d7d0efef498b9b8b6ffa3c8fd241a1 100644 (file)
@@ -660,7 +660,7 @@ resume:
                /* only the first xfrm gets the encap type */
                encap_type = 0;
 
-               if (async && x->repl->recheck(x, skb, seq)) {
+               if (x->repl->recheck(x, skb, seq)) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATESEQERROR);
                        goto drop_unlock;
                }
index 0ed6e4d71d87b16d68fc217126ac1caf36ec7caa..e76cdfc50e257d83e14a242bc8cb94e0196f2949 100644 (file)
@@ -210,7 +210,7 @@ config SAMPLE_WATCHDOG
        depends on CC_CAN_LINK
 
 config SAMPLE_WATCH_QUEUE
-       bool "Build example /dev/watch_queue notification consumer"
+       bool "Build example watch_queue notification API consumer"
        depends on CC_CAN_LINK && HEADERS_INSTALL
        help
          Build example userspace program to use the new mount_notify(),
index 46e618a897fef943c52749f46d8038459ca3f53a..d244594a035407b0094fa90d9f4ef803901af2eb 100644 (file)
@@ -1,5 +1,5 @@
 // SPDX-License-Identifier: GPL-2.0
-/* Use /dev/watch_queue to watch for notifications.
+/* Use watch_queue API to watch for notifications.
  *
  * Copyright (C) 2020 Red Hat, Inc. All Rights Reserved.
  * Written by David Howells (dhowells@redhat.com)
@@ -26,6 +26,9 @@
 #ifndef __NR_keyctl
 #define __NR_keyctl -1
 #endif
+#ifndef __NR_watch_mount
+#define __NR_watch_mount -1
+#endif
 
 #define BUF_SIZE 256
 
@@ -58,6 +61,29 @@ static void saw_key_change(struct watch_notification *n, size_t len)
               k->key_id, n->subtype, key_subtypes[n->subtype], k->aux);
 }
 
+static const char *mount_subtypes[256] = {
+       [NOTIFY_MOUNT_NEW_MOUNT]        = "new_mount",
+       [NOTIFY_MOUNT_UNMOUNT]          = "unmount",
+       [NOTIFY_MOUNT_EXPIRY]           = "expiry",
+       [NOTIFY_MOUNT_READONLY]         = "readonly",
+       [NOTIFY_MOUNT_SETATTR]          = "setattr",
+       [NOTIFY_MOUNT_MOVE_FROM]        = "move_from",
+       [NOTIFY_MOUNT_MOVE_TO]          = "move_to",
+};
+
+static void saw_mount_change(struct watch_notification *n, size_t len)
+{
+       struct mount_notification *m = (struct mount_notification *)n;
+
+       if (len != sizeof(struct mount_notification))
+               return;
+
+       printf("MOUNT %08llx change=%u[%s] aux=%llx\n",
+              (unsigned long long)m->triggered_on,
+              n->subtype, mount_subtypes[n->subtype],
+              (unsigned long long)m->auxiliary_mount);
+}
+
 /*
  * Consume and display events.
  */
@@ -134,6 +160,9 @@ static void consumer(int fd)
                        default:
                                printf("other type\n");
                                break;
+                       case WATCH_TYPE_MOUNT_NOTIFY:
+                               saw_mount_change(&n.n, len);
+                               break;
                        }
 
                        p += len;
@@ -142,12 +171,17 @@ static void consumer(int fd)
 }
 
 static struct watch_notification_filter filter = {
-       .nr_filters     = 1,
+       .nr_filters     = 2,
        .filters = {
                [0]     = {
                        .type                   = WATCH_TYPE_KEY_NOTIFY,
                        .subtype_filter[0]      = UINT_MAX,
                },
+               [1] = {
+                       .type                   = WATCH_TYPE_MOUNT_NOTIFY,
+                       // Reject move-from notifications
+                       .subtype_filter[0]      = UINT_MAX & ~(1 << NOTIFY_MOUNT_MOVE_FROM),
+               },
        },
 };
 
@@ -181,6 +215,11 @@ int main(int argc, char **argv)
                exit(1);
        }
 
+       if (syscall(__NR_watch_mount, AT_FDCWD, "/", 0, fd, 0xde) == -1) {
+               perror("watch_mount");
+               exit(1);
+       }
+
        consumer(fd);
        exit(0);
 }
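
With the second filter slot and the watch_mount() call in place, mount events flow through saw_mount_change() above; an illustrative output line following its printf format (the mount IDs are example values):

	MOUNT 0000002a change=0[new_mount] aux=2b
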
index 36cadadbfba47394ef9e2cee74b11e302107bc01..1e5c01916173843a38691fc9f7eb43e2463f831f 100644 (file)
@@ -38,13 +38,12 @@ __init int ima_mok_init(void)
                                (KEY_POS_ALL & ~KEY_POS_SETATTR) |
                                KEY_USR_VIEW | KEY_USR_READ |
                                KEY_USR_WRITE | KEY_USR_SEARCH,
-                               KEY_ALLOC_NOT_IN_QUOTA,
+                               KEY_ALLOC_NOT_IN_QUOTA |
+                               KEY_ALLOC_SET_KEEP,
                                restriction, NULL);
 
        if (IS_ERR(ima_blacklist_keyring))
                panic("Can't allocate IMA blacklist keyring.");
-
-       set_bit(KEY_FLAG_KEEP, &ima_blacklist_keyring->flags);
        return 0;
 }
 device_initcall(ima_mok_init);
index 83bc23409164a61ea8e89cc2d341d050ca5d10d6..c161642a8484172a9dfb7a2fa9500da523fecfb3 100644 (file)
@@ -119,7 +119,7 @@ config KEY_NOTIFICATIONS
        bool "Provide key/keyring change notifications"
        depends on KEYS && WATCH_QUEUE
        help
-         This option provides support for getting change notifications on keys
-         and keyrings on which the caller has View permission.  This makes use
-         of the /dev/watch_queue misc device to handle the notification
-         buffer and provides KEYCTL_WATCH_KEY to enable/disable watches.
+         This option provides support for getting change notifications
+         on keys and keyrings on which the caller has View permission.
+         This makes use of pipes to handle the notification buffer and
+         provides KEYCTL_WATCH_KEY to enable/disable watches.
index 691347dea3c179be27db2678fd4736f0693e4162..d17e5f09eeb895ea761e34c83f56ded50230a974 100644 (file)
@@ -121,8 +121,7 @@ int big_key_preparse(struct key_preparsed_payload *prep)
                *path = file->f_path;
                path_get(path);
                fput(file);
-               memzero_explicit(buf, enclen);
-               kvfree(buf);
+               kvfree_sensitive(buf, enclen);
        } else {
                /* Just store the data in a buffer */
                void *data = kmalloc(datalen, GFP_KERNEL);
@@ -140,8 +139,7 @@ err_fput:
 err_enckey:
        kfree_sensitive(enckey);
 error:
-       memzero_explicit(buf, enclen);
-       kvfree(buf);
+       kvfree_sensitive(buf, enclen);
        return ret;
 }
 
@@ -273,8 +271,7 @@ long big_key_read(const struct key *key, char *buffer, size_t buflen)
 err_fput:
                fput(file);
 error:
-               memzero_explicit(buf, enclen);
-               kvfree(buf);
+               kvfree_sensitive(buf, enclen);
        } else {
                ret = datalen;
                memcpy(buffer, key->payload.data[big_key_data], datalen);
index ebe752b137aa1149615a0395b31f769d6ff59eac..c45afdd1dfbb4fa3dfa01ff927707671186e4b30 100644 (file)
@@ -303,6 +303,8 @@ struct key *key_alloc(struct key_type *type, const char *desc,
                key->flags |= 1 << KEY_FLAG_BUILTIN;
        if (flags & KEY_ALLOC_UID_KEYRING)
                key->flags |= 1 << KEY_FLAG_UID_KEYRING;
+       if (flags & KEY_ALLOC_SET_KEEP)
+               key->flags |= 1 << KEY_FLAG_KEEP;
 
 #ifdef KEY_DEBUGGING
        key->magic = KEY_DEBUG_MAGIC;
index 61a614c21b9b6e42b86748d608ef587d58a09722..96a92a645216de09e1870e93ad0464b09f522078 100644 (file)
@@ -506,7 +506,7 @@ error:
  * keyring, otherwise replace the link to the matching key with a link to the
  * new key.
  *
- * The key must grant the caller Link permission and the the keyring must grant
+ * The key must grant the caller Link permission and the keyring must grant
  * the caller Write permission.  Furthermore, if an additional link is created,
  * the keyring's quota will be extended.
  *
index 931d8dfb4a7f42172a934236d046d19de4130c86..5de0d599a2748f50f3f4b144b8440f08bc57581b 100644 (file)
@@ -166,8 +166,6 @@ long keyctl_pkey_query(key_serial_t id,
        struct kernel_pkey_query res;
        long ret;
 
-       memset(&params, 0, sizeof(params));
-
        ret = keyctl_pkey_params_get(id, _info, &params);
        if (ret < 0)
                goto error;
index 14abfe765b7e789765474c24070aaa738ca6dc70..5e6a907607530e2097ab5a4eba4f19b4d8426f2a 100644 (file)
@@ -452,7 +452,7 @@ static void keyring_describe(const struct key *keyring, struct seq_file *m)
 struct keyring_read_iterator_context {
        size_t                  buflen;
        size_t                  count;
-       key_serial_t __user     *buffer;
+       key_serial_t            *buffer;
 };
 
 static int keyring_read_iterator(const void *object, void *data)
@@ -479,7 +479,7 @@ static int keyring_read_iterator(const void *object, void *data)
  * times.
  */
 static long keyring_read(const struct key *keyring,
-                        char __user *buffer, size_t buflen)
+                        char *buffer, size_t buflen)
 {
        struct keyring_read_iterator_context ctx;
        long ret;
@@ -491,7 +491,7 @@ static long keyring_read(const struct key *keyring,
 
        /* Copy as many key IDs as fit into the buffer */
        if (buffer && buflen) {
-               ctx.buffer = (key_serial_t __user *)buffer;
+               ctx.buffer = (key_serial_t *)buffer;
                ctx.buflen = buflen;
                ctx.count = 0;
                ret = assoc_array_iterate(&keyring->keys,
@@ -881,7 +881,7 @@ found:
  *
  * Keys are matched to the type provided and are then filtered by the match
  * function, which is given the description to use in any way it sees fit.  The
- * match function may use any attributes of a key that it wishes to to
+ * match function may use any attributes of a key that it wishes to
  * determine the match.  Normally the match function from the key type would be
  * used.
  *
@@ -1204,7 +1204,7 @@ static int keyring_detect_cycle_iterator(const void *object,
 }
 
 /*
- * See if a cycle will will be created by inserting acyclic tree B in acyclic
+ * See if a cycle will be created by inserting acyclic tree B in acyclic
  * tree A at the topmost level (ie: as a direct child of A).
  *
  * Since we are adding B to A at the top level, checking for cycles should just
index 1fe8b934f656f9b966ae0584436070fea7f911d9..e3d79a7b6db661113eac26533d1182d8da5bf3fd 100644 (file)
@@ -783,6 +783,7 @@ try_again:
                                if (need_perm != KEY_AUTHTOKEN_OVERRIDE &&
                                    need_perm != KEY_DEFER_PERM_CHECK)
                                        goto invalid_key;
+                               break;
                        case 0:
                                break;
                        }
index 7b09cfbae94f7ce49cf25b02a58020792f9f9cc6..31f0e9ae4c6ec09aba16ace44cf460fbd9d4bc84 100644 (file)
@@ -2084,6 +2084,13 @@ int security_watch_key(struct key *key)
 }
 #endif
 
+#ifdef CONFIG_MOUNT_NOTIFICATIONS
+int security_watch_mount(struct watch *watch, struct path *path)
+{
+       return call_int_hook(watch_mount, 0, watch, path);
+}
+#endif
+
 #ifdef CONFIG_SECURITY_NETWORK
 
 int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk)
index 6852668f1bcb47633bb366460f2b87c0fd910cb5..18cdd67bcb866847002accebd56ffd2e95bc89c6 100644 (file)
@@ -2600,7 +2600,8 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_AMD_SB },
        /* ATI HDMI */
        { PCI_DEVICE(0x1002, 0x0002),
-         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+         AZX_DCAPS_PM_RUNTIME },
        { PCI_DEVICE(0x1002, 0x1308),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0x157a),
@@ -2662,9 +2663,11 @@ static const struct pci_device_id azx_ids[] = {
        { PCI_DEVICE(0x1002, 0xaab0),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaac0),
-         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+         AZX_DCAPS_PM_RUNTIME },
        { PCI_DEVICE(0x1002, 0xaac8),
-         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
+         AZX_DCAPS_PM_RUNTIME },
        { PCI_DEVICE(0x1002, 0xaad8),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS |
          AZX_DCAPS_PM_RUNTIME },
index 142373ec411adb31ed33f5a7cf0a321c72c91de8..9fe9471f4514dc61ebf97678930fe48b7b184372 100644 (file)
@@ -143,7 +143,7 @@ config SND_MCHP_SOC_SPDIFTX
          - sama7g5
 
          This S/PDIF TX driver is compliant with IEC-60958 standard and
-         includes programable User Data and Channel Status fields.
+         includes programmable User Data and Channel Status fields.
 
 config SND_MCHP_SOC_SPDIFRX
        tristate "Microchip ASoC driver for boards using S/PDIF RX"
@@ -157,5 +157,5 @@ config SND_MCHP_SOC_SPDIFRX
          - sama7g5
 
          This S/PDIF RX driver is compliant with IEC-60958 standard and
-         includes programable User Data and Channel Status fields.
+         includes programmable User Data and Channel Status fields.
 endif
index ba4eb54aafcb9f4e181d9692fe331863537b6bc3..9bf6bfdaf11e4e6fc8f30403700855a9f38f46bd 100644 (file)
@@ -457,7 +457,7 @@ config SND_SOC_ADAU7118_HW
        help
          Enable support for the Analog Devices ADAU7118 8 Channel PDM-to-I2S/TDM
          Converter. In this mode, the device works in standalone mode which
-         means that there is no bus to comunicate with it. Stereo mode is not
+         means that there is no bus to communicate with it. Stereo mode is not
          supported in this mode.
 
          To compile this driver as a module, choose M here: the module
index 92921e34f9486708242b60d6dfd02828c3c8f834..85f6865019d4a82d9ac924294c32452c86f8d028 100644 (file)
 #include <sound/tlv.h>
 #include "max98373.h"
 
+static const u32 max98373_i2c_cache_reg[] = {
+       MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK,
+       MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK,
+       MAX98373_R20B6_BDE_CUR_STATE_READBACK,
+};
+
 static struct reg_default max98373_reg[] = {
        {MAX98373_R2000_SW_RESET, 0x00},
        {MAX98373_R2001_INT_RAW1, 0x00},
@@ -472,6 +478,11 @@ static struct snd_soc_dai_driver max98373_dai[] = {
 static int max98373_suspend(struct device *dev)
 {
        struct max98373_priv *max98373 = dev_get_drvdata(dev);
+       int i;
+
+       /* cache feedback register values before suspend */
+       for (i = 0; i < max98373->cache_num; i++)
+               regmap_read(max98373->regmap, max98373->cache[i].reg, &max98373->cache[i].val);
 
        regcache_cache_only(max98373->regmap, true);
        regcache_mark_dirty(max98373->regmap);
@@ -509,6 +520,7 @@ static int max98373_i2c_probe(struct i2c_client *i2c,
 {
        int ret = 0;
        int reg = 0;
+       int i;
        struct max98373_priv *max98373 = NULL;
 
        max98373 = devm_kzalloc(&i2c->dev, sizeof(*max98373), GFP_KERNEL);
@@ -534,6 +546,14 @@ static int max98373_i2c_probe(struct i2c_client *i2c,
                return ret;
        }
 
+       max98373->cache_num = ARRAY_SIZE(max98373_i2c_cache_reg);
+       max98373->cache = devm_kcalloc(&i2c->dev, max98373->cache_num,
+                                      sizeof(*max98373->cache),
+                                      GFP_KERNEL);
+
+       for (i = 0; i < max98373->cache_num; i++)
+               max98373->cache[i].reg = max98373_i2c_cache_reg[i];
+
        /* voltage/current slot & gpio configuration */
        max98373_slot_config(&i2c->dev, max98373);
 
index ec2e79c57357729835f6165b9eeb999832656135..b8d471d79e939e885553691565cbf036d3afaa4c 100644 (file)
@@ -23,6 +23,12 @@ struct sdw_stream_data {
        struct sdw_stream_runtime *sdw_stream;
 };
 
+static const u32 max98373_sdw_cache_reg[] = {
+       MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK,
+       MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK,
+       MAX98373_R20B6_BDE_CUR_STATE_READBACK,
+};
+
 static struct reg_default max98373_reg[] = {
        {MAX98373_R0040_SCP_INIT_STAT_1, 0x00},
        {MAX98373_R0041_SCP_INIT_MASK_1, 0x00},
@@ -245,6 +251,11 @@ static const struct regmap_config max98373_sdw_regmap = {
 static __maybe_unused int max98373_suspend(struct device *dev)
 {
        struct max98373_priv *max98373 = dev_get_drvdata(dev);
+       int i;
+
+       /* cache feedback register values before suspend */
+       for (i = 0; i < max98373->cache_num; i++)
+               regmap_read(max98373->regmap, max98373->cache[i].reg, &max98373->cache[i].val);
 
        regcache_cache_only(max98373->regmap, true);
 
@@ -757,6 +768,7 @@ static int max98373_init(struct sdw_slave *slave, struct regmap *regmap)
 {
        struct max98373_priv *max98373;
        int ret;
+       int i;
        struct device *dev = &slave->dev;
 
        /*  Allocate and assign private driver data structure  */
@@ -768,6 +780,14 @@ static int max98373_init(struct sdw_slave *slave, struct regmap *regmap)
        max98373->regmap = regmap;
        max98373->slave = slave;
 
+       max98373->cache_num = ARRAY_SIZE(max98373_sdw_cache_reg);
+       max98373->cache = devm_kcalloc(dev, max98373->cache_num,
+                                      sizeof(*max98373->cache),
+                                      GFP_KERNEL);
+
+       for (i = 0; i < max98373->cache_num; i++)
+               max98373->cache[i].reg = max98373_sdw_cache_reg[i];
+
        /* Read voltage and slot configuration */
        max98373_slot_config(dev, max98373);
 
index 929bb1798c43f9a5c03af199052a9682aae8ae34..31d571d4fac1ce590e51386759ede696ab7ddba6 100644 (file)
@@ -168,6 +168,31 @@ static SOC_ENUM_SINGLE_DECL(max98373_adc_samplerate_enum,
                            MAX98373_R2051_MEAS_ADC_SAMPLING_RATE, 0,
                            max98373_ADC_samplerate_text);
 
+static int max98373_feedback_get(struct snd_kcontrol *kcontrol,
+                                struct snd_ctl_elem_value *ucontrol)
+{
+       struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+       struct soc_mixer_control *mc =
+               (struct soc_mixer_control *)kcontrol->private_value;
+       struct max98373_priv *max98373 = snd_soc_component_get_drvdata(component);
+       int i;
+
+       if (snd_soc_component_get_bias_level(component) == SND_SOC_BIAS_OFF) {
+               /*
+                * Register values are cached before suspend, so the cached
+                * value is valid and userspace will be happy with it.
+                */
+               for (i = 0; i < max98373->cache_num; i++) {
+                       if (mc->reg == max98373->cache[i].reg) {
+                               ucontrol->value.integer.value[0] = max98373->cache[i].val;
+                               return 0;
+                       }
+               }
+       }
+
+       return snd_soc_put_volsw(kcontrol, ucontrol);
+}
+
 static const struct snd_kcontrol_new max98373_snd_controls[] = {
 SOC_SINGLE("Digital Vol Sel Switch", MAX98373_R203F_AMP_DSP_CFG,
        MAX98373_AMP_VOL_SEL_SHIFT, 1, 0),
@@ -209,8 +234,10 @@ SOC_SINGLE("ADC PVDD FLT Switch", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
        MAX98373_FLT_EN_SHIFT, 1, 0),
 SOC_SINGLE("ADC TEMP FLT Switch", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
        MAX98373_FLT_EN_SHIFT, 1, 0),
-SOC_SINGLE("ADC PVDD", MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0, 0xFF, 0),
-SOC_SINGLE("ADC TEMP", MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0, 0xFF, 0),
+SOC_SINGLE_EXT("ADC PVDD", MAX98373_R2054_MEAS_ADC_PVDD_CH_READBACK, 0, 0xFF, 0,
+       max98373_feedback_get, NULL),
+SOC_SINGLE_EXT("ADC TEMP", MAX98373_R2055_MEAS_ADC_THERM_CH_READBACK, 0, 0xFF, 0,
+       max98373_feedback_get, NULL),
 SOC_SINGLE("ADC PVDD FLT Coeff", MAX98373_R2052_MEAS_ADC_PVDD_FLT_CFG,
        0, 0x3, 0),
 SOC_SINGLE("ADC TEMP FLT Coeff", MAX98373_R2053_MEAS_ADC_THERM_FLT_CFG,
@@ -226,7 +253,8 @@ SOC_SINGLE("BDE LVL1 Thresh", MAX98373_R2097_BDE_L1_THRESH, 0, 0xFF, 0),
 SOC_SINGLE("BDE LVL2 Thresh", MAX98373_R2098_BDE_L2_THRESH, 0, 0xFF, 0),
 SOC_SINGLE("BDE LVL3 Thresh", MAX98373_R2099_BDE_L3_THRESH, 0, 0xFF, 0),
 SOC_SINGLE("BDE LVL4 Thresh", MAX98373_R209A_BDE_L4_THRESH, 0, 0xFF, 0),
-SOC_SINGLE("BDE Active Level", MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0, 8, 0),
+SOC_SINGLE_EXT("BDE Active Level", MAX98373_R20B6_BDE_CUR_STATE_READBACK, 0, 8, 0,
+       max98373_feedback_get, NULL),
 SOC_SINGLE("BDE Clip Mode Switch", MAX98373_R2092_BDE_CLIPPER_MODE, 0, 1, 0),
 SOC_SINGLE("BDE Thresh Hysteresis", MAX98373_R209B_BDE_THRESH_HYST, 0, 0xFF, 0),
 SOC_SINGLE("BDE Hold Time", MAX98373_R2090_BDE_LVL_HOLD, 0, 0xFF, 0),
index 4ab29b9d51c74edd602d036e13c110b14ceb56f1..71f5a5228f34b1629644723bfb5bd47808bc6900 100644 (file)
 /* MAX98373_R2000_SW_RESET */
 #define MAX98373_SOFT_RESET (0x1 << 0)
 
+struct max98373_cache {
+       u32 reg;
+       u32 val;
+};
+
 struct max98373_priv {
        struct regmap *regmap;
        int reset_gpio;
@@ -212,6 +217,9 @@ struct max98373_priv {
        bool interleave_mode;
        unsigned int ch_size;
        bool tdm_mode;
+       /* cache for reading a valid fake feedback value */
+       struct max98373_cache *cache;
+       int cache_num;
        /* variables to support soundwire */
        struct sdw_slave *slave;
        bool hw_init;
index 5771c02c34596f5d5cc4f06f243f57f771ef95cc..85f744184a60fe4302baf6e9a58f0190d23ab5a3 100644 (file)
@@ -462,6 +462,8 @@ static int rt711_set_amp_gain_put(struct snd_kcontrol *kcontrol,
        unsigned int read_ll, read_rl;
        int i;
 
+       mutex_lock(&rt711->calibrate_mutex);
+
        /* Can't use update bit function, so read the original value first */
        addr_h = mc->reg;
        addr_l = mc->rreg;
@@ -547,6 +549,8 @@ static int rt711_set_amp_gain_put(struct snd_kcontrol *kcontrol,
        if (dapm->bias_level <= SND_SOC_BIAS_STANDBY)
                regmap_write(rt711->regmap,
                                RT711_SET_AUDIO_POWER_STATE, AC_PWRST_D3);
+
+       mutex_unlock(&rt711->calibrate_mutex);
        return 0;
 }
 
@@ -859,9 +863,11 @@ static int rt711_set_bias_level(struct snd_soc_component *component,
                break;
 
        case SND_SOC_BIAS_STANDBY:
+               mutex_lock(&rt711->calibrate_mutex);
                regmap_write(rt711->regmap,
                        RT711_SET_AUDIO_POWER_STATE,
                        AC_PWRST_D3);
+               mutex_unlock(&rt711->calibrate_mutex);
                break;
 
        default:
index 2c2a76a719401daa2751a1d72391164fe101f205..ede4a9ad1054cee66e49b2f1e85de85048103153 100644 (file)
@@ -164,6 +164,7 @@ static int imx_hdmi_probe(struct platform_device *pdev)
 
        if ((hdmi_out && hdmi_in) || (!hdmi_out && !hdmi_in)) {
                dev_err(&pdev->dev, "Invalid HDMI DAI link\n");
+               ret = -EINVAL;
                goto fail;
        }
 
index c55d1239e705b50e639cb543bb29a383510748dd..c763bfeb1f38fbfe89f3462a6ad866115ecc4d1f 100644 (file)
@@ -189,6 +189,7 @@ static struct platform_driver haswell_audio = {
        .probe = haswell_audio_probe,
        .driver = {
                .name = "haswell-audio",
+               .pm = &snd_soc_pm_ops,
        },
 };
 
index fcd8dff27ae8e837f88ab83e00920b0eded02e31..1275c149acc021986feb2048c12c63d91d75fe89 100644 (file)
@@ -224,6 +224,7 @@ static int cnl_set_dsp_D0(struct sst_dsp *ctx, unsigned int core_id)
                                "dsp boot timeout, status=%#x error=%#x\n",
                                sst_dsp_shim_read(ctx, CNL_ADSP_FW_STATUS),
                                sst_dsp_shim_read(ctx, CNL_ADSP_ERROR_CODE));
+                       ret = -ETIMEDOUT;
                        goto err;
                }
        } else {
index c8664ab80d45ad0b0774370221938a9d79f75620..87cac440b36933dc6180c13b9dec766d7b0725e5 100644 (file)
@@ -467,8 +467,20 @@ static int axg_tdm_iface_set_bias_level(struct snd_soc_component *component,
        return ret;
 }
 
+static const struct snd_soc_dapm_widget axg_tdm_iface_dapm_widgets[] = {
+       SND_SOC_DAPM_SIGGEN("Playback Signal"),
+};
+
+static const struct snd_soc_dapm_route axg_tdm_iface_dapm_routes[] = {
+       { "Loopback", NULL, "Playback Signal" },
+};
+
 static const struct snd_soc_component_driver axg_tdm_iface_component_drv = {
-       .set_bias_level = axg_tdm_iface_set_bias_level,
+       .dapm_widgets           = axg_tdm_iface_dapm_widgets,
+       .num_dapm_widgets       = ARRAY_SIZE(axg_tdm_iface_dapm_widgets),
+       .dapm_routes            = axg_tdm_iface_dapm_routes,
+       .num_dapm_routes        = ARRAY_SIZE(axg_tdm_iface_dapm_routes),
+       .set_bias_level         = axg_tdm_iface_set_bias_level,
 };
 
 static const struct of_device_id axg_tdm_iface_of_match[] = {
index 88ed95ae886bb506cda8c8ac74dae2babcd568c0..b4faf9d5c1aad15edb2ab34cf13b87e926c7fc87 100644 (file)
@@ -224,15 +224,6 @@ static const struct axg_tdm_formatter_ops axg_tdmin_ops = {
 };
 
 static const struct axg_tdm_formatter_driver axg_tdmin_drv = {
-       .component_drv  = &axg_tdmin_component_drv,
-       .regmap_cfg     = &axg_tdmin_regmap_cfg,
-       .ops            = &axg_tdmin_ops,
-       .quirks         = &(const struct axg_tdm_formatter_hw) {
-               .skew_offset    = 2,
-       },
-};
-
-static const struct axg_tdm_formatter_driver g12a_tdmin_drv = {
        .component_drv  = &axg_tdmin_component_drv,
        .regmap_cfg     = &axg_tdmin_regmap_cfg,
        .ops            = &axg_tdmin_ops,
@@ -247,10 +238,10 @@ static const struct of_device_id axg_tdmin_of_match[] = {
                .data = &axg_tdmin_drv,
        }, {
                .compatible = "amlogic,g12a-tdmin",
-               .data = &g12a_tdmin_drv,
+               .data = &axg_tdmin_drv,
        }, {
                .compatible = "amlogic,sm1-tdmin",
-               .data = &g12a_tdmin_drv,
+               .data = &axg_tdmin_drv,
        }, {}
 };
 MODULE_DEVICE_TABLE(of, axg_tdmin_of_match);
index af684fd19ab9e7b50f8f3147bb32e66ff1c2c2aa..c5e99c2d89c7ed65b3ae4eacd69a449df5b37d4b 100644 (file)
@@ -270,18 +270,6 @@ static int lpass_cpu_daiops_trigger(struct snd_pcm_substream *substream,
        struct lpaif_i2sctl *i2sctl = drvdata->i2sctl;
        unsigned int id = dai->driver->id;
        int ret = -EINVAL;
-       unsigned int val = 0;
-
-       ret = regmap_read(drvdata->lpaif_map,
-                               LPAIF_I2SCTL_REG(drvdata->variant, dai->driver->id), &val);
-       if (ret) {
-               dev_err(dai->dev, "error reading from i2sctl reg: %d\n", ret);
-               return ret;
-       }
-       if (val == LPAIF_I2SCTL_RESET_STATE) {
-               dev_err(dai->dev, "error in i2sctl register state\n");
-               return -ENOTRECOVERABLE;
-       }
 
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
@@ -454,20 +442,16 @@ static bool lpass_cpu_regmap_volatile(struct device *dev, unsigned int reg)
        struct lpass_variant *v = drvdata->variant;
        int i;
 
-       for (i = 0; i < v->i2s_ports; ++i)
-               if (reg == LPAIF_I2SCTL_REG(v, i))
-                       return true;
        for (i = 0; i < v->irq_ports; ++i)
                if (reg == LPAIF_IRQSTAT_REG(v, i))
                        return true;
 
        for (i = 0; i < v->rdma_channels; ++i)
-               if (reg == LPAIF_RDMACURR_REG(v, i) || reg == LPAIF_RDMACTL_REG(v, i))
+               if (reg == LPAIF_RDMACURR_REG(v, i))
                        return true;
 
        for (i = 0; i < v->wrdma_channels; ++i)
-               if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start) ||
-                       reg == LPAIF_WRDMACTL_REG(v, i + v->wrdma_channel_start))
+               if (reg == LPAIF_WRDMACURR_REG(v, i + v->wrdma_channel_start))
                        return true;
 
        return false;
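
Dropping the I2SCTL and DMACTL registers from the volatile list pairs with
the suspend/resume hooks added further down: regmap never caches volatile
registers, so regcache_sync() cannot restore them after the block loses
power. Only registers the hardware itself updates (IRQ status, the DMA
current-address pointer) should remain volatile. A generic sketch with
illustrative register names:

    static bool example_volatile_reg(struct device *dev, unsigned int reg)
    {
            switch (reg) {
            case EXAMPLE_IRQSTAT:   /* written by hardware */
            case EXAMPLE_DMACURR:   /* advances during DMA */
                    return true;    /* read from hardware, never cached */
            default:
                    return false;   /* cached; restored by regcache_sync() */
            }
    }

    static const struct regmap_config example_cfg = {
            .reg_bits       = 32,
            .val_bits       = 32,
            .volatile_reg   = example_volatile_reg,
            .cache_type     = REGCACHE_FLAT,
    };
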
index 80b09dede5f9cbefb4f6a6a7d968ea2166ada871..d1c248590f3ab6da7622dcf8d1909d74c340c88a 100644 (file)
@@ -452,7 +452,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
        unsigned int reg_irqclr = 0, val_irqclr = 0;
        unsigned int  reg_irqen = 0, val_irqen = 0, val_mask = 0;
        unsigned int dai_id = cpu_dai->driver->id;
-       unsigned int dma_ctrl_reg = 0;
 
        ch = pcm_data->dma_ch;
        if (dir ==  SNDRV_PCM_STREAM_PLAYBACK) {
@@ -469,17 +468,7 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                id = pcm_data->dma_ch - v->wrdma_channel_start;
                map = drvdata->lpaif_map;
        }
-       ret = regmap_read(map, LPAIF_DMACTL_REG(v, ch, dir, dai_id), &dma_ctrl_reg);
-       if (ret) {
-               dev_err(soc_runtime->dev, "error reading from rdmactl reg: %d\n", ret);
-               return ret;
-       }
 
-       if (dma_ctrl_reg == LPAIF_DMACTL_RESET_STATE ||
-               dma_ctrl_reg == LPAIF_DMACTL_RESET_STATE + 1) {
-               dev_err(soc_runtime->dev, "error in rdmactl register state\n");
-               return -ENOTRECOVERABLE;
-       }
        switch (cmd) {
        case SNDRV_PCM_TRIGGER_START:
        case SNDRV_PCM_TRIGGER_RESUME:
@@ -500,7 +489,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                                        "error writing to rdmactl reg: %d\n", ret);
                                return ret;
                        }
-                       map = drvdata->hdmiif_map;
                        reg_irqclr = LPASS_HDMITX_APP_IRQCLEAR_REG(v);
                        val_irqclr = (LPAIF_IRQ_ALL(ch) |
                                        LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
@@ -519,7 +507,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                        break;
                case MI2S_PRIMARY:
                case MI2S_SECONDARY:
-                       map = drvdata->lpaif_map;
                        reg_irqclr = LPAIF_IRQCLEAR_REG(v, LPAIF_IRQ_PORT_HOST);
                        val_irqclr = LPAIF_IRQ_ALL(ch);
 
@@ -563,7 +550,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                                        "error writing to rdmactl reg: %d\n", ret);
                                return ret;
                        }
-                       map = drvdata->hdmiif_map;
                        reg_irqen = LPASS_HDMITX_APP_IRQEN_REG(v);
                        val_mask = (LPAIF_IRQ_ALL(ch) |
                                        LPAIF_IRQ_HDMI_REQ_ON_PRELOAD(ch) |
@@ -573,7 +559,6 @@ static int lpass_platform_pcmops_trigger(struct snd_soc_component *component,
                        break;
                case MI2S_PRIMARY:
                case MI2S_SECONDARY:
-                       map = drvdata->lpaif_map;
                        reg_irqen = LPAIF_IRQEN_REG(v, LPAIF_IRQ_PORT_HOST);
                        val_mask = LPAIF_IRQ_ALL(ch);
                        val_irqen = 0;
@@ -838,6 +823,38 @@ static void lpass_platform_pcm_free(struct snd_soc_component *component,
        }
 }
 
+static int lpass_platform_pcmops_suspend(struct snd_soc_component *component)
+{
+       struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+       struct regmap *map;
+       unsigned int dai_id = component->id;
+
+       if (dai_id == LPASS_DP_RX)
+               map = drvdata->hdmiif_map;
+       else
+               map = drvdata->lpaif_map;
+
+       regcache_cache_only(map, true);
+       regcache_mark_dirty(map);
+
+       return 0;
+}
+
+static int lpass_platform_pcmops_resume(struct snd_soc_component *component)
+{
+       struct lpass_data *drvdata = snd_soc_component_get_drvdata(component);
+       struct regmap *map;
+       unsigned int dai_id = component->id;
+
+       if (dai_id == LPASS_DP_RX)
+               map = drvdata->hdmiif_map;
+       else
+               map = drvdata->lpaif_map;
+
+       regcache_cache_only(map, false);
+       return regcache_sync(map);
+}
+
 static const struct snd_soc_component_driver lpass_component_driver = {
        .name           = DRV_NAME,
        .open           = lpass_platform_pcmops_open,
@@ -850,6 +868,8 @@ static const struct snd_soc_component_driver lpass_component_driver = {
        .mmap           = lpass_platform_pcmops_mmap,
        .pcm_construct  = lpass_platform_pcm_new,
        .pcm_destruct   = lpass_platform_pcm_free,
+       .suspend        = lpass_platform_pcmops_suspend,
+       .resume         = lpass_platform_pcmops_resume,
 
 };
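
The new hooks are the standard regcache suspend/resume idiom, reduced here
to its core:

    static int example_suspend(struct regmap *map)
    {
            regcache_cache_only(map, true); /* stop touching the bus */
            regcache_mark_dirty(map);       /* force a full sync on resume */
            return 0;
    }

    static int example_resume(struct regmap *map)
    {
            regcache_cache_only(map, false);
            return regcache_sync(map);      /* replay cached register values */
    }
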
 
index b9aacf3d3b29c9b0de99830a568371edbffffbe8..abdfd9cf91e2a06e2db05193dbe3ab0a31cc2307 100644 (file)
@@ -366,25 +366,27 @@ void rsnd_adg_clk_control(struct rsnd_priv *priv, int enable)
        struct rsnd_adg *adg = rsnd_priv_to_adg(priv);
        struct device *dev = rsnd_priv_to_dev(priv);
        struct clk *clk;
-       int i, ret;
+       int i;
 
        for_each_rsnd_clk(clk, adg, i) {
-               ret = 0;
                if (enable) {
-                       ret = clk_prepare_enable(clk);
+                       int ret = clk_prepare_enable(clk);
 
                        /*
                         * We shouldn't use clk_get_rate() under
                         * atomic context, so cache the rate when
                         * rsnd_adg_clk_enable() is called
                         */
-                       adg->clk_rate[i] = clk_get_rate(adg->clk[i]);
+                       adg->clk_rate[i] = 0;
+                       if (ret < 0)
+                               dev_warn(dev, "can't use clk %d\n", i);
+                       else
+                               adg->clk_rate[i] = clk_get_rate(clk);
                } else {
-                       clk_disable_unprepare(clk);
+                       if (adg->clk_rate[i])
+                               clk_disable_unprepare(clk);
+                       adg->clk_rate[i] = 0;
                }
-
-               if (ret < 0)
-                       dev_warn(dev, "can't use clk %d\n", i);
        }
 }
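
The adg rework makes the enable and disable paths symmetric: the cached
clock rate doubles as an "is enabled" flag, so clk_disable_unprepare() is
only called for clocks whose clk_prepare_enable() succeeded and the
prepare/enable refcounts stay balanced. A standalone sketch assuming a
rates[] array parallel to the clock array:

    static void toggle_clks(struct clk **clks, unsigned long *rates,
                            int n, bool enable)
    {
            int i;

            for (i = 0; i < n; i++) {
                    if (enable) {
                            rates[i] = 0;   /* assume failure */
                            if (clk_prepare_enable(clks[i]) < 0)
                                    continue;
                            rates[i] = clk_get_rate(clks[i]);
                    } else if (rates[i]) {
                            clk_disable_unprepare(clks[i]);
                            rates[i] = 0;
                    }
            }
    }
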
 
index 9f0c86cbdcca2160d4e75c0db226f36b5b86a9aa..2b75d0139e478c6f1feaff88601ecb15933c6221 100644 (file)
@@ -2486,6 +2486,7 @@ void snd_soc_dapm_free_widget(struct snd_soc_dapm_widget *w)
        enum snd_soc_dapm_direction dir;
 
        list_del(&w->list);
+       list_del(&w->dirty);
        /*
         * remove source and sink paths associated to this widget.
         * While removing the path, remove reference to it from both
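
The one-line list_del() addition fixes a dangling pointer: a widget can
still be linked on the card's dirty-widget list when it is freed, leaving
that list pointing at freed memory. A single list_del() is safe here even
when the entry is not currently linked, provided the list_head was
initialized. The general rule, sketched:

    struct widget {
            struct list_head list;  /* card's widget list */
            struct list_head dirty; /* card's dirty-widget list */
    };

    static void free_widget(struct widget *w)
    {
            /* unlink from every list an embedded list_head may be on
             * before the containing object is freed */
            list_del(&w->list);
            list_del(&w->dirty);
            kfree(w);
    }
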
index 031dad5fc4c701b8107828927e34ae7c2513eb63..3e8b6c035ce3ffba2cbe48c7b69597d3a71ecc0b 100644 (file)
@@ -122,7 +122,7 @@ config SND_SOC_SOF_DEBUG_XRUN_STOP
        bool "SOF stop on XRUN"
        help
          This option forces PCMs to stop on any XRUN event. This is useful to
-         preserve any trace data ond pipeline status prior to the XRUN.
+         preserve any trace data and pipeline status prior to the XRUN.
          Say Y if you are debugging SOF FW pipeline XRUNs.
          If unsure select "N".
 
index afbab4aeef3c894546e95d44f9d9e539e50ae945..8a917cb4426a05e30f718764485025ffab99ed39 100644 (file)
@@ -77,8 +77,10 @@ TARGETS += zram
 TARGETS_HOTPLUG = cpu-hotplug
 TARGETS_HOTPLUG += memory-hotplug
 
-# User can optionally provide a TARGETS skiplist.
-SKIP_TARGETS ?=
+# User can optionally provide a TARGETS skiplist.  By default we skip
+# BPF since it has cutting-edge build-time dependencies which require
+# more effort to install.
+SKIP_TARGETS ?= bpf
 ifneq ($(SKIP_TARGETS),)
        TMP := $(filter-out $(SKIP_TARGETS), $(TARGETS))
        override TARGETS := $(TMP)
index 8c33e999319a9aa6549f4d91e6e622cd97d945fd..c51df6b91befe50b6740e6f25c05d503885b7c1f 100644 (file)
@@ -121,6 +121,9 @@ VMLINUX_BTF_PATHS ?= $(if $(O),$(O)/vmlinux)                                \
                     /sys/kernel/btf/vmlinux                            \
                     /boot/vmlinux-$(shell uname -r)
 VMLINUX_BTF ?= $(abspath $(firstword $(wildcard $(VMLINUX_BTF_PATHS))))
+ifeq ($(VMLINUX_BTF),)
+$(error Cannot find a vmlinux for VMLINUX_BTF at any of "$(VMLINUX_BTF_PATHS)")
+endif
 
 # Define simple and short `make test_progs`, `make test_sysctl`, etc targets
 # to build individual tests.
index 014dedaa4dd28f342244d4ea6227e3f7aac361fc..1e722ee76b1fca01b4539649b1545d71179897b1 100644 (file)
@@ -715,7 +715,7 @@ static void worker_pkt_dump(void)
                int payload = *((uint32_t *)(pkt_buf[iter]->payload + PKT_HDR_SIZE));
 
                if (payload == EOT) {
-                       ksft_print_msg("End-of-tranmission frame received\n");
+                       ksft_print_msg("End-of-transmission frame received\n");
                        fprintf(stdout, "---------------------------------------\n");
                        break;
                }
@@ -747,7 +747,7 @@ static void worker_pkt_validate(void)
                        }
 
                        if (payloadseqnum == EOT) {
-                               ksft_print_msg("End-of-tranmission frame received: PASS\n");
+                               ksft_print_msg("End-of-transmission frame received: PASS\n");
                                sigvar = 1;
                                break;
                        }
index 5eb64d41e54199b47c0b05a1db9f5a37979c17ca..a8dc51af5a9c0c5e5e897163191ebd6e06d9b865 100644 (file)
@@ -1,5 +1,8 @@
 # SPDX-License-Identifier: GPL-2.0-only
 vdso_test
+vdso_test_abi
+vdso_test_clock_getres
+vdso_test_correctness
 vdso_test_gettimeofday
 vdso_test_getcpu
 vdso_standalone_test_x86
index 5029ef9b228c3867efa13d2f81341ba3412ac3a2..c4aea794725a7e502b1334efdfdc512446b40c52 100644 (file)
@@ -349,7 +349,7 @@ static void test_one_clock_gettime64(int clock, const char *name)
                return;
        }
 
-       printf("\t%llu.%09ld %llu.%09ld %llu.%09ld\n",
+       printf("\t%llu.%09lld %llu.%09lld %llu.%09lld\n",
               (unsigned long long)start.tv_sec, start.tv_nsec,
               (unsigned long long)vdso.tv_sec, vdso.tv_nsec,
               (unsigned long long)end.tv_sec, end.tv_nsec);
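
The format-string fix matters on 32-bit builds: the time64 variants of
these tests use struct __kernel_timespec, whose tv_nsec is a long long even
where the classic struct timespec has a plain long, so %09ld no longer
matches the argument. A minimal illustration:

    #include <stdio.h>
    #include <linux/time_types.h>   /* struct __kernel_timespec */

    static void print_ts64(const struct __kernel_timespec *ts)
    {
            /* both fields are 64-bit regardless of the ABI */
            printf("%llu.%09lld\n",
                   (unsigned long long)ts->tv_sec, ts->tv_nsec);
    }
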
index b50c2085c1ac0f1e50abc219a2bdab60b2b790bf..fe07d97df9fa89044d6493452226b0d31c3534c1 100644 (file)
@@ -1,5 +1,4 @@
 CONFIG_LOCALVERSION="-debug"
-CONFIG_ENABLE_MUST_CHECK=y
 CONFIG_FRAME_POINTER=y
 CONFIG_STACK_VALIDATION=y
 CONFIG_DEBUG_KERNEL=y