From 498c48c66eb600535f1221652509eefb2dce7770 Mon Sep 17 00:00:00 2001 From: Kairui Song Date: Thu, 30 Jan 2025 19:51:31 +0800 Subject: [PATCH 01/16] mm, swap: fix reclaim offset calculation error during allocation There is a code error that will cause the swap entry allocator to reclaim and check the whole cluster with an unexpected tail offset instead of the part that needs to be reclaimed. This may cause corruption of the swap map, so fix it. Link: https://lkml.kernel.org/r/20250130115131.37777-1-ryncsn@gmail.com Fixes: 3b644773eefd ("mm, swap: reduce contention on device lock") Signed-off-by: Kairui Song Cc: Chris Li Signed-off-by: Andrew Morton --- mm/swapfile.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/swapfile.c b/mm/swapfile.c index 6e867c16ea93..ba19430dd4ea 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -794,7 +794,7 @@ static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim)) continue; if (need_reclaim) { - ret = cluster_reclaim_range(si, ci, start, end); + ret = cluster_reclaim_range(si, ci, offset, offset + nr_pages); /* * Reclaim drops ci->lock and cluster could be used * by another order. Not checking flag as off-list -- 2.51.0 From 1aaf8c122918aa8897605a9aa1e8ed6600d6f930 Mon Sep 17 00:00:00 2001 From: Zhaoyang Huang Date: Tue, 21 Jan 2025 10:01:59 +0800 Subject: [PATCH 02/16] mm: gup: fix infinite loop within __get_longterm_locked We can run into an infinite loop in __get_longterm_locked() when collect_longterm_unpinnable_folios() finds only folios that are isolated from the LRU or were never added to the LRU. This can happen when all folios to be pinned are never added to the LRU, for example when vm_ops->fault allocated pages using cma_alloc() and never added them to the LRU. Fix it by simply taking a look at the list in the single caller, to see if anything was added. [zhaoyang.huang@unisoc.com: move definition of local] Link: https://lkml.kernel.org/r/20250122012604.3654667-1-zhaoyang.huang@unisoc.com Link: https://lkml.kernel.org/r/20250121020159.3636477-1-zhaoyang.huang@unisoc.com Fixes: 67e139b02d99 ("mm/gup.c: refactor check_and_migrate_movable_pages()") Signed-off-by: Zhaoyang Huang Reviewed-by: John Hubbard Reviewed-by: David Hildenbrand Suggested-by: David Hildenbrand Acked-by: David Hildenbrand Cc: Aijun Sun Cc: Alistair Popple Cc: Signed-off-by: Andrew Morton --- mm/gup.c | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/mm/gup.c b/mm/gup.c index 9aaf338cc1f4..3883b307780e 100644 --- a/mm/gup.c +++ b/mm/gup.c @@ -2320,13 +2320,13 @@ static void pofs_unpin(struct pages_or_folios *pofs) /* * Returns the number of collected folios. Return value is always >= 0. 
*/ -static unsigned long collect_longterm_unpinnable_folios( +static void collect_longterm_unpinnable_folios( struct list_head *movable_folio_list, struct pages_or_folios *pofs) { - unsigned long i, collected = 0; struct folio *prev_folio = NULL; bool drain_allow = true; + unsigned long i; for (i = 0; i < pofs->nr_entries; i++) { struct folio *folio = pofs_get_folio(pofs, i); @@ -2338,8 +2338,6 @@ static unsigned long collect_longterm_unpinnable_folios( if (folio_is_longterm_pinnable(folio)) continue; - collected++; - if (folio_is_device_coherent(folio)) continue; @@ -2361,8 +2359,6 @@ static unsigned long collect_longterm_unpinnable_folios( NR_ISOLATED_ANON + folio_is_file_lru(folio), folio_nr_pages(folio)); } - - return collected; } /* @@ -2439,11 +2435,9 @@ static long check_and_migrate_movable_pages_or_folios(struct pages_or_folios *pofs) { LIST_HEAD(movable_folio_list); - unsigned long collected; - collected = collect_longterm_unpinnable_folios(&movable_folio_list, - pofs); - if (!collected) + collect_longterm_unpinnable_folios(&movable_folio_list, pofs); + if (list_empty(&movable_folio_list)) return 0; return migrate_longterm_unpinnable_folios(&movable_folio_list, pofs); -- 2.51.0 From 76e961157e078bc5d3cd2df08317e00b00a829eb Mon Sep 17 00:00:00 2001 From: "Ritesh Harjani (IBM)" Date: Sat, 11 Jan 2025 16:36:55 +0530 Subject: [PATCH 03/16] mm/hugetlb: fix hugepage allocation for interleaved memory nodes gather_bootmem_prealloc() assumes a start nid of 0 and a size of num_node_state(N_MEMORY). This means that if the memory-attached NUMA nodes are interleaved, gather_bootmem_prealloc_parallel() will fail to scan some of them. Since memory-attached NUMA nodes can be interleaved in any fashion, ensure that the code checks all NUMA node ids (.size = nr_node_ids). Still keep max_threads as N_MEMORY, so that the nr_node_ids nodes are distributed among that many threads. e.g.
qemu cmdline ======================== numa_cmd="-numa node,nodeid=1,memdev=mem1,cpus=2-3 -numa node,nodeid=0,cpus=0-1 -numa dist,src=0,dst=1,val=20" mem_cmd="-object memory-backend-ram,id=mem1,size=16G" w/o this patch for cmdline (default_hugepagesz=1GB hugepagesz=1GB hugepages=2): ========================== ~ # cat /proc/meminfo |grep -i huge AnonHugePages: 0 kB ShmemHugePages: 0 kB FileHugePages: 0 kB HugePages_Total: 0 HugePages_Free: 0 HugePages_Rsvd: 0 HugePages_Surp: 0 Hugepagesize: 1048576 kB Hugetlb: 0 kB with this patch for cmdline (default_hugepagesz=1GB hugepagesz=1GB hugepages=2): =========================== ~ # cat /proc/meminfo |grep -i huge AnonHugePages: 0 kB ShmemHugePages: 0 kB FileHugePages: 0 kB HugePages_Total: 2 HugePages_Free: 2 HugePages_Rsvd: 0 HugePages_Surp: 0 Hugepagesize: 1048576 kB Hugetlb: 2097152 kB Link: https://lkml.kernel.org/r/f8d8dad3a5471d284f54185f65d575a6aaab692b.1736592534.git.ritesh.list@gmail.com Fixes: b78b27d02930 ("hugetlb: parallelize 1G hugetlb initialization") Signed-off-by: Ritesh Harjani (IBM) Reported-by: Pavithra Prakash Suggested-by: Muchun Song Tested-by: Sourabh Jain Reviewed-by: Luiz Capitulino Acked-by: David Rientjes Cc: Donet Tom Cc: Gang Li Cc: Daniel Jordan Cc: Signed-off-by: Andrew Morton --- mm/hugetlb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 3b25b69aa94f..65068671e460 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -3309,7 +3309,7 @@ static void __init gather_bootmem_prealloc(void) .thread_fn = gather_bootmem_prealloc_parallel, .fn_arg = NULL, .start = 0, - .size = num_node_state(N_MEMORY), + .size = nr_node_ids, .align = 1, .min_chunk = 1, .max_threads = num_node_state(N_MEMORY), -- 2.51.0 From e5eaa1bbe2813ac34788e485283be75f9d07137b Mon Sep 17 00:00:00 2001 From: Carlos Bilbao Date: Wed, 29 Jan 2025 19:22:44 -0600 Subject: [PATCH 04/16] mailmap, MAINTAINERS, docs: update Carlos's email address Update .mailmap to reflect my new (and final) primary email address, carlos.bilbao@kernel.org. Also update contact information in files Documentation/translations/sp_SP/index.rst and MAINTAINERS. Link: https://lkml.kernel.org/r/20250130012248.1196208-1-carlos.bilbao@kernel.org Signed-off-by: Carlos Bilbao Cc: Carlos Bilbao Cc: Jonathan Corbet Cc: Mattew Wilcox Signed-off-by: Andrew Morton --- .mailmap | 4 +++- Documentation/translations/sp_SP/index.rst | 2 +- MAINTAINERS | 8 ++++---- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/.mailmap b/.mailmap index fec6b455b576..9a270c53675b 100644 --- a/.mailmap +++ b/.mailmap @@ -148,7 +148,9 @@ Bryan Tan Cai Huoqing Can Guo Carl Huang -Carlos Bilbao +Carlos Bilbao +Carlos Bilbao +Carlos Bilbao Changbin Du Changbin Du Chao Yu diff --git a/Documentation/translations/sp_SP/index.rst b/Documentation/translations/sp_SP/index.rst index aae7018b0d1a..2b50283e1608 100644 --- a/Documentation/translations/sp_SP/index.rst +++ b/Documentation/translations/sp_SP/index.rst @@ -7,7 +7,7 @@ Traducción al español \kerneldocCJKoff -:maintainer: Carlos Bilbao +:maintainer: Carlos Bilbao .. 
_sp_disclaimer: diff --git a/MAINTAINERS b/MAINTAINERS index d269d3c6e317..1824df1f61f0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1090,7 +1090,7 @@ F: drivers/video/fbdev/geode/ AMD HSMP DRIVER M: Naveen Krishna Chatradhi -R: Carlos Bilbao +R: Carlos Bilbao L: platform-driver-x86@vger.kernel.org S: Maintained F: Documentation/arch/x86/amd_hsmp.rst @@ -5856,7 +5856,7 @@ F: drivers/usb/atm/cxacru.c CONFIDENTIAL COMPUTING THREAT MODEL FOR X86 VIRTUALIZATION (SNP/TDX) M: Elena Reshetova -M: Carlos Bilbao +M: Carlos Bilbao S: Maintained F: Documentation/security/snp-tdx-threat-model.rst @@ -11323,7 +11323,7 @@ S: Orphan F: drivers/video/fbdev/imsttfb.c INDEX OF FURTHER KERNEL DOCUMENTATION -M: Carlos Bilbao +M: Carlos Bilbao S: Maintained F: Documentation/process/kernel-docs.rst @@ -22205,7 +22205,7 @@ Q: http://patchwork.linuxtv.org/project/linux-media/list/ F: drivers/media/dvb-frontends/sp2* SPANISH DOCUMENTATION -M: Carlos Bilbao +M: Carlos Bilbao R: Avadhut Naik S: Maintained F: Documentation/translations/sp_SP/ -- 2.51.0 From 0ca2a41e0ccc573845428b686ff09e9322c82b16 Mon Sep 17 00:00:00 2001 From: Tamir Duberstein Date: Wed, 29 Jan 2025 16:13:49 -0500 Subject: [PATCH 05/16] MAINTAINERS: add lib/test_xarray.c Ensure test-only changes are sent to the relevant maintainer. Link: https://lkml.kernel.org/r/20250129-xarray-test-maintainer-v1-1-482e31f30f47@gmail.com Signed-off-by: Tamir Duberstein Cc: Mattew Wilcox Signed-off-by: Andrew Morton --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 1824df1f61f0..f52a004982c9 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -25734,6 +25734,7 @@ F: Documentation/core-api/xarray.rst F: include/linux/idr.h F: include/linux/xarray.h F: lib/idr.c +F: lib/test_xarray.c F: lib/xarray.c F: tools/testing/radix-tree -- 2.51.0 From 050339050f6f2b18d32a61a0f725f423804ad2a5 Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 30 Jan 2025 16:09:20 -0800 Subject: [PATCH 06/16] revert "xarray: port tests to kunit" Revert c7bb5cf9fc4e ("xarray: port tests to kunit"). It broke the build when compiing the xarray userspace test harness code. Reported-by: Sidhartha Kumar Closes: https://lkml.kernel.org/r/07cf896e-adf8-414f-a629-a808fc26014a@oracle.com Cc: David Gow Cc: Matthew Wilcox Cc: Tamir Duberstein Cc: "Liam R. 
Howlett" Cc: Geert Uytterhoeven Cc: Lorenzo Stoakes Signed-off-by: Andrew Morton --- arch/m68k/configs/amiga_defconfig | 1 + arch/m68k/configs/apollo_defconfig | 1 + arch/m68k/configs/atari_defconfig | 1 + arch/m68k/configs/bvme6000_defconfig | 1 + arch/m68k/configs/hp300_defconfig | 1 + arch/m68k/configs/mac_defconfig | 1 + arch/m68k/configs/multi_defconfig | 1 + arch/m68k/configs/mvme147_defconfig | 1 + arch/m68k/configs/mvme16x_defconfig | 1 + arch/m68k/configs/q40_defconfig | 1 + arch/m68k/configs/sun3_defconfig | 1 + arch/m68k/configs/sun3x_defconfig | 1 + arch/powerpc/configs/ppc64_defconfig | 1 + lib/Kconfig.debug | 18 +- lib/Makefile | 2 +- lib/test_xarray.c | 671 +++++++++++---------------- 16 files changed, 294 insertions(+), 410 deletions(-) diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 8acfa66e1095..dbf2ea561c85 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -626,6 +626,7 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index 35e9a0872304..b0fd199cc0a4 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -583,6 +583,7 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index 32891ddd3cc5..bb5b2d3b6c10 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -603,6 +603,7 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index ca276f0db3dd..8315a13bab73 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -575,6 +575,7 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index e83f14fe1a4f..350370657e5f 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -585,6 +585,7 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 6b58be24da79..f942b4755702 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -602,6 +602,7 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 0e8d24f82565..b1eaad02efab 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -689,6 +689,7 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git 
a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index 24a7608c13ac..6309a4442bb3 100644 --- a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -575,6 +575,7 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index c415f75821f3..3feb0731f814 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -576,6 +576,7 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 2c715a8ff551..ea04b1b0da7d 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -592,6 +592,7 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 15ff37fcccbf..f52d9af92153 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -572,6 +572,7 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 40a44bf9f48d..f348447824da 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -573,6 +573,7 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index e9c46b59ebbc..465eb96c755e 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -448,6 +448,7 @@ CONFIG_TEST_PRINTF=m CONFIG_TEST_SCANF=m CONFIG_TEST_BITMAP=m CONFIG_TEST_UUID=m +CONFIG_TEST_XARRAY=m CONFIG_TEST_MAPLE_TREE=m CONFIG_TEST_RHASHTABLE=m CONFIG_TEST_IDA=m diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 775966cf6114..1af972a92d06 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -2456,22 +2456,8 @@ config TEST_BITMAP config TEST_UUID tristate "Test functions located in the uuid module at runtime" -config XARRAY_KUNIT - tristate "KUnit test XArray code at runtime" if !KUNIT_ALL_TESTS - depends on KUNIT - default KUNIT_ALL_TESTS - help - Enable this option to test the Xarray code at boot. - - KUnit tests run during boot and output the results to the debug log - in TAP format (http://testanything.org/). Only useful for kernel devs - running the KUnit test harness, and not intended for inclusion into a - production build. - - For more information on KUnit and unit tests in general please refer - to the KUnit documentation in Documentation/dev-tools/kunit/. - - If unsure, say N. 
+config TEST_XARRAY + tristate "Test the XArray code at runtime" config TEST_MAPLE_TREE tristate "Test the Maple Tree code at runtime or module load" diff --git a/lib/Makefile b/lib/Makefile index f1c6e9d76a7c..d5cfc7afbbb8 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -94,6 +94,7 @@ GCOV_PROFILE_test_bitmap.o := n endif obj-$(CONFIG_TEST_UUID) += test_uuid.o +obj-$(CONFIG_TEST_XARRAY) += test_xarray.o obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o obj-$(CONFIG_TEST_PARMAN) += test_parman.o obj-$(CONFIG_TEST_KMOD) += test_kmod.o @@ -372,7 +373,6 @@ CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN) obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o -obj-$(CONFIG_XARRAY_KUNIT) += test_xarray.o obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o diff --git a/lib/test_xarray.c b/lib/test_xarray.c index eab5971d0a48..6932a26f4927 100644 --- a/lib/test_xarray.c +++ b/lib/test_xarray.c @@ -6,10 +6,11 @@ * Author: Matthew Wilcox */ -#include - -#include #include +#include + +static unsigned int tests_run; +static unsigned int tests_passed; static const unsigned int order_limit = IS_ENABLED(CONFIG_XARRAY_MULTI) ? BITS_PER_LONG : 1; @@ -19,12 +20,15 @@ static const unsigned int order_limit = void xa_dump(const struct xarray *xa) { } # endif #undef XA_BUG_ON -#define XA_BUG_ON(xa, x) do { \ - if (x) { \ - KUNIT_FAIL(test, #x); \ - xa_dump(xa); \ - dump_stack(); \ - } \ +#define XA_BUG_ON(xa, x) do { \ + tests_run++; \ + if (x) { \ + printk("BUG at %s:%d\n", __func__, __LINE__); \ + xa_dump(xa); \ + dump_stack(); \ + } else { \ + tests_passed++; \ + } \ } while (0) #endif @@ -38,13 +42,13 @@ static void *xa_store_index(struct xarray *xa, unsigned long index, gfp_t gfp) return xa_store(xa, index, xa_mk_index(index), gfp); } -static void xa_insert_index(struct kunit *test, struct xarray *xa, unsigned long index) +static void xa_insert_index(struct xarray *xa, unsigned long index) { XA_BUG_ON(xa, xa_insert(xa, index, xa_mk_index(index), GFP_KERNEL) != 0); } -static void xa_alloc_index(struct kunit *test, struct xarray *xa, unsigned long index, gfp_t gfp) +static void xa_alloc_index(struct xarray *xa, unsigned long index, gfp_t gfp) { u32 id; @@ -53,7 +57,7 @@ static void xa_alloc_index(struct kunit *test, struct xarray *xa, unsigned long XA_BUG_ON(xa, id != index); } -static void xa_erase_index(struct kunit *test, struct xarray *xa, unsigned long index) +static void xa_erase_index(struct xarray *xa, unsigned long index) { XA_BUG_ON(xa, xa_erase(xa, index) != xa_mk_index(index)); XA_BUG_ON(xa, xa_load(xa, index) != NULL); @@ -79,15 +83,8 @@ static void *xa_store_order(struct xarray *xa, unsigned long index, return curr; } -static inline struct xarray *xa_param(struct kunit *test) +static noinline void check_xa_err(struct xarray *xa) { - return *(struct xarray **)test->param_value; -} - -static noinline void check_xa_err(struct kunit *test) -{ - struct xarray *xa = xa_param(test); - XA_BUG_ON(xa, xa_err(xa_store_index(xa, 0, GFP_NOWAIT)) != 0); XA_BUG_ON(xa, xa_err(xa_erase(xa, 0)) != 0); #ifndef __KERNEL__ @@ -102,10 +99,8 @@ static noinline void check_xa_err(struct kunit *test) // XA_BUG_ON(xa, xa_err(xa_store(xa, 0, xa_mk_internal(0), 0)) != -EINVAL); } -static noinline void check_xas_retry(struct kunit *test) +static noinline void check_xas_retry(struct xarray *xa) { - struct xarray *xa 
= xa_param(test); - XA_STATE(xas, xa, 0); void *entry; @@ -114,7 +109,7 @@ static noinline void check_xas_retry(struct kunit *test) rcu_read_lock(); XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != xa_mk_value(0)); - xa_erase_index(test, xa, 1); + xa_erase_index(xa, 1); XA_BUG_ON(xa, !xa_is_retry(xas_reload(&xas))); XA_BUG_ON(xa, xas_retry(&xas, NULL)); XA_BUG_ON(xa, xas_retry(&xas, xa_mk_value(0))); @@ -145,14 +140,12 @@ static noinline void check_xas_retry(struct kunit *test) } xas_unlock(&xas); - xa_erase_index(test, xa, 0); - xa_erase_index(test, xa, 1); + xa_erase_index(xa, 0); + xa_erase_index(xa, 1); } -static noinline void check_xa_load(struct kunit *test) +static noinline void check_xa_load(struct xarray *xa) { - struct xarray *xa = xa_param(test); - unsigned long i, j; for (i = 0; i < 1024; i++) { @@ -174,15 +167,13 @@ static noinline void check_xa_load(struct kunit *test) else XA_BUG_ON(xa, entry); } - xa_erase_index(test, xa, i); + xa_erase_index(xa, i); } XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_xa_mark_1(struct kunit *test, unsigned long index) +static noinline void check_xa_mark_1(struct xarray *xa, unsigned long index) { - struct xarray *xa = xa_param(test); - unsigned int order; unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 8 : 1; @@ -202,7 +193,7 @@ static noinline void check_xa_mark_1(struct kunit *test, unsigned long index) XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_1)); /* Storing NULL clears marks, and they can't be set again */ - xa_erase_index(test, xa, index); + xa_erase_index(xa, index); XA_BUG_ON(xa, !xa_empty(xa)); XA_BUG_ON(xa, xa_get_mark(xa, index, XA_MARK_0)); xa_set_mark(xa, index, XA_MARK_0); @@ -253,17 +244,15 @@ static noinline void check_xa_mark_1(struct kunit *test, unsigned long index) XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_0)); XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_1)); XA_BUG_ON(xa, xa_get_mark(xa, next, XA_MARK_2)); - xa_erase_index(test, xa, index); - xa_erase_index(test, xa, next); + xa_erase_index(xa, index); + xa_erase_index(xa, next); XA_BUG_ON(xa, !xa_empty(xa)); } XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_xa_mark_2(struct kunit *test) +static noinline void check_xa_mark_2(struct xarray *xa) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, 0); unsigned long index; unsigned int count = 0; @@ -300,11 +289,9 @@ static noinline void check_xa_mark_2(struct kunit *test) xa_destroy(xa); } -static noinline void check_xa_mark_3(struct kunit *test) +static noinline void check_xa_mark_3(struct xarray *xa) { #ifdef CONFIG_XARRAY_MULTI - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, 0x41); void *entry; int count = 0; @@ -323,21 +310,19 @@ static noinline void check_xa_mark_3(struct kunit *test) #endif } -static noinline void check_xa_mark(struct kunit *test) +static noinline void check_xa_mark(struct xarray *xa) { unsigned long index; for (index = 0; index < 16384; index += 4) - check_xa_mark_1(test, index); + check_xa_mark_1(xa, index); - check_xa_mark_2(test); - check_xa_mark_3(test); + check_xa_mark_2(xa); + check_xa_mark_3(xa); } -static noinline void check_xa_shrink(struct kunit *test) +static noinline void check_xa_shrink(struct xarray *xa) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, 1); struct xa_node *node; unsigned int order; @@ -362,7 +347,7 @@ static noinline void check_xa_shrink(struct kunit *test) XA_BUG_ON(xa, xas_load(&xas) != NULL); xas_unlock(&xas); XA_BUG_ON(xa, xa_load(xa, 0) != xa_mk_value(0)); - xa_erase_index(test, xa, 0); + 
xa_erase_index(xa, 0); XA_BUG_ON(xa, !xa_empty(xa)); for (order = 0; order < max_order; order++) { @@ -379,49 +364,45 @@ static noinline void check_xa_shrink(struct kunit *test) XA_BUG_ON(xa, xa_head(xa) == node); rcu_read_unlock(); XA_BUG_ON(xa, xa_load(xa, max + 1) != NULL); - xa_erase_index(test, xa, ULONG_MAX); + xa_erase_index(xa, ULONG_MAX); XA_BUG_ON(xa, xa->xa_head != node); - xa_erase_index(test, xa, 0); + xa_erase_index(xa, 0); } } -static noinline void check_insert(struct kunit *test) +static noinline void check_insert(struct xarray *xa) { - struct xarray *xa = xa_param(test); - unsigned long i; for (i = 0; i < 1024; i++) { - xa_insert_index(test, xa, i); + xa_insert_index(xa, i); XA_BUG_ON(xa, xa_load(xa, i - 1) != NULL); XA_BUG_ON(xa, xa_load(xa, i + 1) != NULL); - xa_erase_index(test, xa, i); + xa_erase_index(xa, i); } for (i = 10; i < BITS_PER_LONG; i++) { - xa_insert_index(test, xa, 1UL << i); + xa_insert_index(xa, 1UL << i); XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 1) != NULL); XA_BUG_ON(xa, xa_load(xa, (1UL << i) + 1) != NULL); - xa_erase_index(test, xa, 1UL << i); + xa_erase_index(xa, 1UL << i); - xa_insert_index(test, xa, (1UL << i) - 1); + xa_insert_index(xa, (1UL << i) - 1); XA_BUG_ON(xa, xa_load(xa, (1UL << i) - 2) != NULL); XA_BUG_ON(xa, xa_load(xa, 1UL << i) != NULL); - xa_erase_index(test, xa, (1UL << i) - 1); + xa_erase_index(xa, (1UL << i) - 1); } - xa_insert_index(test, xa, ~0UL); + xa_insert_index(xa, ~0UL); XA_BUG_ON(xa, xa_load(xa, 0UL) != NULL); XA_BUG_ON(xa, xa_load(xa, ~1UL) != NULL); - xa_erase_index(test, xa, ~0UL); + xa_erase_index(xa, ~0UL); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_cmpxchg(struct kunit *test) +static noinline void check_cmpxchg(struct xarray *xa) { - struct xarray *xa = xa_param(test); - void *FIVE = xa_mk_value(5); void *SIX = xa_mk_value(6); void *LOTS = xa_mk_value(12345678); @@ -437,16 +418,14 @@ static noinline void check_cmpxchg(struct kunit *test) XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) != -EBUSY); XA_BUG_ON(xa, xa_cmpxchg(xa, 5, FIVE, NULL, GFP_KERNEL) != FIVE); XA_BUG_ON(xa, xa_insert(xa, 5, FIVE, GFP_KERNEL) == -EBUSY); - xa_erase_index(test, xa, 12345678); - xa_erase_index(test, xa, 5); + xa_erase_index(xa, 12345678); + xa_erase_index(xa, 5); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_cmpxchg_order(struct kunit *test) +static noinline void check_cmpxchg_order(struct xarray *xa) { #ifdef CONFIG_XARRAY_MULTI - struct xarray *xa = xa_param(test); - void *FIVE = xa_mk_value(5); unsigned int i, order = 3; @@ -497,10 +476,8 @@ static noinline void check_cmpxchg_order(struct kunit *test) #endif } -static noinline void check_reserve(struct kunit *test) +static noinline void check_reserve(struct xarray *xa) { - struct xarray *xa = xa_param(test); - void *entry; unsigned long index; int count; @@ -517,7 +494,7 @@ static noinline void check_reserve(struct kunit *test) XA_BUG_ON(xa, xa_reserve(xa, 12345678, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_store_index(xa, 12345678, GFP_NOWAIT) != NULL); xa_release(xa, 12345678); - xa_erase_index(test, xa, 12345678); + xa_erase_index(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); /* cmpxchg sees a reserved entry as ZERO */ @@ -525,7 +502,7 @@ static noinline void check_reserve(struct kunit *test) XA_BUG_ON(xa, xa_cmpxchg(xa, 12345678, XA_ZERO_ENTRY, xa_mk_value(12345678), GFP_NOWAIT) != NULL); xa_release(xa, 12345678); - xa_erase_index(test, xa, 12345678); + xa_erase_index(xa, 12345678); XA_BUG_ON(xa, !xa_empty(xa)); /* xa_insert treats it as busy */ @@ 
-565,10 +542,8 @@ static noinline void check_reserve(struct kunit *test) xa_destroy(xa); } -static noinline void check_xas_erase(struct kunit *test) +static noinline void check_xas_erase(struct xarray *xa) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, 0); void *entry; unsigned long i, j; @@ -606,11 +581,9 @@ static noinline void check_xas_erase(struct kunit *test) } #ifdef CONFIG_XARRAY_MULTI -static noinline void check_multi_store_1(struct kunit *test, unsigned long index, +static noinline void check_multi_store_1(struct xarray *xa, unsigned long index, unsigned int order) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, index); unsigned long min = index & ~((1UL << order) - 1); unsigned long max = min + (1UL << order); @@ -629,15 +602,13 @@ static noinline void check_multi_store_1(struct kunit *test, unsigned long index XA_BUG_ON(xa, xa_load(xa, max) != NULL); XA_BUG_ON(xa, xa_load(xa, min - 1) != NULL); - xa_erase_index(test, xa, min); + xa_erase_index(xa, min); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_multi_store_2(struct kunit *test, unsigned long index, +static noinline void check_multi_store_2(struct xarray *xa, unsigned long index, unsigned int order) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, index); xa_store_order(xa, index, order, xa_mk_value(0), GFP_KERNEL); @@ -649,11 +620,9 @@ static noinline void check_multi_store_2(struct kunit *test, unsigned long index XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_multi_store_3(struct kunit *test, unsigned long index, +static noinline void check_multi_store_3(struct xarray *xa, unsigned long index, unsigned int order) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, 0); void *entry; int n = 0; @@ -678,11 +647,9 @@ static noinline void check_multi_store_3(struct kunit *test, unsigned long index } #endif -static noinline void check_multi_store(struct kunit *test) +static noinline void check_multi_store(struct xarray *xa) { #ifdef CONFIG_XARRAY_MULTI - struct xarray *xa = xa_param(test); - unsigned long i, j, k; unsigned int max_order = (sizeof(long) == 4) ? 
30 : 60; @@ -747,28 +714,26 @@ static noinline void check_multi_store(struct kunit *test) } for (i = 0; i < 20; i++) { - check_multi_store_1(test, 200, i); - check_multi_store_1(test, 0, i); - check_multi_store_1(test, (1UL << i) + 1, i); + check_multi_store_1(xa, 200, i); + check_multi_store_1(xa, 0, i); + check_multi_store_1(xa, (1UL << i) + 1, i); } - check_multi_store_2(test, 4095, 9); + check_multi_store_2(xa, 4095, 9); for (i = 1; i < 20; i++) { - check_multi_store_3(test, 0, i); - check_multi_store_3(test, 1UL << i, i); + check_multi_store_3(xa, 0, i); + check_multi_store_3(xa, 1UL << i, i); } #endif } #ifdef CONFIG_XARRAY_MULTI /* mimics page cache __filemap_add_folio() */ -static noinline void check_xa_multi_store_adv_add(struct kunit *test, +static noinline void check_xa_multi_store_adv_add(struct xarray *xa, unsigned long index, unsigned int order, void *p) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, index); unsigned int nrpages = 1UL << order; @@ -796,12 +761,10 @@ static noinline void check_xa_multi_store_adv_add(struct kunit *test, } /* mimics page_cache_delete() */ -static noinline void check_xa_multi_store_adv_del_entry(struct kunit *test, +static noinline void check_xa_multi_store_adv_del_entry(struct xarray *xa, unsigned long index, unsigned int order) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, index); xas_set_order(&xas, index, order); @@ -809,14 +772,12 @@ static noinline void check_xa_multi_store_adv_del_entry(struct kunit *test, xas_init_marks(&xas); } -static noinline void check_xa_multi_store_adv_delete(struct kunit *test, +static noinline void check_xa_multi_store_adv_delete(struct xarray *xa, unsigned long index, unsigned int order) { - struct xarray *xa = xa_param(test); - xa_lock_irq(xa); - check_xa_multi_store_adv_del_entry(test, index, order); + check_xa_multi_store_adv_del_entry(xa, index, order); xa_unlock_irq(xa); } @@ -853,12 +814,10 @@ static unsigned long some_val = 0xdeadbeef; static unsigned long some_val_2 = 0xdeaddead; /* mimics the page cache usage */ -static noinline void check_xa_multi_store_adv(struct kunit *test, +static noinline void check_xa_multi_store_adv(struct xarray *xa, unsigned long pos, unsigned int order) { - struct xarray *xa = xa_param(test); - unsigned int nrpages = 1UL << order; unsigned long index, base, next_index, next_next_index; unsigned int i; @@ -868,7 +827,7 @@ static noinline void check_xa_multi_store_adv(struct kunit *test, next_index = round_down(base + nrpages, nrpages); next_next_index = round_down(next_index + nrpages, nrpages); - check_xa_multi_store_adv_add(test, base, order, &some_val); + check_xa_multi_store_adv_add(xa, base, order, &some_val); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, base + i) != &some_val); @@ -876,20 +835,20 @@ static noinline void check_xa_multi_store_adv(struct kunit *test, XA_BUG_ON(xa, test_get_entry(xa, next_index) != NULL); /* Use order 0 for the next item */ - check_xa_multi_store_adv_add(test, next_index, 0, &some_val_2); + check_xa_multi_store_adv_add(xa, next_index, 0, &some_val_2); XA_BUG_ON(xa, test_get_entry(xa, next_index) != &some_val_2); /* Remove the next item */ - check_xa_multi_store_adv_delete(test, next_index, 0); + check_xa_multi_store_adv_delete(xa, next_index, 0); /* Now use order for a new pointer */ - check_xa_multi_store_adv_add(test, next_index, order, &some_val_2); + check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, next_index + i) 
!= &some_val_2); - check_xa_multi_store_adv_delete(test, next_index, order); - check_xa_multi_store_adv_delete(test, base, order); + check_xa_multi_store_adv_delete(xa, next_index, order); + check_xa_multi_store_adv_delete(xa, base, order); XA_BUG_ON(xa, !xa_empty(xa)); /* starting fresh again */ @@ -897,7 +856,7 @@ static noinline void check_xa_multi_store_adv(struct kunit *test, /* let's test some holes now */ /* hole at base and next_next */ - check_xa_multi_store_adv_add(test, next_index, order, &some_val_2); + check_xa_multi_store_adv_add(xa, next_index, order, &some_val_2); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL); @@ -908,12 +867,12 @@ static noinline void check_xa_multi_store_adv(struct kunit *test, for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != NULL); - check_xa_multi_store_adv_delete(test, next_index, order); + check_xa_multi_store_adv_delete(xa, next_index, order); XA_BUG_ON(xa, !xa_empty(xa)); /* hole at base and next */ - check_xa_multi_store_adv_add(test, next_next_index, order, &some_val_2); + check_xa_multi_store_adv_add(xa, next_next_index, order, &some_val_2); for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, base + i) != NULL); @@ -924,12 +883,12 @@ static noinline void check_xa_multi_store_adv(struct kunit *test, for (i = 0; i < nrpages; i++) XA_BUG_ON(xa, test_get_entry(xa, next_next_index + i) != &some_val_2); - check_xa_multi_store_adv_delete(test, next_next_index, order); + check_xa_multi_store_adv_delete(xa, next_next_index, order); XA_BUG_ON(xa, !xa_empty(xa)); } #endif -static noinline void check_multi_store_advanced(struct kunit *test) +static noinline void check_multi_store_advanced(struct xarray *xa) { #ifdef CONFIG_XARRAY_MULTI unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
20 : 1; @@ -941,59 +900,59 @@ static noinline void check_multi_store_advanced(struct kunit *test) */ for (pos = 7; pos < end; pos = (pos * pos) + 564) { for (i = 0; i < max_order; i++) { - check_xa_multi_store_adv(test, pos, i); - check_xa_multi_store_adv(test, pos + 157, i); + check_xa_multi_store_adv(xa, pos, i); + check_xa_multi_store_adv(xa, pos + 157, i); } } #endif } -static noinline void check_xa_alloc_1(struct kunit *test, struct xarray *xa, unsigned int base) +static noinline void check_xa_alloc_1(struct xarray *xa, unsigned int base) { int i; u32 id; XA_BUG_ON(xa, !xa_empty(xa)); /* An empty array should assign %base to the first alloc */ - xa_alloc_index(test, xa, base, GFP_KERNEL); + xa_alloc_index(xa, base, GFP_KERNEL); /* Erasing it should make the array empty again */ - xa_erase_index(test, xa, base); + xa_erase_index(xa, base); XA_BUG_ON(xa, !xa_empty(xa)); /* And it should assign %base again */ - xa_alloc_index(test, xa, base, GFP_KERNEL); + xa_alloc_index(xa, base, GFP_KERNEL); /* Allocating and then erasing a lot should not lose base */ for (i = base + 1; i < 2 * XA_CHUNK_SIZE; i++) - xa_alloc_index(test, xa, i, GFP_KERNEL); + xa_alloc_index(xa, i, GFP_KERNEL); for (i = base; i < 2 * XA_CHUNK_SIZE; i++) - xa_erase_index(test, xa, i); - xa_alloc_index(test, xa, base, GFP_KERNEL); + xa_erase_index(xa, i); + xa_alloc_index(xa, base, GFP_KERNEL); /* Destroying the array should do the same as erasing */ xa_destroy(xa); /* And it should assign %base again */ - xa_alloc_index(test, xa, base, GFP_KERNEL); + xa_alloc_index(xa, base, GFP_KERNEL); /* The next assigned ID should be base+1 */ - xa_alloc_index(test, xa, base + 1, GFP_KERNEL); - xa_erase_index(test, xa, base + 1); + xa_alloc_index(xa, base + 1, GFP_KERNEL); + xa_erase_index(xa, base + 1); /* Storing a value should mark it used */ xa_store_index(xa, base + 1, GFP_KERNEL); - xa_alloc_index(test, xa, base + 2, GFP_KERNEL); + xa_alloc_index(xa, base + 2, GFP_KERNEL); /* If we then erase base, it should be free */ - xa_erase_index(test, xa, base); - xa_alloc_index(test, xa, base, GFP_KERNEL); + xa_erase_index(xa, base); + xa_alloc_index(xa, base, GFP_KERNEL); - xa_erase_index(test, xa, base + 1); - xa_erase_index(test, xa, base + 2); + xa_erase_index(xa, base + 1); + xa_erase_index(xa, base + 2); for (i = 1; i < 5000; i++) { - xa_alloc_index(test, xa, base + i, GFP_KERNEL); + xa_alloc_index(xa, base + i, GFP_KERNEL); } xa_destroy(xa); @@ -1016,14 +975,14 @@ static noinline void check_xa_alloc_1(struct kunit *test, struct xarray *xa, uns XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), GFP_KERNEL) != -EBUSY); - XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != NULL); + XA_BUG_ON(xa, xa_store_index(xa, 3, GFP_KERNEL) != 0); XA_BUG_ON(xa, xa_alloc(xa, &id, xa_mk_index(10), XA_LIMIT(10, 5), GFP_KERNEL) != -EBUSY); - xa_erase_index(test, xa, 3); + xa_erase_index(xa, 3); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_xa_alloc_2(struct kunit *test, struct xarray *xa, unsigned int base) +static noinline void check_xa_alloc_2(struct xarray *xa, unsigned int base) { unsigned int i, id; unsigned long index; @@ -1059,7 +1018,7 @@ static noinline void check_xa_alloc_2(struct kunit *test, struct xarray *xa, uns XA_BUG_ON(xa, id != 5); xa_for_each(xa, index, entry) { - xa_erase_index(test, xa, index); + xa_erase_index(xa, index); } for (i = base; i < base + 9; i++) { @@ -1074,7 +1033,7 @@ static noinline void check_xa_alloc_2(struct kunit *test, struct xarray *xa, uns xa_destroy(xa); } -static noinline void 
check_xa_alloc_3(struct kunit *test, struct xarray *xa, unsigned int base) +static noinline void check_xa_alloc_3(struct xarray *xa, unsigned int base) { struct xa_limit limit = XA_LIMIT(1, 0x3fff); u32 next = 0; @@ -1090,8 +1049,8 @@ static noinline void check_xa_alloc_3(struct kunit *test, struct xarray *xa, uns XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(0x3ffd), limit, &next, GFP_KERNEL) != 0); XA_BUG_ON(xa, id != 0x3ffd); - xa_erase_index(test, xa, 0x3ffd); - xa_erase_index(test, xa, 1); + xa_erase_index(xa, 0x3ffd); + xa_erase_index(xa, 1); XA_BUG_ON(xa, !xa_empty(xa)); for (i = 0x3ffe; i < 0x4003; i++) { @@ -1106,8 +1065,8 @@ static noinline void check_xa_alloc_3(struct kunit *test, struct xarray *xa, uns /* Check wrap-around is handled correctly */ if (base != 0) - xa_erase_index(test, xa, base); - xa_erase_index(test, xa, base + 1); + xa_erase_index(xa, base); + xa_erase_index(xa, base + 1); next = UINT_MAX; XA_BUG_ON(xa, xa_alloc_cyclic(xa, &id, xa_mk_index(UINT_MAX), xa_limit_32b, &next, GFP_KERNEL) != 0); @@ -1120,7 +1079,7 @@ static noinline void check_xa_alloc_3(struct kunit *test, struct xarray *xa, uns XA_BUG_ON(xa, id != base + 1); xa_for_each(xa, index, entry) - xa_erase_index(test, xa, index); + xa_erase_index(xa, index); XA_BUG_ON(xa, !xa_empty(xa)); } @@ -1128,21 +1087,19 @@ static noinline void check_xa_alloc_3(struct kunit *test, struct xarray *xa, uns static DEFINE_XARRAY_ALLOC(xa0); static DEFINE_XARRAY_ALLOC1(xa1); -static noinline void check_xa_alloc(struct kunit *test) +static noinline void check_xa_alloc(void) { - check_xa_alloc_1(test, &xa0, 0); - check_xa_alloc_1(test, &xa1, 1); - check_xa_alloc_2(test, &xa0, 0); - check_xa_alloc_2(test, &xa1, 1); - check_xa_alloc_3(test, &xa0, 0); - check_xa_alloc_3(test, &xa1, 1); + check_xa_alloc_1(&xa0, 0); + check_xa_alloc_1(&xa1, 1); + check_xa_alloc_2(&xa0, 0); + check_xa_alloc_2(&xa1, 1); + check_xa_alloc_3(&xa0, 0); + check_xa_alloc_3(&xa1, 1); } -static noinline void __check_store_iter(struct kunit *test, unsigned long start, +static noinline void __check_store_iter(struct xarray *xa, unsigned long start, unsigned int order, unsigned int present) { - struct xarray *xa = xa_param(test); - XA_STATE_ORDER(xas, xa, start, order); void *entry; unsigned int count = 0; @@ -1166,54 +1123,50 @@ retry: XA_BUG_ON(xa, xa_load(xa, start) != xa_mk_index(start)); XA_BUG_ON(xa, xa_load(xa, start + (1UL << order) - 1) != xa_mk_index(start)); - xa_erase_index(test, xa, start); + xa_erase_index(xa, start); } -static noinline void check_store_iter(struct kunit *test) +static noinline void check_store_iter(struct xarray *xa) { - struct xarray *xa = xa_param(test); - unsigned int i, j; unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
20 : 1; for (i = 0; i < max_order; i++) { unsigned int min = 1 << i; unsigned int max = (2 << i) - 1; - __check_store_iter(test, 0, i, 0); + __check_store_iter(xa, 0, i, 0); XA_BUG_ON(xa, !xa_empty(xa)); - __check_store_iter(test, min, i, 0); + __check_store_iter(xa, min, i, 0); XA_BUG_ON(xa, !xa_empty(xa)); xa_store_index(xa, min, GFP_KERNEL); - __check_store_iter(test, min, i, 1); + __check_store_iter(xa, min, i, 1); XA_BUG_ON(xa, !xa_empty(xa)); xa_store_index(xa, max, GFP_KERNEL); - __check_store_iter(test, min, i, 1); + __check_store_iter(xa, min, i, 1); XA_BUG_ON(xa, !xa_empty(xa)); for (j = 0; j < min; j++) xa_store_index(xa, j, GFP_KERNEL); - __check_store_iter(test, 0, i, min); + __check_store_iter(xa, 0, i, min); XA_BUG_ON(xa, !xa_empty(xa)); for (j = 0; j < min; j++) xa_store_index(xa, min + j, GFP_KERNEL); - __check_store_iter(test, min, i, min); + __check_store_iter(xa, min, i, min); XA_BUG_ON(xa, !xa_empty(xa)); } #ifdef CONFIG_XARRAY_MULTI xa_store_index(xa, 63, GFP_KERNEL); xa_store_index(xa, 65, GFP_KERNEL); - __check_store_iter(test, 64, 2, 1); - xa_erase_index(test, xa, 63); + __check_store_iter(xa, 64, 2, 1); + xa_erase_index(xa, 63); #endif XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_multi_find_1(struct kunit *test, unsigned int order) +static noinline void check_multi_find_1(struct xarray *xa, unsigned order) { #ifdef CONFIG_XARRAY_MULTI - struct xarray *xa = xa_param(test); - unsigned long multi = 3 << order; unsigned long next = 4 << order; unsigned long index; @@ -1236,17 +1189,15 @@ static noinline void check_multi_find_1(struct kunit *test, unsigned int order) XA_BUG_ON(xa, xa_find_after(xa, &index, next, XA_PRESENT) != NULL); XA_BUG_ON(xa, index != next); - xa_erase_index(test, xa, multi); - xa_erase_index(test, xa, next); - xa_erase_index(test, xa, next + 1); + xa_erase_index(xa, multi); + xa_erase_index(xa, next); + xa_erase_index(xa, next + 1); XA_BUG_ON(xa, !xa_empty(xa)); #endif } -static noinline void check_multi_find_2(struct kunit *test) +static noinline void check_multi_find_2(struct xarray *xa) { - struct xarray *xa = xa_param(test); - unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
10 : 1; unsigned int i, j; void *entry; @@ -1260,19 +1211,17 @@ static noinline void check_multi_find_2(struct kunit *test) GFP_KERNEL); rcu_read_lock(); xas_for_each(&xas, entry, ULONG_MAX) { - xa_erase_index(test, xa, index); + xa_erase_index(xa, index); } rcu_read_unlock(); - xa_erase_index(test, xa, index - 1); + xa_erase_index(xa, index - 1); XA_BUG_ON(xa, !xa_empty(xa)); } } } -static noinline void check_multi_find_3(struct kunit *test) +static noinline void check_multi_find_3(struct xarray *xa) { - struct xarray *xa = xa_param(test); - unsigned int order; for (order = 5; order < order_limit; order++) { @@ -1281,14 +1230,12 @@ static noinline void check_multi_find_3(struct kunit *test) XA_BUG_ON(xa, !xa_empty(xa)); xa_store_order(xa, 0, order - 4, xa_mk_index(0), GFP_KERNEL); XA_BUG_ON(xa, xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT)); - xa_erase_index(test, xa, 0); + xa_erase_index(xa, 0); } } -static noinline void check_find_1(struct kunit *test) +static noinline void check_find_1(struct xarray *xa) { - struct xarray *xa = xa_param(test); - unsigned long i, j, k; XA_BUG_ON(xa, !xa_empty(xa)); @@ -1325,20 +1272,18 @@ static noinline void check_find_1(struct kunit *test) else XA_BUG_ON(xa, entry != NULL); } - xa_erase_index(test, xa, j); + xa_erase_index(xa, j); XA_BUG_ON(xa, xa_get_mark(xa, j, XA_MARK_0)); XA_BUG_ON(xa, !xa_get_mark(xa, i, XA_MARK_0)); } - xa_erase_index(test, xa, i); + xa_erase_index(xa, i); XA_BUG_ON(xa, xa_get_mark(xa, i, XA_MARK_0)); } XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_find_2(struct kunit *test) +static noinline void check_find_2(struct xarray *xa) { - struct xarray *xa = xa_param(test); - void *entry; unsigned long i, j, index; @@ -1358,10 +1303,8 @@ static noinline void check_find_2(struct kunit *test) xa_destroy(xa); } -static noinline void check_find_3(struct kunit *test) +static noinline void check_find_3(struct xarray *xa) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, 0); unsigned long i, j, k; void *entry; @@ -1385,10 +1328,8 @@ static noinline void check_find_3(struct kunit *test) xa_destroy(xa); } -static noinline void check_find_4(struct kunit *test) +static noinline void check_find_4(struct xarray *xa) { - struct xarray *xa = xa_param(test); - unsigned long index = 0; void *entry; @@ -1400,22 +1341,22 @@ static noinline void check_find_4(struct kunit *test) entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT); XA_BUG_ON(xa, entry); - xa_erase_index(test, xa, ULONG_MAX); + xa_erase_index(xa, ULONG_MAX); } -static noinline void check_find(struct kunit *test) +static noinline void check_find(struct xarray *xa) { unsigned i; - check_find_1(test); - check_find_2(test); - check_find_3(test); - check_find_4(test); + check_find_1(xa); + check_find_2(xa); + check_find_3(xa); + check_find_4(xa); for (i = 2; i < 10; i++) - check_multi_find_1(test, i); - check_multi_find_2(test); - check_multi_find_3(test); + check_multi_find_1(xa, i); + check_multi_find_2(xa); + check_multi_find_3(xa); } /* See find_swap_entry() in mm/shmem.c */ @@ -1441,10 +1382,8 @@ static noinline unsigned long xa_find_entry(struct xarray *xa, void *item) return entry ? 
xas.xa_index : -1; } -static noinline void check_find_entry(struct kunit *test) +static noinline void check_find_entry(struct xarray *xa) { - struct xarray *xa = xa_param(test); - #ifdef CONFIG_XARRAY_MULTI unsigned int order; unsigned long offset, index; @@ -1471,14 +1410,12 @@ static noinline void check_find_entry(struct kunit *test) xa_store_index(xa, ULONG_MAX, GFP_KERNEL); XA_BUG_ON(xa, xa_find_entry(xa, xa) != -1); XA_BUG_ON(xa, xa_find_entry(xa, xa_mk_index(ULONG_MAX)) != -1); - xa_erase_index(test, xa, ULONG_MAX); + xa_erase_index(xa, ULONG_MAX); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_pause(struct kunit *test) +static noinline void check_pause(struct xarray *xa) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, 0); void *entry; unsigned int order; @@ -1548,10 +1485,8 @@ static noinline void check_pause(struct kunit *test) } -static noinline void check_move_tiny(struct kunit *test) +static noinline void check_move_tiny(struct xarray *xa) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, 0); XA_BUG_ON(xa, !xa_empty(xa)); @@ -1568,14 +1503,12 @@ static noinline void check_move_tiny(struct kunit *test) XA_BUG_ON(xa, xas_prev(&xas) != xa_mk_index(0)); XA_BUG_ON(xa, xas_prev(&xas) != NULL); rcu_read_unlock(); - xa_erase_index(test, xa, 0); + xa_erase_index(xa, 0); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_move_max(struct kunit *test) +static noinline void check_move_max(struct xarray *xa) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, 0); xa_store_index(xa, ULONG_MAX, GFP_KERNEL); @@ -1591,14 +1524,12 @@ static noinline void check_move_max(struct kunit *test) XA_BUG_ON(xa, xas_find(&xas, ULONG_MAX) != NULL); rcu_read_unlock(); - xa_erase_index(test, xa, ULONG_MAX); + xa_erase_index(xa, ULONG_MAX); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_move_small(struct kunit *test, unsigned long idx) +static noinline void check_move_small(struct xarray *xa, unsigned long idx) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, 0); unsigned long i; @@ -1640,15 +1571,13 @@ static noinline void check_move_small(struct kunit *test, unsigned long idx) XA_BUG_ON(xa, xas.xa_index != ULONG_MAX); rcu_read_unlock(); - xa_erase_index(test, xa, 0); - xa_erase_index(test, xa, idx); + xa_erase_index(xa, 0); + xa_erase_index(xa, idx); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_move(struct kunit *test) +static noinline void check_move(struct xarray *xa) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, (1 << 16) - 1); unsigned long i; @@ -1675,7 +1604,7 @@ static noinline void check_move(struct kunit *test) rcu_read_unlock(); for (i = (1 << 8); i < (1 << 15); i++) - xa_erase_index(test, xa, i); + xa_erase_index(xa, i); i = xas.xa_index; @@ -1706,17 +1635,17 @@ static noinline void check_move(struct kunit *test) xa_destroy(xa); - check_move_tiny(test); - check_move_max(test); + check_move_tiny(xa); + check_move_max(xa); for (i = 0; i < 16; i++) - check_move_small(test, 1UL << i); + check_move_small(xa, 1UL << i); for (i = 2; i < 16; i++) - check_move_small(test, (1UL << i) - 1); + check_move_small(xa, (1UL << i) - 1); } -static noinline void xa_store_many_order(struct kunit *test, struct xarray *xa, +static noinline void xa_store_many_order(struct xarray *xa, unsigned long index, unsigned order) { XA_STATE_ORDER(xas, xa, index, order); @@ -1739,34 +1668,30 @@ unlock: XA_BUG_ON(xa, xas_error(&xas)); } -static noinline void check_create_range_1(struct kunit *test, +static noinline void 
check_create_range_1(struct xarray *xa, unsigned long index, unsigned order) { - struct xarray *xa = xa_param(test); - unsigned long i; - xa_store_many_order(test, xa, index, order); + xa_store_many_order(xa, index, order); for (i = index; i < index + (1UL << order); i++) - xa_erase_index(test, xa, i); + xa_erase_index(xa, i); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_create_range_2(struct kunit *test, unsigned int order) +static noinline void check_create_range_2(struct xarray *xa, unsigned order) { - struct xarray *xa = xa_param(test); - unsigned long i; unsigned long nr = 1UL << order; for (i = 0; i < nr * nr; i += nr) - xa_store_many_order(test, xa, i, order); + xa_store_many_order(xa, i, order); for (i = 0; i < nr * nr; i++) - xa_erase_index(test, xa, i); + xa_erase_index(xa, i); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_create_range_3(struct kunit *test) +static noinline void check_create_range_3(void) { XA_STATE(xas, NULL, 0); xas_set_err(&xas, -EEXIST); @@ -1774,11 +1699,9 @@ static noinline void check_create_range_3(struct kunit *test) XA_BUG_ON(NULL, xas_error(&xas) != -EEXIST); } -static noinline void check_create_range_4(struct kunit *test, +static noinline void check_create_range_4(struct xarray *xa, unsigned long index, unsigned order) { - struct xarray *xa = xa_param(test); - XA_STATE_ORDER(xas, xa, index, order); unsigned long base = xas.xa_index; unsigned long i = 0; @@ -1804,15 +1727,13 @@ unlock: XA_BUG_ON(xa, xas_error(&xas)); for (i = base; i < base + (1UL << order); i++) - xa_erase_index(test, xa, i); + xa_erase_index(xa, i); XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_create_range_5(struct kunit *test, +static noinline void check_create_range_5(struct xarray *xa, unsigned long index, unsigned int order) { - struct xarray *xa = xa_param(test); - XA_STATE_ORDER(xas, xa, index, order); unsigned int i; @@ -1829,46 +1750,44 @@ static noinline void check_create_range_5(struct kunit *test, xa_destroy(xa); } -static noinline void check_create_range(struct kunit *test) +static noinline void check_create_range(struct xarray *xa) { unsigned int order; unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 
12 : 1; for (order = 0; order < max_order; order++) { - check_create_range_1(test, 0, order); - check_create_range_1(test, 1U << order, order); - check_create_range_1(test, 2U << order, order); - check_create_range_1(test, 3U << order, order); - check_create_range_1(test, 1U << 24, order); + check_create_range_1(xa, 0, order); + check_create_range_1(xa, 1U << order, order); + check_create_range_1(xa, 2U << order, order); + check_create_range_1(xa, 3U << order, order); + check_create_range_1(xa, 1U << 24, order); if (order < 10) - check_create_range_2(test, order); - - check_create_range_4(test, 0, order); - check_create_range_4(test, 1U << order, order); - check_create_range_4(test, 2U << order, order); - check_create_range_4(test, 3U << order, order); - check_create_range_4(test, 1U << 24, order); - - check_create_range_4(test, 1, order); - check_create_range_4(test, (1U << order) + 1, order); - check_create_range_4(test, (2U << order) + 1, order); - check_create_range_4(test, (2U << order) - 1, order); - check_create_range_4(test, (3U << order) + 1, order); - check_create_range_4(test, (3U << order) - 1, order); - check_create_range_4(test, (1U << 24) + 1, order); - - check_create_range_5(test, 0, order); - check_create_range_5(test, (1U << order), order); + check_create_range_2(xa, order); + + check_create_range_4(xa, 0, order); + check_create_range_4(xa, 1U << order, order); + check_create_range_4(xa, 2U << order, order); + check_create_range_4(xa, 3U << order, order); + check_create_range_4(xa, 1U << 24, order); + + check_create_range_4(xa, 1, order); + check_create_range_4(xa, (1U << order) + 1, order); + check_create_range_4(xa, (2U << order) + 1, order); + check_create_range_4(xa, (2U << order) - 1, order); + check_create_range_4(xa, (3U << order) + 1, order); + check_create_range_4(xa, (3U << order) - 1, order); + check_create_range_4(xa, (1U << 24) + 1, order); + + check_create_range_5(xa, 0, order); + check_create_range_5(xa, (1U << order), order); } - check_create_range_3(test); + check_create_range_3(); } -static noinline void __check_store_range(struct kunit *test, unsigned long first, +static noinline void __check_store_range(struct xarray *xa, unsigned long first, unsigned long last) { - struct xarray *xa = xa_param(test); - #ifdef CONFIG_XARRAY_MULTI xa_store_range(xa, first, last, xa_mk_index(first), GFP_KERNEL); @@ -1883,28 +1802,26 @@ static noinline void __check_store_range(struct kunit *test, unsigned long first XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_store_range(struct kunit *test) +static noinline void check_store_range(struct xarray *xa) { unsigned long i, j; for (i = 0; i < 128; i++) { for (j = i; j < 128; j++) { - __check_store_range(test, i, j); - __check_store_range(test, 128 + i, 128 + j); - __check_store_range(test, 4095 + i, 4095 + j); - __check_store_range(test, 4096 + i, 4096 + j); - __check_store_range(test, 123456 + i, 123456 + j); - __check_store_range(test, (1 << 24) + i, (1 << 24) + j); + __check_store_range(xa, i, j); + __check_store_range(xa, 128 + i, 128 + j); + __check_store_range(xa, 4095 + i, 4095 + j); + __check_store_range(xa, 4096 + i, 4096 + j); + __check_store_range(xa, 123456 + i, 123456 + j); + __check_store_range(xa, (1 << 24) + i, (1 << 24) + j); } } } #ifdef CONFIG_XARRAY_MULTI -static void check_split_1(struct kunit *test, unsigned long index, +static void check_split_1(struct xarray *xa, unsigned long index, unsigned int order, unsigned int new_order) { - struct xarray *xa = xa_param(test); - XA_STATE_ORDER(xas, 
xa, index, new_order); unsigned int i, found; void *entry; @@ -1940,30 +1857,26 @@ static void check_split_1(struct kunit *test, unsigned long index, xa_destroy(xa); } -static noinline void check_split(struct kunit *test) +static noinline void check_split(struct xarray *xa) { - struct xarray *xa = xa_param(test); - unsigned int order, new_order; XA_BUG_ON(xa, !xa_empty(xa)); for (order = 1; order < 2 * XA_CHUNK_SHIFT; order++) { for (new_order = 0; new_order < order; new_order++) { - check_split_1(test, 0, order, new_order); - check_split_1(test, 1UL << order, order, new_order); - check_split_1(test, 3UL << order, order, new_order); + check_split_1(xa, 0, order, new_order); + check_split_1(xa, 1UL << order, order, new_order); + check_split_1(xa, 3UL << order, order, new_order); } } } #else -static void check_split(struct kunit *test) { } +static void check_split(struct xarray *xa) { } #endif -static void check_align_1(struct kunit *test, char *name) +static void check_align_1(struct xarray *xa, char *name) { - struct xarray *xa = xa_param(test); - int i; unsigned int id; unsigned long index; @@ -1983,10 +1896,8 @@ static void check_align_1(struct kunit *test, char *name) * We should always be able to store without allocating memory after * reserving a slot. */ -static void check_align_2(struct kunit *test, char *name) +static void check_align_2(struct xarray *xa, char *name) { - struct xarray *xa = xa_param(test); - int i; XA_BUG_ON(xa, !xa_empty(xa)); @@ -2005,15 +1916,15 @@ static void check_align_2(struct kunit *test, char *name) XA_BUG_ON(xa, !xa_empty(xa)); } -static noinline void check_align(struct kunit *test) +static noinline void check_align(struct xarray *xa) { char name[] = "Motorola 68000"; - check_align_1(test, name); - check_align_1(test, name + 1); - check_align_1(test, name + 2); - check_align_1(test, name + 3); - check_align_2(test, name); + check_align_1(xa, name); + check_align_1(xa, name + 1); + check_align_1(xa, name + 2); + check_align_1(xa, name + 3); + check_align_2(xa, name); } static LIST_HEAD(shadow_nodes); @@ -2029,7 +1940,7 @@ static void test_update_node(struct xa_node *node) } } -static noinline void shadow_remove(struct kunit *test, struct xarray *xa) +static noinline void shadow_remove(struct xarray *xa) { struct xa_node *node; @@ -2043,17 +1954,8 @@ static noinline void shadow_remove(struct kunit *test, struct xarray *xa) xa_unlock(xa); } -struct workingset_testcase { - struct xarray *xa; - unsigned long index; -}; - -static noinline void check_workingset(struct kunit *test) +static noinline void check_workingset(struct xarray *xa, unsigned long index) { - struct workingset_testcase tc = *(struct workingset_testcase *)test->param_value; - struct xarray *xa = tc.xa; - unsigned long index = tc.index; - XA_STATE(xas, xa, index); xas_set_update(&xas, test_update_node); @@ -2076,7 +1978,7 @@ static noinline void check_workingset(struct kunit *test) xas_unlock(&xas); XA_BUG_ON(xa, list_empty(&shadow_nodes)); - shadow_remove(test, xa); + shadow_remove(xa); XA_BUG_ON(xa, !list_empty(&shadow_nodes)); XA_BUG_ON(xa, !xa_empty(xa)); } @@ -2085,11 +1987,9 @@ static noinline void check_workingset(struct kunit *test) * Check that the pointer / value / sibling entries are accounted the * way we expect them to be. 
*/ -static noinline void check_account(struct kunit *test) +static noinline void check_account(struct xarray *xa) { #ifdef CONFIG_XARRAY_MULTI - struct xarray *xa = xa_param(test); - unsigned int order; for (order = 1; order < 12; order++) { @@ -2116,10 +2016,8 @@ static noinline void check_account(struct kunit *test) #endif } -static noinline void check_get_order(struct kunit *test) +static noinline void check_get_order(struct xarray *xa) { - struct xarray *xa = xa_param(test); - unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; unsigned int order; unsigned long i, j; @@ -2138,10 +2036,8 @@ static noinline void check_get_order(struct kunit *test) } } -static noinline void check_xas_get_order(struct kunit *test) +static noinline void check_xas_get_order(struct xarray *xa) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, 0); unsigned int max_order = IS_ENABLED(CONFIG_XARRAY_MULTI) ? 20 : 1; @@ -2173,10 +2069,8 @@ static noinline void check_xas_get_order(struct kunit *test) } } -static noinline void check_xas_conflict_get_order(struct kunit *test) +static noinline void check_xas_conflict_get_order(struct xarray *xa) { - struct xarray *xa = xa_param(test); - XA_STATE(xas, xa, 0); void *entry; @@ -2233,10 +2127,8 @@ static noinline void check_xas_conflict_get_order(struct kunit *test) } -static noinline void check_destroy(struct kunit *test) +static noinline void check_destroy(struct xarray *xa) { - struct xarray *xa = xa_param(test); - unsigned long index; XA_BUG_ON(xa, !xa_empty(xa)); @@ -2269,59 +2161,52 @@ static noinline void check_destroy(struct kunit *test) } static DEFINE_XARRAY(array); -static struct xarray *arrays[] = { &array }; -KUNIT_ARRAY_PARAM(array, arrays, NULL); - -static struct xarray *xa0s[] = { &xa0 }; -KUNIT_ARRAY_PARAM(xa0, xa0s, NULL); - -static struct workingset_testcase workingset_testcases[] = { - { &array, 0 }, - { &array, 64 }, - { &array, 4096 }, -}; -KUNIT_ARRAY_PARAM(workingset, workingset_testcases, NULL); - -static struct kunit_case xarray_cases[] = { - KUNIT_CASE_PARAM(check_xa_err, array_gen_params), - KUNIT_CASE_PARAM(check_xas_retry, array_gen_params), - KUNIT_CASE_PARAM(check_xa_load, array_gen_params), - KUNIT_CASE_PARAM(check_xa_mark, array_gen_params), - KUNIT_CASE_PARAM(check_xa_shrink, array_gen_params), - KUNIT_CASE_PARAM(check_xas_erase, array_gen_params), - KUNIT_CASE_PARAM(check_insert, array_gen_params), - KUNIT_CASE_PARAM(check_cmpxchg, array_gen_params), - KUNIT_CASE_PARAM(check_cmpxchg_order, array_gen_params), - KUNIT_CASE_PARAM(check_reserve, array_gen_params), - KUNIT_CASE_PARAM(check_reserve, xa0_gen_params), - KUNIT_CASE_PARAM(check_multi_store, array_gen_params), - KUNIT_CASE_PARAM(check_multi_store_advanced, array_gen_params), - KUNIT_CASE_PARAM(check_get_order, array_gen_params), - KUNIT_CASE_PARAM(check_xas_get_order, array_gen_params), - KUNIT_CASE_PARAM(check_xas_conflict_get_order, array_gen_params), - KUNIT_CASE(check_xa_alloc), - KUNIT_CASE_PARAM(check_find, array_gen_params), - KUNIT_CASE_PARAM(check_find_entry, array_gen_params), - KUNIT_CASE_PARAM(check_pause, array_gen_params), - KUNIT_CASE_PARAM(check_account, array_gen_params), - KUNIT_CASE_PARAM(check_destroy, array_gen_params), - KUNIT_CASE_PARAM(check_move, array_gen_params), - KUNIT_CASE_PARAM(check_create_range, array_gen_params), - KUNIT_CASE_PARAM(check_store_range, array_gen_params), - KUNIT_CASE_PARAM(check_store_iter, array_gen_params), - KUNIT_CASE_PARAM(check_align, xa0_gen_params), - KUNIT_CASE_PARAM(check_split, array_gen_params), 
- KUNIT_CASE_PARAM(check_workingset, workingset_gen_params), - {}, -}; - -static struct kunit_suite xarray_suite = { - .name = "xarray", - .test_cases = xarray_cases, -}; - -kunit_test_suite(xarray_suite); +static int xarray_checks(void) +{ + check_xa_err(&array); + check_xas_retry(&array); + check_xa_load(&array); + check_xa_mark(&array); + check_xa_shrink(&array); + check_xas_erase(&array); + check_insert(&array); + check_cmpxchg(&array); + check_cmpxchg_order(&array); + check_reserve(&array); + check_reserve(&xa0); + check_multi_store(&array); + check_multi_store_advanced(&array); + check_get_order(&array); + check_xas_get_order(&array); + check_xas_conflict_get_order(&array); + check_xa_alloc(); + check_find(&array); + check_find_entry(&array); + check_pause(&array); + check_account(&array); + check_destroy(&array); + check_move(&array); + check_create_range(&array); + check_store_range(&array); + check_store_iter(&array); + check_align(&xa0); + check_split(&array); + + check_workingset(&array, 0); + check_workingset(&array, 64); + check_workingset(&array, 4096); + + printk("XArray: %u of %u tests passed\n", tests_passed, tests_run); + return (tests_run == tests_passed) ? 0 : -EINVAL; +} + +static void xarray_exit(void) +{ +} + +module_init(xarray_checks); +module_exit(xarray_exit); MODULE_AUTHOR("Matthew Wilcox "); MODULE_DESCRIPTION("XArray API test module"); MODULE_LICENSE("GPL"); -- 2.51.0 From e5b2a356dc8a88708d97bd47cca3b8f7ed7af6cb Mon Sep 17 00:00:00 2001 From: Andrew Morton Date: Thu, 30 Jan 2025 16:16:20 -0800 Subject: [PATCH 07/16] MAINTAINERS: include linux-mm for xarray maintenance MM developers have an interest in the xarray code. Cc: David Gow Cc: Geert Uytterhoeven Cc: "Liam R. Howlett" Cc: Lorenzo Stoakes Cc: Matthew Wilcox Cc: Sidhartha Kumar Cc: Tamir Duberstein Signed-off-by: Andrew Morton --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index f52a004982c9..ab7463b2f165 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -25729,6 +25729,7 @@ F: arch/x86/entry/vdso/ XARRAY M: Matthew Wilcox L: linux-fsdevel@vger.kernel.org +L: linux-mm@kvack.org S: Supported F: Documentation/core-api/xarray.rst F: include/linux/idr.h -- 2.51.0 From 2c4627c8ced77855b106c7104ecab70837d53799 Mon Sep 17 00:00:00 2001 From: Len Brown Date: Sun, 2 Feb 2025 10:43:02 -0600 Subject: [PATCH 08/16] tools/power turbostat: version 2025.02.02 Summary of Changes since 2024.11.30: Fix regression in 2023.11.07 that affinitized forked child in one-shot mode. Harden one-shot mode against hotplug online/offline Enable RAPL SysWatt column by default. Add initial PTL, CWF platform support. Harden initial PMT code in response to early use. Enable first built-in PMT counter: CWF c1e residency Refuse to run on unsupported platforms without --force, to encourage updating to a version that supports the system, and to avoid no-so-useful measurement results. 
Signed-off-by: Len Brown --- tools/power/x86/turbostat/turbostat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index 364a44a7d7ae..8d5011a0bf60 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -9559,7 +9559,7 @@ int get_and_dump_counters(void) void print_version() { - fprintf(outf, "turbostat version 2025.01.14 - Len Brown \n"); + fprintf(outf, "turbostat version 2025.02.02 - Len Brown \n"); } #define COMMAND_LINE_SIZE 2048 -- 2.51.0 From 2014c95afecee3e76ca4a56956a936e23283f05b Mon Sep 17 00:00:00 2001 From: Linus Torvalds Date: Sun, 2 Feb 2025 15:39:26 -0800 Subject: [PATCH 09/16] Linux 6.14-rc1 --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 4117cc79748b..9e0d63d9d94b 100644 --- a/Makefile +++ b/Makefile @@ -1,8 +1,8 @@ # SPDX-License-Identifier: GPL-2.0 VERSION = 6 -PATCHLEVEL = 13 +PATCHLEVEL = 14 SUBLEVEL = 0 -EXTRAVERSION = +EXTRAVERSION = -rc1 NAME = Baby Opossum Posse # *DOCUMENTATION* -- 2.51.0 From 67407b84e0ed3915d77bfd1d05e7bd51ddbf03ee Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Wed, 5 Mar 2025 14:34:09 +0100 Subject: [PATCH 10/16] scsi: ufs: dt-bindings: renesas,ufs: Add calibration data On R-Car S4-8 ES1.2, the E-FUSE block contains PLL and AFE tuning parameters for the Universal Flash Storage controller. Document the related NVMEM properties, and update the example. Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/2f337169f8183d48b7d94ee13565fea804aade84.1741179611.git.geert+renesas@glider.be Acked-by: Conor Dooley Signed-off-by: Martin K. Petersen --- .../devicetree/bindings/ufs/renesas,ufs.yaml | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/Documentation/devicetree/bindings/ufs/renesas,ufs.yaml b/Documentation/devicetree/bindings/ufs/renesas,ufs.yaml index 1949a15e73d2..ac11ac7d1d12 100644 --- a/Documentation/devicetree/bindings/ufs/renesas,ufs.yaml +++ b/Documentation/devicetree/bindings/ufs/renesas,ufs.yaml @@ -33,6 +33,16 @@ properties: resets: maxItems: 1 + nvmem-cells: + maxItems: 1 + + nvmem-cell-names: + items: + - const: calibration + +dependencies: + nvmem-cells: [ nvmem-cell-names ] + required: - compatible - reg @@ -58,4 +68,6 @@ examples: freq-table-hz = <200000000 200000000>, <38400000 38400000>; power-domains = <&sysc R8A779F0_PD_ALWAYS_ON>; resets = <&cpg 1514>; + nvmem-cells = <&ufs_tune>; + nvmem-cell-names = "calibration"; }; -- 2.51.0 From c4e83573c3d01b3679f241ecf78080d67fec3159 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 5 Mar 2025 14:34:10 +0100 Subject: [PATCH 11/16] scsi: ufs: renesas: Replace init data by init code Since initialization of the UFS controller on R-Car S4-8 ES1.0 requires only static values, the driver uses initialization data stored in the const ufs_param[] array. However, other UFS controller variants (R-Car S4-8 ES1.2) require dynamic values, like those obtained from E-FUSE. Refactor the initialization code to prepare for this. This also reduces kernel size by almost 30 KiB. Signed-off-by: Yoshihiro Shimoda Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/3520e27ac7ff512de6508f630eee3c1689a7c73d.1741179611.git.geert+renesas@glider.be Signed-off-by: Martin K. 
Petersen --- drivers/ufs/host/ufs-renesas.c | 511 ++++++++++++++++++--------------- 1 file changed, 286 insertions(+), 225 deletions(-) diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c index 03cd82db751b..ac096d013287 100644 --- a/drivers/ufs/host/ufs-renesas.c +++ b/drivers/ufs/host/ufs-renesas.c @@ -39,98 +39,6 @@ enum ufs_renesas_init_param_mode { MODE_WRITE, }; -#define PARAM_RESTORE(_reg, _index) \ - { .mode = MODE_RESTORE, .reg = _reg, .index = _index } -#define PARAM_SET(_index, _set) \ - { .mode = MODE_SET, .index = _index, .u.set = _set } -#define PARAM_SAVE(_reg, _mask, _index) \ - { .mode = MODE_SAVE, .reg = _reg, .mask = (u32)(_mask), \ - .index = _index } -#define PARAM_POLL(_reg, _expected, _mask) \ - { .mode = MODE_POLL, .reg = _reg, .u.expected = _expected, \ - .mask = (u32)(_mask) } -#define PARAM_WAIT(_delay_us) \ - { .mode = MODE_WAIT, .u.delay_us = _delay_us } - -#define PARAM_WRITE(_reg, _val) \ - { .mode = MODE_WRITE, .reg = _reg, .u.val = _val } - -#define PARAM_WRITE_D0_D4(_d0, _d4) \ - PARAM_WRITE(0xd0, _d0), PARAM_WRITE(0xd4, _d4) - -#define PARAM_WRITE_800_80C_POLL(_addr, _data_800) \ - PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \ - PARAM_WRITE_D0_D4(0x00000800, ((_data_800) << 16) | BIT(8) | (_addr)), \ - PARAM_WRITE(0xd0, 0x0000080c), \ - PARAM_POLL(0xd4, BIT(8), BIT(8)) - -#define PARAM_RESTORE_800_80C_POLL(_index) \ - PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \ - PARAM_WRITE(0xd0, 0x00000800), \ - PARAM_RESTORE(0xd4, _index), \ - PARAM_WRITE(0xd0, 0x0000080c), \ - PARAM_POLL(0xd4, BIT(8), BIT(8)) - -#define PARAM_WRITE_804_80C_POLL(_addr, _data_804) \ - PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), \ - PARAM_WRITE_D0_D4(0x00000804, ((_data_804) << 16) | BIT(8) | (_addr)), \ - PARAM_WRITE(0xd0, 0x0000080c), \ - PARAM_POLL(0xd4, BIT(8), BIT(8)) - -#define PARAM_WRITE_828_82C_POLL(_data_828) \ - PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), \ - PARAM_WRITE_D0_D4(0x00000828, _data_828), \ - PARAM_WRITE(0xd0, 0x0000082c), \ - PARAM_POLL(0xd4, _data_828, _data_828) - -#define PARAM_WRITE_PHY(_addr16, _data16) \ - PARAM_WRITE(0xf0, 1), \ - PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \ - PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \ - PARAM_WRITE_800_80C_POLL(0x18, (_data16) & 0xff), \ - PARAM_WRITE_800_80C_POLL(0x19, ((_data16) >> 8) & 0xff), \ - PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \ - PARAM_WRITE_828_82C_POLL(0x0f000000), \ - PARAM_WRITE(0xf0, 0) - -#define PARAM_SET_PHY(_addr16, _data16) \ - PARAM_WRITE(0xf0, 1), \ - PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \ - PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \ - PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \ - PARAM_WRITE_828_82C_POLL(0x0f000000), \ - PARAM_WRITE_804_80C_POLL(0x1a, 0), \ - PARAM_WRITE(0xd0, 0x00000808), \ - PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_LO), \ - PARAM_WRITE_804_80C_POLL(0x1b, 0), \ - PARAM_WRITE(0xd0, 0x00000808), \ - PARAM_SAVE(0xd4, 0xff, SET_PHY_INDEX_HI), \ - PARAM_WRITE_828_82C_POLL(0x0f000000), \ - PARAM_WRITE(0xf0, 0), \ - PARAM_WRITE(0xf0, 1), \ - PARAM_WRITE_800_80C_POLL(0x16, (_addr16) & 0xff), \ - PARAM_WRITE_800_80C_POLL(0x17, ((_addr16) >> 8) & 0xff), \ - PARAM_SET(SET_PHY_INDEX_LO, ((_data16 & 0xff) << 16) | BIT(8) | 0x18), \ - PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_LO), \ - PARAM_SET(SET_PHY_INDEX_HI, (((_data16 >> 8) & 0xff) << 16) | BIT(8) | 0x19), \ - PARAM_RESTORE_800_80C_POLL(SET_PHY_INDEX_HI), \ - PARAM_WRITE_800_80C_POLL(0x1c, 0x01), \ - PARAM_WRITE_828_82C_POLL(0x0f000000), \ - PARAM_WRITE(0xf0, 0) - -#define 
PARAM_INDIRECT_WRITE(_gpio, _addr, _data_800) \ - PARAM_WRITE(0xf0, _gpio), \ - PARAM_WRITE_800_80C_POLL(_addr, _data_800), \ - PARAM_WRITE_828_82C_POLL(0x0f000000), \ - PARAM_WRITE(0xf0, 0) - -#define PARAM_INDIRECT_POLL(_gpio, _addr, _expected, _mask) \ - PARAM_WRITE(0xf0, _gpio), \ - PARAM_WRITE_800_80C_POLL(_addr, 0), \ - PARAM_WRITE(0xd0, 0x00000808), \ - PARAM_POLL(0xd4, _expected, _mask), \ - PARAM_WRITE(0xf0, 0) - struct ufs_renesas_init_param { enum ufs_renesas_init_param_mode mode; u32 reg; @@ -144,135 +52,6 @@ struct ufs_renesas_init_param { u32 index; }; -/* This setting is for SERIES B */ -static const struct ufs_renesas_init_param ufs_param[] = { - PARAM_WRITE(0xc0, 0x49425308), - PARAM_WRITE_D0_D4(0x00000104, 0x00000002), - PARAM_WAIT(1), - PARAM_WRITE_D0_D4(0x00000828, 0x00000200), - PARAM_WAIT(1), - PARAM_WRITE_D0_D4(0x00000828, 0x00000000), - PARAM_WRITE_D0_D4(0x00000104, 0x00000001), - PARAM_WRITE_D0_D4(0x00000940, 0x00000001), - PARAM_WAIT(1), - PARAM_WRITE_D0_D4(0x00000940, 0x00000000), - - PARAM_WRITE(0xc0, 0x49425308), - PARAM_WRITE(0xc0, 0x41584901), - - PARAM_WRITE_D0_D4(0x0000080c, 0x00000100), - PARAM_WRITE_D0_D4(0x00000804, 0x00000000), - PARAM_WRITE(0xd0, 0x0000080c), - PARAM_POLL(0xd4, BIT(8), BIT(8)), - - PARAM_WRITE(REG_CONTROLLER_ENABLE, 0x00000001), - - PARAM_WRITE(0xd0, 0x00000804), - PARAM_POLL(0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0)), - - PARAM_WRITE(0xd0, 0x00000d00), - PARAM_SAVE(0xd4, 0x0000ffff, TIMER_INDEX), - PARAM_WRITE(0xd4, 0x00000000), - PARAM_WRITE_D0_D4(0x0000082c, 0x0f000000), - PARAM_WRITE_D0_D4(0x00000828, 0x08000000), - PARAM_WRITE(0xd0, 0x0000082c), - PARAM_POLL(0xd4, BIT(27), BIT(27)), - PARAM_WRITE(0xd0, 0x00000d2c), - PARAM_POLL(0xd4, BIT(0), BIT(0)), - - /* phy setup */ - PARAM_INDIRECT_WRITE(1, 0x01, 0x001f), - PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014), - PARAM_INDIRECT_WRITE(7, 0x5e, 0x0014), - PARAM_INDIRECT_WRITE(7, 0x0d, 0x0003), - PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007), - PARAM_INDIRECT_WRITE(7, 0x5f, 0x0003), - PARAM_INDIRECT_WRITE(7, 0x60, 0x0003), - PARAM_INDIRECT_WRITE(7, 0x5b, 0x00a6), - PARAM_INDIRECT_WRITE(7, 0x5c, 0x0003), - - PARAM_INDIRECT_POLL(7, 0x3c, 0, BIT(7)), - PARAM_INDIRECT_POLL(7, 0x4c, 0, BIT(4)), - - PARAM_INDIRECT_WRITE(1, 0x32, 0x0080), - PARAM_INDIRECT_WRITE(1, 0x1f, 0x0001), - PARAM_INDIRECT_WRITE(0, 0x2c, 0x0001), - PARAM_INDIRECT_WRITE(0, 0x32, 0x0087), - - PARAM_INDIRECT_WRITE(1, 0x4d, 0x0061), - PARAM_INDIRECT_WRITE(4, 0x9b, 0x0009), - PARAM_INDIRECT_WRITE(4, 0xa6, 0x0005), - PARAM_INDIRECT_WRITE(4, 0xa5, 0x0058), - PARAM_INDIRECT_WRITE(1, 0x39, 0x0027), - PARAM_INDIRECT_WRITE(1, 0x47, 0x004c), - - PARAM_INDIRECT_WRITE(7, 0x0d, 0x0002), - PARAM_INDIRECT_WRITE(7, 0x0e, 0x0007), - - PARAM_WRITE_PHY(0x0028, 0x0061), - PARAM_WRITE_PHY(0x4014, 0x0061), - PARAM_SET_PHY(0x401c, BIT(2)), - PARAM_WRITE_PHY(0x4000, 0x0000), - PARAM_WRITE_PHY(0x4001, 0x0000), - - PARAM_WRITE_PHY(0x10ae, 0x0001), - PARAM_WRITE_PHY(0x10ad, 0x0000), - PARAM_WRITE_PHY(0x10af, 0x0001), - PARAM_WRITE_PHY(0x10b6, 0x0001), - PARAM_WRITE_PHY(0x10ae, 0x0000), - - PARAM_WRITE_PHY(0x10ae, 0x0001), - PARAM_WRITE_PHY(0x10ad, 0x0000), - PARAM_WRITE_PHY(0x10af, 0x0002), - PARAM_WRITE_PHY(0x10b6, 0x0001), - PARAM_WRITE_PHY(0x10ae, 0x0000), - - PARAM_WRITE_PHY(0x10ae, 0x0001), - PARAM_WRITE_PHY(0x10ad, 0x0080), - PARAM_WRITE_PHY(0x10af, 0x0000), - PARAM_WRITE_PHY(0x10b6, 0x0001), - PARAM_WRITE_PHY(0x10ae, 0x0000), - - PARAM_WRITE_PHY(0x10ae, 0x0001), - PARAM_WRITE_PHY(0x10ad, 0x0080), - PARAM_WRITE_PHY(0x10af, 0x001a), - 
PARAM_WRITE_PHY(0x10b6, 0x0001), - PARAM_WRITE_PHY(0x10ae, 0x0000), - - PARAM_INDIRECT_WRITE(7, 0x70, 0x0016), - PARAM_INDIRECT_WRITE(7, 0x71, 0x0016), - PARAM_INDIRECT_WRITE(7, 0x72, 0x0014), - PARAM_INDIRECT_WRITE(7, 0x73, 0x0014), - PARAM_INDIRECT_WRITE(7, 0x74, 0x0000), - PARAM_INDIRECT_WRITE(7, 0x75, 0x0000), - PARAM_INDIRECT_WRITE(7, 0x76, 0x0010), - PARAM_INDIRECT_WRITE(7, 0x77, 0x0010), - PARAM_INDIRECT_WRITE(7, 0x78, 0x00ff), - PARAM_INDIRECT_WRITE(7, 0x79, 0x0000), - - PARAM_INDIRECT_WRITE(7, 0x19, 0x0007), - - PARAM_INDIRECT_WRITE(7, 0x1a, 0x0007), - - PARAM_INDIRECT_WRITE(7, 0x24, 0x000c), - - PARAM_INDIRECT_WRITE(7, 0x25, 0x000c), - - PARAM_INDIRECT_WRITE(7, 0x62, 0x0000), - PARAM_INDIRECT_WRITE(7, 0x63, 0x0000), - PARAM_INDIRECT_WRITE(7, 0x5d, 0x0014), - PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017), - PARAM_INDIRECT_WRITE(7, 0x5d, 0x0004), - PARAM_INDIRECT_WRITE(7, 0x5e, 0x0017), - PARAM_INDIRECT_POLL(7, 0x55, 0, BIT(6)), - PARAM_INDIRECT_POLL(7, 0x41, 0, BIT(7)), - /* end of phy setup */ - - PARAM_WRITE(0xf0, 0), - PARAM_WRITE(0xd0, 0x00000d00), - PARAM_RESTORE(0xd4, TIMER_INDEX), -}; - static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba) { ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + "); @@ -320,13 +99,295 @@ static void ufs_renesas_reg_control(struct ufs_hba *hba, } } +static void ufs_renesas_poll(struct ufs_hba *hba, u32 reg, u32 expected, u32 mask) +{ + struct ufs_renesas_init_param param = { + .mode = MODE_POLL, + .reg = reg, + .u.expected = expected, + .mask = mask, + }; + + ufs_renesas_reg_control(hba, ¶m); +} + +static void ufs_renesas_restore(struct ufs_hba *hba, u32 reg, u32 index) +{ + struct ufs_renesas_init_param param = { + .mode = MODE_RESTORE, + .reg = reg, + .index = index, + }; + + ufs_renesas_reg_control(hba, ¶m); +} + +static void ufs_renesas_save(struct ufs_hba *hba, u32 reg, u32 mask, u32 index) +{ + struct ufs_renesas_init_param param = { + .mode = MODE_SAVE, + .reg = reg, + .mask = mask, + .index = index, + }; + + ufs_renesas_reg_control(hba, ¶m); +} + +static void ufs_renesas_set(struct ufs_hba *hba, u32 index, u32 set) +{ + struct ufs_renesas_init_param param = { + .mode = MODE_SAVE, + .index = index, + .u.set = set, + }; + + ufs_renesas_reg_control(hba, ¶m); +} + +static void ufs_renesas_wait(struct ufs_hba *hba, u32 delay_us) +{ + struct ufs_renesas_init_param param = { + .mode = MODE_WAIT, + .u.delay_us = delay_us, + }; + + ufs_renesas_reg_control(hba, ¶m); +} + +static void ufs_renesas_write(struct ufs_hba *hba, u32 reg, u32 value) +{ + struct ufs_renesas_init_param param = { + .mode = MODE_WRITE, + .reg = reg, + .u.val = value, + }; + + ufs_renesas_reg_control(hba, ¶m); +} + +static void ufs_renesas_write_d0_d4(struct ufs_hba *hba, u32 data_d0, u32 data_d4) +{ + ufs_renesas_write(hba, 0xd0, data_d0); + ufs_renesas_write(hba, 0xd4, data_d4); +} + +static void ufs_renesas_write_800_80c_poll(struct ufs_hba *hba, u32 addr, + u32 data_800) +{ + ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100); + ufs_renesas_write_d0_d4(hba, 0x00000800, (data_800 << 16) | BIT(8) | addr); + ufs_renesas_write(hba, 0xd0, 0x0000080c); + ufs_renesas_poll(hba, 0xd4, BIT(8), BIT(8)); +} + +static void ufs_renesas_restore_800_80c_poll(struct ufs_hba *hba, u32 index) +{ + ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100); + ufs_renesas_write(hba, 0xd0, 0x00000800); + ufs_renesas_restore(hba, 0xd4, index); + ufs_renesas_write(hba, 0xd0, 0x0000080c); + ufs_renesas_poll(hba, 0xd4, BIT(8), BIT(8)); +} + +static void ufs_renesas_write_804_80c_poll(struct ufs_hba *hba, 
u32 addr, u32 data_804) +{ + ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100); + ufs_renesas_write_d0_d4(hba, 0x00000804, (data_804 << 16) | BIT(8) | addr); + ufs_renesas_write(hba, 0xd0, 0x0000080c); + ufs_renesas_poll(hba, 0xd4, BIT(8), BIT(8)); +} + +static void ufs_renesas_write_828_82c_poll(struct ufs_hba *hba, u32 data_828) +{ + ufs_renesas_write_d0_d4(hba, 0x0000082c, 0x0f000000); + ufs_renesas_write_d0_d4(hba, 0x00000828, data_828); + ufs_renesas_write(hba, 0xd0, 0x0000082c); + ufs_renesas_poll(hba, 0xd4, data_828, data_828); +} + +static void ufs_renesas_write_phy(struct ufs_hba *hba, u32 addr16, u32 data16) +{ + ufs_renesas_write(hba, 0xf0, 1); + ufs_renesas_write_800_80c_poll(hba, 0x16, addr16 & 0xff); + ufs_renesas_write_800_80c_poll(hba, 0x17, (addr16 >> 8) & 0xff); + ufs_renesas_write_800_80c_poll(hba, 0x18, data16 & 0xff); + ufs_renesas_write_800_80c_poll(hba, 0x19, (data16 >> 8) & 0xff); + ufs_renesas_write_800_80c_poll(hba, 0x1c, 0x01); + ufs_renesas_write_828_82c_poll(hba, 0x0f000000); + ufs_renesas_write(hba, 0xf0, 0); +} + +static void ufs_renesas_set_phy(struct ufs_hba *hba, u32 addr16, u32 data16) +{ + ufs_renesas_write(hba, 0xf0, 1); + ufs_renesas_write_800_80c_poll(hba, 0x16, addr16 & 0xff); + ufs_renesas_write_800_80c_poll(hba, 0x17, (addr16 >> 8) & 0xff); + ufs_renesas_write_800_80c_poll(hba, 0x1c, 0x01); + ufs_renesas_write_828_82c_poll(hba, 0x0f000000); + ufs_renesas_write_804_80c_poll(hba, 0x1a, 0); + ufs_renesas_write(hba, 0xd0, 0x00000808); + ufs_renesas_save(hba, 0xd4, 0xff, SET_PHY_INDEX_LO); + ufs_renesas_write_804_80c_poll(hba, 0x1b, 0); + ufs_renesas_write(hba, 0xd0, 0x00000808); + ufs_renesas_save(hba, 0xd4, 0xff, SET_PHY_INDEX_HI); + ufs_renesas_write_828_82c_poll(hba, 0x0f000000); + ufs_renesas_write(hba, 0xf0, 0); + ufs_renesas_write(hba, 0xf0, 1); + ufs_renesas_write_800_80c_poll(hba, 0x16, addr16 & 0xff); + ufs_renesas_write_800_80c_poll(hba, 0x17, (addr16 >> 8) & 0xff); + ufs_renesas_set(hba, SET_PHY_INDEX_LO, ((data16 & 0xff) << 16) | BIT(8) | 0x18); + ufs_renesas_restore_800_80c_poll(hba, SET_PHY_INDEX_LO); + ufs_renesas_set(hba, SET_PHY_INDEX_HI, (((data16 >> 8) & 0xff) << 16) | BIT(8) | 0x19); + ufs_renesas_restore_800_80c_poll(hba, SET_PHY_INDEX_HI); + ufs_renesas_write_800_80c_poll(hba, 0x1c, 0x01); + ufs_renesas_write_828_82c_poll(hba, 0x0f000000); + ufs_renesas_write(hba, 0xf0, 0); +} + +static void ufs_renesas_indirect_write(struct ufs_hba *hba, u32 gpio, u32 addr, + u32 data_800) +{ + ufs_renesas_write(hba, 0xf0, gpio); + ufs_renesas_write_800_80c_poll(hba, addr, data_800); + ufs_renesas_write_828_82c_poll(hba, 0x0f000000); + ufs_renesas_write(hba, 0xf0, 0); +} + +static void ufs_renesas_indirect_poll(struct ufs_hba *hba, u32 gpio, u32 addr, + u32 expected, u32 mask) +{ + ufs_renesas_write(hba, 0xf0, gpio); + ufs_renesas_write_800_80c_poll(hba, addr, 0); + ufs_renesas_write(hba, 0xd0, 0x00000808); + ufs_renesas_poll(hba, 0xd4, expected, mask); + ufs_renesas_write(hba, 0xf0, 0); +} + static void ufs_renesas_pre_init(struct ufs_hba *hba) { - const struct ufs_renesas_init_param *p = ufs_param; - unsigned int i; + /* This setting is for SERIES B */ + ufs_renesas_write(hba, 0xc0, 0x49425308); + ufs_renesas_write_d0_d4(hba, 0x00000104, 0x00000002); + ufs_renesas_wait(hba, 1); + ufs_renesas_write_d0_d4(hba, 0x00000828, 0x00000200); + ufs_renesas_wait(hba, 1); + ufs_renesas_write_d0_d4(hba, 0x00000828, 0x00000000); + ufs_renesas_write_d0_d4(hba, 0x00000104, 0x00000001); + ufs_renesas_write_d0_d4(hba, 0x00000940, 0x00000001); + 
ufs_renesas_wait(hba, 1); + ufs_renesas_write_d0_d4(hba, 0x00000940, 0x00000000); + + ufs_renesas_write(hba, 0xc0, 0x49425308); + ufs_renesas_write(hba, 0xc0, 0x41584901); + + ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100); + ufs_renesas_write_d0_d4(hba, 0x00000804, 0x00000000); + ufs_renesas_write(hba, 0xd0, 0x0000080c); + ufs_renesas_poll(hba, 0xd4, BIT(8), BIT(8)); + + ufs_renesas_write(hba, REG_CONTROLLER_ENABLE, 0x00000001); + + ufs_renesas_write(hba, 0xd0, 0x00000804); + ufs_renesas_poll(hba, 0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0)); + + ufs_renesas_write(hba, 0xd0, 0x00000d00); + ufs_renesas_save(hba, 0xd4, 0x0000ffff, TIMER_INDEX); + ufs_renesas_write(hba, 0xd4, 0x00000000); + ufs_renesas_write_d0_d4(hba, 0x0000082c, 0x0f000000); + ufs_renesas_write_d0_d4(hba, 0x00000828, 0x08000000); + ufs_renesas_write(hba, 0xd0, 0x0000082c); + ufs_renesas_poll(hba, 0xd4, BIT(27), BIT(27)); + ufs_renesas_write(hba, 0xd0, 0x00000d2c); + ufs_renesas_poll(hba, 0xd4, BIT(0), BIT(0)); + + /* phy setup */ + ufs_renesas_indirect_write(hba, 1, 0x01, 0x001f); + ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014); + ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0014); + ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0003); + ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007); + ufs_renesas_indirect_write(hba, 7, 0x5f, 0x0003); + ufs_renesas_indirect_write(hba, 7, 0x60, 0x0003); + ufs_renesas_indirect_write(hba, 7, 0x5b, 0x00a6); + ufs_renesas_indirect_write(hba, 7, 0x5c, 0x0003); + + ufs_renesas_indirect_poll(hba, 7, 0x3c, 0, BIT(7)); + ufs_renesas_indirect_poll(hba, 7, 0x4c, 0, BIT(4)); + + ufs_renesas_indirect_write(hba, 1, 0x32, 0x0080); + ufs_renesas_indirect_write(hba, 1, 0x1f, 0x0001); + ufs_renesas_indirect_write(hba, 0, 0x2c, 0x0001); + ufs_renesas_indirect_write(hba, 0, 0x32, 0x0087); + + ufs_renesas_indirect_write(hba, 1, 0x4d, 0x0061); + ufs_renesas_indirect_write(hba, 4, 0x9b, 0x0009); + ufs_renesas_indirect_write(hba, 4, 0xa6, 0x0005); + ufs_renesas_indirect_write(hba, 4, 0xa5, 0x0058); + ufs_renesas_indirect_write(hba, 1, 0x39, 0x0027); + ufs_renesas_indirect_write(hba, 1, 0x47, 0x004c); + + ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0002); + ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007); + + ufs_renesas_write_phy(hba, 0x0028, 0x0061); + ufs_renesas_write_phy(hba, 0x4014, 0x0061); + ufs_renesas_set_phy(hba, 0x401c, BIT(2)); + ufs_renesas_write_phy(hba, 0x4000, 0x0000); + ufs_renesas_write_phy(hba, 0x4001, 0x0000); + + ufs_renesas_write_phy(hba, 0x10ae, 0x0001); + ufs_renesas_write_phy(hba, 0x10ad, 0x0000); + ufs_renesas_write_phy(hba, 0x10af, 0x0001); + ufs_renesas_write_phy(hba, 0x10b6, 0x0001); + ufs_renesas_write_phy(hba, 0x10ae, 0x0000); + + ufs_renesas_write_phy(hba, 0x10ae, 0x0001); + ufs_renesas_write_phy(hba, 0x10ad, 0x0000); + ufs_renesas_write_phy(hba, 0x10af, 0x0002); + ufs_renesas_write_phy(hba, 0x10b6, 0x0001); + ufs_renesas_write_phy(hba, 0x10ae, 0x0000); + + ufs_renesas_write_phy(hba, 0x10ae, 0x0001); + ufs_renesas_write_phy(hba, 0x10ad, 0x0080); + ufs_renesas_write_phy(hba, 0x10af, 0x0000); + ufs_renesas_write_phy(hba, 0x10b6, 0x0001); + ufs_renesas_write_phy(hba, 0x10ae, 0x0000); + + ufs_renesas_write_phy(hba, 0x10ae, 0x0001); + ufs_renesas_write_phy(hba, 0x10ad, 0x0080); + ufs_renesas_write_phy(hba, 0x10af, 0x001a); + ufs_renesas_write_phy(hba, 0x10b6, 0x0001); + ufs_renesas_write_phy(hba, 0x10ae, 0x0000); + + ufs_renesas_indirect_write(hba, 7, 0x70, 0x0016); + ufs_renesas_indirect_write(hba, 7, 0x71, 0x0016); + ufs_renesas_indirect_write(hba, 7, 0x72, 0x0014); 
+ ufs_renesas_indirect_write(hba, 7, 0x73, 0x0014); + ufs_renesas_indirect_write(hba, 7, 0x74, 0x0000); + ufs_renesas_indirect_write(hba, 7, 0x75, 0x0000); + ufs_renesas_indirect_write(hba, 7, 0x76, 0x0010); + ufs_renesas_indirect_write(hba, 7, 0x77, 0x0010); + ufs_renesas_indirect_write(hba, 7, 0x78, 0x00ff); + ufs_renesas_indirect_write(hba, 7, 0x79, 0x0000); + + ufs_renesas_indirect_write(hba, 7, 0x19, 0x0007); + ufs_renesas_indirect_write(hba, 7, 0x1a, 0x0007); + ufs_renesas_indirect_write(hba, 7, 0x24, 0x000c); + ufs_renesas_indirect_write(hba, 7, 0x25, 0x000c); + ufs_renesas_indirect_write(hba, 7, 0x62, 0x0000); + ufs_renesas_indirect_write(hba, 7, 0x63, 0x0000); + ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014); + ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017); + ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0004); + ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017); + ufs_renesas_indirect_poll(hba, 7, 0x55, 0, BIT(6)); + ufs_renesas_indirect_poll(hba, 7, 0x41, 0, BIT(7)); + /* end of phy setup */ - for (i = 0; i < ARRAY_SIZE(ufs_param); i++) - ufs_renesas_reg_control(hba, &p[i]); + ufs_renesas_write(hba, 0xf0, 0); + ufs_renesas_write(hba, 0xd0, 0x00000d00); + ufs_renesas_restore(hba, 0xd4, TIMER_INDEX); } static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba, -- 2.51.0 From 5129aa6275997eefb5313b2d16f07e2583e9e7be Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 5 Mar 2025 14:34:11 +0100 Subject: [PATCH 12/16] scsi: ufs: renesas: Add register read to remove save/set/restore Add support for returning read register values from ufs_renesas_reg_control(), so ufs_renesas_set_phy() can use the existing ufs_renesas_write_phy() helper. Remove the now unused code to save to, set, and restore from a static array inside ufs_renesas_reg_control(). Signed-off-by: Yoshihiro Shimoda Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/9fa240a9dc0308d6675138f8434eccb77f051650.1741179611.git.geert+renesas@glider.be Signed-off-by: Martin K. 
Petersen --- drivers/ufs/host/ufs-renesas.c | 99 ++++++++-------------------------- 1 file changed, 23 insertions(+), 76 deletions(-) diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c index ac096d013287..100186a2e180 100644 --- a/drivers/ufs/host/ufs-renesas.c +++ b/drivers/ufs/host/ufs-renesas.c @@ -23,18 +23,9 @@ struct ufs_renesas_priv { bool initialized; /* The hardware needs initialization once */ }; -enum { - SET_PHY_INDEX_LO = 0, - SET_PHY_INDEX_HI, - TIMER_INDEX, - MAX_INDEX -}; - enum ufs_renesas_init_param_mode { - MODE_RESTORE, - MODE_SET, - MODE_SAVE, MODE_POLL, + MODE_READ, MODE_WAIT, MODE_WRITE, }; @@ -45,7 +36,6 @@ struct ufs_renesas_init_param { union { u32 expected; u32 delay_us; - u32 set; u32 val; } u; u32 mask; @@ -57,25 +47,13 @@ static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba) ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + "); } -static void ufs_renesas_reg_control(struct ufs_hba *hba, - const struct ufs_renesas_init_param *p) +static u32 ufs_renesas_reg_control(struct ufs_hba *hba, + const struct ufs_renesas_init_param *p) { - static u32 save[MAX_INDEX]; + u32 val = 0; int ret; - u32 val; - - WARN_ON(p->index >= MAX_INDEX); switch (p->mode) { - case MODE_RESTORE: - ufshcd_writel(hba, save[p->index], p->reg); - break; - case MODE_SET: - save[p->index] |= p->u.set; - break; - case MODE_SAVE: - save[p->index] = ufshcd_readl(hba, p->reg) & p->mask; - break; case MODE_POLL: ret = readl_poll_timeout_atomic(hba->mmio_base + p->reg, val, @@ -85,6 +63,9 @@ static void ufs_renesas_reg_control(struct ufs_hba *hba, dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n", __func__, ret, val, p->mask, p->u.expected); break; + case MODE_READ: + val = ufshcd_readl(hba, p->reg); + break; case MODE_WAIT: if (p->u.delay_us > 1000) mdelay(DIV_ROUND_UP(p->u.delay_us, 1000)); @@ -97,6 +78,8 @@ static void ufs_renesas_reg_control(struct ufs_hba *hba, default: break; } + + return val; } static void ufs_renesas_poll(struct ufs_hba *hba, u32 reg, u32 expected, u32 mask) @@ -111,38 +94,14 @@ static void ufs_renesas_poll(struct ufs_hba *hba, u32 reg, u32 expected, u32 mas ufs_renesas_reg_control(hba, ¶m); } -static void ufs_renesas_restore(struct ufs_hba *hba, u32 reg, u32 index) -{ - struct ufs_renesas_init_param param = { - .mode = MODE_RESTORE, - .reg = reg, - .index = index, - }; - - ufs_renesas_reg_control(hba, ¶m); -} - -static void ufs_renesas_save(struct ufs_hba *hba, u32 reg, u32 mask, u32 index) +static u32 ufs_renesas_read(struct ufs_hba *hba, u32 reg) { struct ufs_renesas_init_param param = { - .mode = MODE_SAVE, + .mode = MODE_READ, .reg = reg, - .mask = mask, - .index = index, - }; - - ufs_renesas_reg_control(hba, ¶m); -} - -static void ufs_renesas_set(struct ufs_hba *hba, u32 index, u32 set) -{ - struct ufs_renesas_init_param param = { - .mode = MODE_SAVE, - .index = index, - .u.set = set, }; - ufs_renesas_reg_control(hba, ¶m); + return ufs_renesas_reg_control(hba, ¶m); } static void ufs_renesas_wait(struct ufs_hba *hba, u32 delay_us) @@ -181,15 +140,6 @@ static void ufs_renesas_write_800_80c_poll(struct ufs_hba *hba, u32 addr, ufs_renesas_poll(hba, 0xd4, BIT(8), BIT(8)); } -static void ufs_renesas_restore_800_80c_poll(struct ufs_hba *hba, u32 index) -{ - ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100); - ufs_renesas_write(hba, 0xd0, 0x00000800); - ufs_renesas_restore(hba, 0xd4, index); - ufs_renesas_write(hba, 0xd0, 0x0000080c); - ufs_renesas_poll(hba, 0xd4, BIT(8), BIT(8)); -} - static void ufs_renesas_write_804_80c_poll(struct 
ufs_hba *hba, u32 addr, u32 data_804) { ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100); @@ -220,6 +170,8 @@ static void ufs_renesas_write_phy(struct ufs_hba *hba, u32 addr16, u32 data16) static void ufs_renesas_set_phy(struct ufs_hba *hba, u32 addr16, u32 data16) { + u32 low, high; + ufs_renesas_write(hba, 0xf0, 1); ufs_renesas_write_800_80c_poll(hba, 0x16, addr16 & 0xff); ufs_renesas_write_800_80c_poll(hba, 0x17, (addr16 >> 8) & 0xff); @@ -227,22 +179,15 @@ static void ufs_renesas_set_phy(struct ufs_hba *hba, u32 addr16, u32 data16) ufs_renesas_write_828_82c_poll(hba, 0x0f000000); ufs_renesas_write_804_80c_poll(hba, 0x1a, 0); ufs_renesas_write(hba, 0xd0, 0x00000808); - ufs_renesas_save(hba, 0xd4, 0xff, SET_PHY_INDEX_LO); + low = ufs_renesas_read(hba, 0xd4) & 0xff; ufs_renesas_write_804_80c_poll(hba, 0x1b, 0); ufs_renesas_write(hba, 0xd0, 0x00000808); - ufs_renesas_save(hba, 0xd4, 0xff, SET_PHY_INDEX_HI); - ufs_renesas_write_828_82c_poll(hba, 0x0f000000); - ufs_renesas_write(hba, 0xf0, 0); - ufs_renesas_write(hba, 0xf0, 1); - ufs_renesas_write_800_80c_poll(hba, 0x16, addr16 & 0xff); - ufs_renesas_write_800_80c_poll(hba, 0x17, (addr16 >> 8) & 0xff); - ufs_renesas_set(hba, SET_PHY_INDEX_LO, ((data16 & 0xff) << 16) | BIT(8) | 0x18); - ufs_renesas_restore_800_80c_poll(hba, SET_PHY_INDEX_LO); - ufs_renesas_set(hba, SET_PHY_INDEX_HI, (((data16 >> 8) & 0xff) << 16) | BIT(8) | 0x19); - ufs_renesas_restore_800_80c_poll(hba, SET_PHY_INDEX_HI); - ufs_renesas_write_800_80c_poll(hba, 0x1c, 0x01); + high = ufs_renesas_read(hba, 0xd4) & 0xff; ufs_renesas_write_828_82c_poll(hba, 0x0f000000); ufs_renesas_write(hba, 0xf0, 0); + + data16 |= (high << 8) | low; + ufs_renesas_write_phy(hba, addr16, data16); } static void ufs_renesas_indirect_write(struct ufs_hba *hba, u32 gpio, u32 addr, @@ -266,6 +211,8 @@ static void ufs_renesas_indirect_poll(struct ufs_hba *hba, u32 gpio, u32 addr, static void ufs_renesas_pre_init(struct ufs_hba *hba) { + u32 timer_val; + /* This setting is for SERIES B */ ufs_renesas_write(hba, 0xc0, 0x49425308); ufs_renesas_write_d0_d4(hba, 0x00000104, 0x00000002); @@ -292,7 +239,7 @@ static void ufs_renesas_pre_init(struct ufs_hba *hba) ufs_renesas_poll(hba, 0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0)); ufs_renesas_write(hba, 0xd0, 0x00000d00); - ufs_renesas_save(hba, 0xd4, 0x0000ffff, TIMER_INDEX); + timer_val = ufs_renesas_read(hba, 0xd4) & 0x0000ffff; ufs_renesas_write(hba, 0xd4, 0x00000000); ufs_renesas_write_d0_d4(hba, 0x0000082c, 0x0f000000); ufs_renesas_write_d0_d4(hba, 0x00000828, 0x08000000); @@ -387,7 +334,7 @@ static void ufs_renesas_pre_init(struct ufs_hba *hba) ufs_renesas_write(hba, 0xf0, 0); ufs_renesas_write(hba, 0xd0, 0x00000d00); - ufs_renesas_restore(hba, 0xd4, TIMER_INDEX); + ufs_renesas_write(hba, 0xd4, timer_val); } static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba, -- 2.51.0 From 855bde8ce5bc17e2f5a4126126c55e1970b23c4b Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 5 Mar 2025 14:34:12 +0100 Subject: [PATCH 13/16] scsi: ufs: renesas: Remove register control helper function After refactoring the code, ufs_renesas_reg_control() is no longer needed, because all operations are simple and can be called directly. Remove the ufs_renesas_reg_control() helper function, and call udelay() directly. Signed-off-by: Yoshihiro Shimoda Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/69500e4c18be1ca1de360f9e797e282ffef04004.1741179611.git.geert+renesas@glider.be Signed-off-by: Martin K. 
Petersen --- drivers/ufs/host/ufs-renesas.c | 102 +++++---------------------------- 1 file changed, 14 insertions(+), 88 deletions(-) diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c index 100186a2e180..59e35ec4955f 100644 --- a/drivers/ufs/host/ufs-renesas.c +++ b/drivers/ufs/host/ufs-renesas.c @@ -23,106 +23,32 @@ struct ufs_renesas_priv { bool initialized; /* The hardware needs initialization once */ }; -enum ufs_renesas_init_param_mode { - MODE_POLL, - MODE_READ, - MODE_WAIT, - MODE_WRITE, -}; - -struct ufs_renesas_init_param { - enum ufs_renesas_init_param_mode mode; - u32 reg; - union { - u32 expected; - u32 delay_us; - u32 val; - } u; - u32 mask; - u32 index; -}; - static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba) { ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + "); } -static u32 ufs_renesas_reg_control(struct ufs_hba *hba, - const struct ufs_renesas_init_param *p) -{ - u32 val = 0; - int ret; - - switch (p->mode) { - case MODE_POLL: - ret = readl_poll_timeout_atomic(hba->mmio_base + p->reg, - val, - (val & p->mask) == p->u.expected, - 10, 1000); - if (ret) - dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n", - __func__, ret, val, p->mask, p->u.expected); - break; - case MODE_READ: - val = ufshcd_readl(hba, p->reg); - break; - case MODE_WAIT: - if (p->u.delay_us > 1000) - mdelay(DIV_ROUND_UP(p->u.delay_us, 1000)); - else - udelay(p->u.delay_us); - break; - case MODE_WRITE: - ufshcd_writel(hba, p->u.val, p->reg); - break; - default: - break; - } - - return val; -} - static void ufs_renesas_poll(struct ufs_hba *hba, u32 reg, u32 expected, u32 mask) { - struct ufs_renesas_init_param param = { - .mode = MODE_POLL, - .reg = reg, - .u.expected = expected, - .mask = mask, - }; - - ufs_renesas_reg_control(hba, ¶m); + int ret; + u32 val; + + ret = readl_poll_timeout_atomic(hba->mmio_base + reg, + val, (val & mask) == expected, + 10, 1000); + if (ret) + dev_err(hba->dev, "%s: poll failed %d (%08x, %08x, %08x)\n", + __func__, ret, val, mask, expected); } static u32 ufs_renesas_read(struct ufs_hba *hba, u32 reg) { - struct ufs_renesas_init_param param = { - .mode = MODE_READ, - .reg = reg, - }; - - return ufs_renesas_reg_control(hba, ¶m); -} - -static void ufs_renesas_wait(struct ufs_hba *hba, u32 delay_us) -{ - struct ufs_renesas_init_param param = { - .mode = MODE_WAIT, - .u.delay_us = delay_us, - }; - - ufs_renesas_reg_control(hba, ¶m); + return ufshcd_readl(hba, reg); } static void ufs_renesas_write(struct ufs_hba *hba, u32 reg, u32 value) { - struct ufs_renesas_init_param param = { - .mode = MODE_WRITE, - .reg = reg, - .u.val = value, - }; - - ufs_renesas_reg_control(hba, ¶m); + ufshcd_writel(hba, value, reg); } static void ufs_renesas_write_d0_d4(struct ufs_hba *hba, u32 data_d0, u32 data_d4) @@ -216,13 +142,13 @@ static void ufs_renesas_pre_init(struct ufs_hba *hba) /* This setting is for SERIES B */ ufs_renesas_write(hba, 0xc0, 0x49425308); ufs_renesas_write_d0_d4(hba, 0x00000104, 0x00000002); - ufs_renesas_wait(hba, 1); + udelay(1); ufs_renesas_write_d0_d4(hba, 0x00000828, 0x00000200); - ufs_renesas_wait(hba, 1); + udelay(1); ufs_renesas_write_d0_d4(hba, 0x00000828, 0x00000000); ufs_renesas_write_d0_d4(hba, 0x00000104, 0x00000001); ufs_renesas_write_d0_d4(hba, 0x00000940, 0x00000001); - ufs_renesas_wait(hba, 1); + udelay(1); ufs_renesas_write_d0_d4(hba, 0x00000940, 0x00000000); ufs_renesas_write(hba, 0xc0, 0x49425308); -- 2.51.0 From cca2b807c2277f15049f12947b810446f2b41451 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 5 
Mar 2025 14:34:13 +0100 Subject: [PATCH 14/16] scsi: ufs: renesas: Refactor 0x10ad/0x10af PHY settings Extract specific PHY setting of the 0x10a[df] registers into a new function. Signed-off-by: Yoshihiro Shimoda Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/110eafd1ee24f9db0285a5e2bca224e35962268a.1741179611.git.geert+renesas@glider.be Signed-off-by: Martin K. Petersen --- drivers/ufs/host/ufs-renesas.c | 37 +++++++++++++--------------------- 1 file changed, 14 insertions(+), 23 deletions(-) diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c index 59e35ec4955f..e4510e9b1a2c 100644 --- a/drivers/ufs/host/ufs-renesas.c +++ b/drivers/ufs/host/ufs-renesas.c @@ -135,6 +135,16 @@ static void ufs_renesas_indirect_poll(struct ufs_hba *hba, u32 gpio, u32 addr, ufs_renesas_write(hba, 0xf0, 0); } +static void ufs_renesas_write_phy_10ad_10af(struct ufs_hba *hba, + u32 data_10ad, u32 data_10af) +{ + ufs_renesas_write_phy(hba, 0x10ae, 0x0001); + ufs_renesas_write_phy(hba, 0x10ad, data_10ad); + ufs_renesas_write_phy(hba, 0x10af, data_10af); + ufs_renesas_write_phy(hba, 0x10b6, 0x0001); + ufs_renesas_write_phy(hba, 0x10ae, 0x0000); +} + static void ufs_renesas_pre_init(struct ufs_hba *hba) { u32 timer_val; @@ -209,29 +219,10 @@ static void ufs_renesas_pre_init(struct ufs_hba *hba) ufs_renesas_write_phy(hba, 0x4000, 0x0000); ufs_renesas_write_phy(hba, 0x4001, 0x0000); - ufs_renesas_write_phy(hba, 0x10ae, 0x0001); - ufs_renesas_write_phy(hba, 0x10ad, 0x0000); - ufs_renesas_write_phy(hba, 0x10af, 0x0001); - ufs_renesas_write_phy(hba, 0x10b6, 0x0001); - ufs_renesas_write_phy(hba, 0x10ae, 0x0000); - - ufs_renesas_write_phy(hba, 0x10ae, 0x0001); - ufs_renesas_write_phy(hba, 0x10ad, 0x0000); - ufs_renesas_write_phy(hba, 0x10af, 0x0002); - ufs_renesas_write_phy(hba, 0x10b6, 0x0001); - ufs_renesas_write_phy(hba, 0x10ae, 0x0000); - - ufs_renesas_write_phy(hba, 0x10ae, 0x0001); - ufs_renesas_write_phy(hba, 0x10ad, 0x0080); - ufs_renesas_write_phy(hba, 0x10af, 0x0000); - ufs_renesas_write_phy(hba, 0x10b6, 0x0001); - ufs_renesas_write_phy(hba, 0x10ae, 0x0000); - - ufs_renesas_write_phy(hba, 0x10ae, 0x0001); - ufs_renesas_write_phy(hba, 0x10ad, 0x0080); - ufs_renesas_write_phy(hba, 0x10af, 0x001a); - ufs_renesas_write_phy(hba, 0x10b6, 0x0001); - ufs_renesas_write_phy(hba, 0x10ae, 0x0000); + ufs_renesas_write_phy_10ad_10af(hba, 0x0000, 0x0001); + ufs_renesas_write_phy_10ad_10af(hba, 0x0000, 0x0002); + ufs_renesas_write_phy_10ad_10af(hba, 0x0080, 0x0000); + ufs_renesas_write_phy_10ad_10af(hba, 0x0080, 0x001a); ufs_renesas_indirect_write(hba, 7, 0x70, 0x0016); ufs_renesas_indirect_write(hba, 7, 0x71, 0x0016); -- 2.51.0 From 44ca16f4970e9ddbc08851d3e3a08ec2a2123356 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 5 Mar 2025 14:34:14 +0100 Subject: [PATCH 15/16] scsi: ufs: renesas: Add reusable functions Since some settings can be reused on other UFS controller (R-Car S4-8 ES1.2), add reusable functions. Signed-off-by: Yoshihiro Shimoda Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/446d67b751a96645799de3aeefec539735aa78c8.1741179611.git.geert+renesas@glider.be Signed-off-by: Martin K. 
Petersen --- drivers/ufs/host/ufs-renesas.c | 71 ++++++++++++++++++++++++---------- 1 file changed, 50 insertions(+), 21 deletions(-) diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c index e4510e9b1a2c..d9ba766dcd2f 100644 --- a/drivers/ufs/host/ufs-renesas.c +++ b/drivers/ufs/host/ufs-renesas.c @@ -135,21 +135,8 @@ static void ufs_renesas_indirect_poll(struct ufs_hba *hba, u32 gpio, u32 addr, ufs_renesas_write(hba, 0xf0, 0); } -static void ufs_renesas_write_phy_10ad_10af(struct ufs_hba *hba, - u32 data_10ad, u32 data_10af) +static void ufs_renesas_init_step1_to_3(struct ufs_hba *hba) { - ufs_renesas_write_phy(hba, 0x10ae, 0x0001); - ufs_renesas_write_phy(hba, 0x10ad, data_10ad); - ufs_renesas_write_phy(hba, 0x10af, data_10af); - ufs_renesas_write_phy(hba, 0x10b6, 0x0001); - ufs_renesas_write_phy(hba, 0x10ae, 0x0000); -} - -static void ufs_renesas_pre_init(struct ufs_hba *hba) -{ - u32 timer_val; - - /* This setting is for SERIES B */ ufs_renesas_write(hba, 0xc0, 0x49425308); ufs_renesas_write_d0_d4(hba, 0x00000104, 0x00000002); udelay(1); @@ -163,7 +150,10 @@ static void ufs_renesas_pre_init(struct ufs_hba *hba) ufs_renesas_write(hba, 0xc0, 0x49425308); ufs_renesas_write(hba, 0xc0, 0x41584901); +} +static void ufs_renesas_init_step4_to_6(struct ufs_hba *hba) +{ ufs_renesas_write_d0_d4(hba, 0x0000080c, 0x00000100); ufs_renesas_write_d0_d4(hba, 0x00000804, 0x00000000); ufs_renesas_write(hba, 0xd0, 0x0000080c); @@ -173,6 +163,11 @@ static void ufs_renesas_pre_init(struct ufs_hba *hba) ufs_renesas_write(hba, 0xd0, 0x00000804); ufs_renesas_poll(hba, 0xd4, BIT(8) | BIT(6) | BIT(0), BIT(8) | BIT(6) | BIT(0)); +} + +static u32 ufs_renesas_init_disable_timer(struct ufs_hba *hba) +{ + u32 timer_val; ufs_renesas_write(hba, 0xd0, 0x00000d00); timer_val = ufs_renesas_read(hba, 0xd4) & 0x0000ffff; @@ -184,6 +179,45 @@ static void ufs_renesas_pre_init(struct ufs_hba *hba) ufs_renesas_write(hba, 0xd0, 0x00000d2c); ufs_renesas_poll(hba, 0xd4, BIT(0), BIT(0)); + return timer_val; +} + +static void ufs_renesas_init_enable_timer(struct ufs_hba *hba, u32 timer_val) +{ + ufs_renesas_write(hba, 0xf0, 0); + ufs_renesas_write(hba, 0xd0, 0x00000d00); + ufs_renesas_write(hba, 0xd4, timer_val); +} + +static void ufs_renesas_write_phy_10ad_10af(struct ufs_hba *hba, + u32 data_10ad, u32 data_10af) +{ + ufs_renesas_write_phy(hba, 0x10ae, 0x0001); + ufs_renesas_write_phy(hba, 0x10ad, data_10ad); + ufs_renesas_write_phy(hba, 0x10af, data_10af); + ufs_renesas_write_phy(hba, 0x10b6, 0x0001); + ufs_renesas_write_phy(hba, 0x10ae, 0x0000); +} + +static void ufs_renesas_init_compensation_and_slicers(struct ufs_hba *hba) +{ + ufs_renesas_write_phy_10ad_10af(hba, 0x0000, 0x0001); + ufs_renesas_write_phy_10ad_10af(hba, 0x0000, 0x0002); + ufs_renesas_write_phy_10ad_10af(hba, 0x0080, 0x0000); + ufs_renesas_write_phy_10ad_10af(hba, 0x0080, 0x001a); +} + +static void ufs_renesas_pre_init(struct ufs_hba *hba) +{ + u32 timer_val; + + /* This setting is for SERIES B */ + ufs_renesas_init_step1_to_3(hba); + + ufs_renesas_init_step4_to_6(hba); + + timer_val = ufs_renesas_init_disable_timer(hba); + /* phy setup */ ufs_renesas_indirect_write(hba, 1, 0x01, 0x001f); ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014); @@ -219,10 +253,7 @@ static void ufs_renesas_pre_init(struct ufs_hba *hba) ufs_renesas_write_phy(hba, 0x4000, 0x0000); ufs_renesas_write_phy(hba, 0x4001, 0x0000); - ufs_renesas_write_phy_10ad_10af(hba, 0x0000, 0x0001); - ufs_renesas_write_phy_10ad_10af(hba, 0x0000, 0x0002); - 
ufs_renesas_write_phy_10ad_10af(hba, 0x0080, 0x0000); - ufs_renesas_write_phy_10ad_10af(hba, 0x0080, 0x001a); + ufs_renesas_init_compensation_and_slicers(hba); ufs_renesas_indirect_write(hba, 7, 0x70, 0x0016); ufs_renesas_indirect_write(hba, 7, 0x71, 0x0016); @@ -249,9 +280,7 @@ static void ufs_renesas_pre_init(struct ufs_hba *hba) ufs_renesas_indirect_poll(hba, 7, 0x41, 0, BIT(7)); /* end of phy setup */ - ufs_renesas_write(hba, 0xf0, 0); - ufs_renesas_write(hba, 0xd0, 0x00000d00); - ufs_renesas_write(hba, 0xd4, timer_val); + ufs_renesas_init_enable_timer(hba, timer_val); } static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba, -- 2.51.0 From b3bb1762451a9b2e3374c35ce3f8745c3a68a1d3 Mon Sep 17 00:00:00 2001 From: Yoshihiro Shimoda Date: Wed, 5 Mar 2025 14:34:15 +0100 Subject: [PATCH 16/16] scsi: ufs: renesas: Add initialization code for R-Car S4-8 ES1.2 Add initialization code for R-Car S4-8 ES1.2 to improve transfer stability. Using the new code requires downloading firmware and reading calibration data from E-FUSE. If either fails, the driver falls back to the old initialization code. Signed-off-by: Yoshihiro Shimoda Co-developed-by: Geert Uytterhoeven Signed-off-by: Geert Uytterhoeven Link: https://lore.kernel.org/r/97d83709495c764b2456d4d25846f5f48197cad0.1741179611.git.geert+renesas@glider.be Signed-off-by: Martin K. Petersen --- drivers/ufs/host/ufs-renesas.c | 199 ++++++++++++++++++++++++++++++++- 1 file changed, 194 insertions(+), 5 deletions(-) diff --git a/drivers/ufs/host/ufs-renesas.c b/drivers/ufs/host/ufs-renesas.c index d9ba766dcd2f..5bf7d0e77ad8 100644 --- a/drivers/ufs/host/ufs-renesas.c +++ b/drivers/ufs/host/ufs-renesas.c @@ -9,20 +9,31 @@ #include #include #include +#include #include #include #include +#include #include #include #include +#include #include #include "ufshcd-pltfrm.h" +#define EFUSE_CALIB_SIZE 8 + struct ufs_renesas_priv { + const struct firmware *fw; + void (*pre_init)(struct ufs_hba *hba); bool initialized; /* The hardware needs initialization once */ + u8 calib[EFUSE_CALIB_SIZE]; }; +#define UFS_RENESAS_FIRMWARE_NAME "r8a779f0_ufs.bin" +MODULE_FIRMWARE(UFS_RENESAS_FIRMWARE_NAME); + static void ufs_renesas_dbg_register_dump(struct ufs_hba *hba) { ufshcd_dump_regs(hba, 0xc0, 0x40, "regs: 0xc0 + "); @@ -116,6 +127,22 @@ static void ufs_renesas_set_phy(struct ufs_hba *hba, u32 addr16, u32 data16) ufs_renesas_write_phy(hba, addr16, data16); } +static void ufs_renesas_reset_indirect_write(struct ufs_hba *hba, int gpio, + u32 addr, u32 data) +{ + ufs_renesas_write(hba, 0xf0, gpio); + ufs_renesas_write_800_80c_poll(hba, addr, data); +} + +static void ufs_renesas_reset_indirect_update(struct ufs_hba *hba) +{ + ufs_renesas_write_d0_d4(hba, 0x0000082c, 0x0f000000); + ufs_renesas_write_d0_d4(hba, 0x00000828, 0x0f000000); + ufs_renesas_write(hba, 0xd0, 0x0000082c); + ufs_renesas_poll(hba, 0xd4, BIT(27) | BIT(26) | BIT(24), BIT(27) | BIT(26) | BIT(24)); + ufs_renesas_write(hba, 0xf0, 0); +} + static void ufs_renesas_indirect_write(struct ufs_hba *hba, u32 gpio, u32 addr, u32 data_800) { @@ -135,15 +162,19 @@ static void ufs_renesas_indirect_poll(struct ufs_hba *hba, u32 gpio, u32 addr, ufs_renesas_write(hba, 0xf0, 0); } -static void ufs_renesas_init_step1_to_3(struct ufs_hba *hba) +static void ufs_renesas_init_step1_to_3(struct ufs_hba *hba, bool init108) { ufs_renesas_write(hba, 0xc0, 0x49425308); ufs_renesas_write_d0_d4(hba, 0x00000104, 0x00000002); + if (init108) + ufs_renesas_write_d0_d4(hba, 0x00000108, 0x00000002); udelay(1); 
ufs_renesas_write_d0_d4(hba, 0x00000828, 0x00000200); udelay(1); ufs_renesas_write_d0_d4(hba, 0x00000828, 0x00000000); ufs_renesas_write_d0_d4(hba, 0x00000104, 0x00000001); + if (init108) + ufs_renesas_write_d0_d4(hba, 0x00000108, 0x00000001); ufs_renesas_write_d0_d4(hba, 0x00000940, 0x00000001); udelay(1); ufs_renesas_write_d0_d4(hba, 0x00000940, 0x00000000); @@ -207,12 +238,12 @@ static void ufs_renesas_init_compensation_and_slicers(struct ufs_hba *hba) ufs_renesas_write_phy_10ad_10af(hba, 0x0080, 0x001a); } -static void ufs_renesas_pre_init(struct ufs_hba *hba) +static void ufs_renesas_r8a779f0_es10_pre_init(struct ufs_hba *hba) { u32 timer_val; /* This setting is for SERIES B */ - ufs_renesas_init_step1_to_3(hba); + ufs_renesas_init_step1_to_3(hba, false); ufs_renesas_init_step4_to_6(hba); @@ -283,6 +314,105 @@ static void ufs_renesas_pre_init(struct ufs_hba *hba) ufs_renesas_init_enable_timer(hba, timer_val); } +static void ufs_renesas_r8a779f0_init_step3_add(struct ufs_hba *hba, bool assert) +{ + u32 val_2x = 0, val_3x = 0, val_4x = 0; + + if (assert) { + val_2x = 0x0001; + val_3x = 0x0003; + val_4x = 0x0001; + } + + ufs_renesas_reset_indirect_write(hba, 7, 0x20, val_2x); + ufs_renesas_reset_indirect_write(hba, 7, 0x4a, val_4x); + ufs_renesas_reset_indirect_write(hba, 7, 0x35, val_3x); + ufs_renesas_reset_indirect_update(hba); + ufs_renesas_reset_indirect_write(hba, 7, 0x21, val_2x); + ufs_renesas_reset_indirect_write(hba, 7, 0x4b, val_4x); + ufs_renesas_reset_indirect_write(hba, 7, 0x36, val_3x); + ufs_renesas_reset_indirect_update(hba); +} + +static void ufs_renesas_r8a779f0_pre_init(struct ufs_hba *hba) +{ + struct ufs_renesas_priv *priv = ufshcd_get_variant(hba); + u32 timer_val; + u32 data; + int i; + + /* This setting is for SERIES B */ + ufs_renesas_init_step1_to_3(hba, true); + + ufs_renesas_r8a779f0_init_step3_add(hba, true); + ufs_renesas_reset_indirect_write(hba, 7, 0x5f, 0x0063); + ufs_renesas_reset_indirect_update(hba); + ufs_renesas_reset_indirect_write(hba, 7, 0x60, 0x0003); + ufs_renesas_reset_indirect_update(hba); + ufs_renesas_reset_indirect_write(hba, 7, 0x5b, 0x00a6); + ufs_renesas_reset_indirect_update(hba); + ufs_renesas_reset_indirect_write(hba, 7, 0x5c, 0x0003); + ufs_renesas_reset_indirect_update(hba); + ufs_renesas_r8a779f0_init_step3_add(hba, false); + + ufs_renesas_init_step4_to_6(hba); + + timer_val = ufs_renesas_init_disable_timer(hba); + + ufs_renesas_indirect_write(hba, 1, 0x01, 0x001f); + ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014); + ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0014); + ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0007); + ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007); + + ufs_renesas_indirect_poll(hba, 7, 0x3c, 0, BIT(7)); + ufs_renesas_indirect_poll(hba, 7, 0x4c, 0, BIT(4)); + + ufs_renesas_indirect_write(hba, 1, 0x32, 0x0080); + ufs_renesas_indirect_write(hba, 1, 0x1f, 0x0001); + ufs_renesas_indirect_write(hba, 1, 0x2c, 0x0001); + ufs_renesas_indirect_write(hba, 1, 0x32, 0x0087); + + ufs_renesas_indirect_write(hba, 1, 0x4d, priv->calib[2]); + ufs_renesas_indirect_write(hba, 1, 0x4e, priv->calib[3]); + ufs_renesas_indirect_write(hba, 1, 0x0d, 0x0006); + ufs_renesas_indirect_write(hba, 1, 0x0e, 0x0007); + ufs_renesas_write_phy(hba, 0x0028, priv->calib[3]); + ufs_renesas_write_phy(hba, 0x4014, priv->calib[3]); + + ufs_renesas_set_phy(hba, 0x401c, BIT(2)); + + ufs_renesas_write_phy(hba, 0x4000, priv->calib[6]); + ufs_renesas_write_phy(hba, 0x4001, priv->calib[7]); + + ufs_renesas_indirect_write(hba, 1, 0x14, 0x0001); + + 
ufs_renesas_init_compensation_and_slicers(hba); + + ufs_renesas_indirect_write(hba, 7, 0x79, 0x0000); + ufs_renesas_indirect_write(hba, 7, 0x24, 0x000c); + ufs_renesas_indirect_write(hba, 7, 0x25, 0x000c); + ufs_renesas_indirect_write(hba, 7, 0x62, 0x00c0); + ufs_renesas_indirect_write(hba, 7, 0x63, 0x0001); + + for (i = 0; i < priv->fw->size / 2; i++) { + data = (priv->fw->data[i * 2 + 1] << 8) | priv->fw->data[i * 2]; + ufs_renesas_write_phy(hba, 0xc000 + i, data); + } + + ufs_renesas_indirect_write(hba, 7, 0x0d, 0x0002); + ufs_renesas_indirect_write(hba, 7, 0x0e, 0x0007); + + ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0014); + ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017); + ufs_renesas_indirect_write(hba, 7, 0x5d, 0x0004); + ufs_renesas_indirect_write(hba, 7, 0x5e, 0x0017); + ufs_renesas_indirect_poll(hba, 7, 0x55, 0, BIT(6)); + ufs_renesas_indirect_poll(hba, 7, 0x41, 0, BIT(7)); + + ufs_renesas_init_enable_timer(hba, timer_val); +} + static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba, enum ufs_notify_change_status status) { @@ -292,7 +422,7 @@ static int ufs_renesas_hce_enable_notify(struct ufs_hba *hba, return 0; if (status == PRE_CHANGE) - ufs_renesas_pre_init(hba); + priv->pre_init(hba); priv->initialized = true; @@ -310,20 +440,78 @@ static int ufs_renesas_setup_clocks(struct ufs_hba *hba, bool on, return 0; } +static const struct soc_device_attribute ufs_fallback[] = { + { .soc_id = "r8a779f0", .revision = "ES1.[01]" }, + { /* Sentinel */ } +}; + static int ufs_renesas_init(struct ufs_hba *hba) { + const struct soc_device_attribute *attr; + struct nvmem_cell *cell = NULL; + struct device *dev = hba->dev; struct ufs_renesas_priv *priv; + u8 *data = NULL; + size_t len; + int ret; - priv = devm_kzalloc(hba->dev, sizeof(*priv), GFP_KERNEL); + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; ufshcd_set_variant(hba, priv); hba->quirks |= UFSHCD_QUIRK_HIBERN_FASTAUTO; + attr = soc_device_match(ufs_fallback); + if (attr) + goto fallback; + + ret = request_firmware(&priv->fw, UFS_RENESAS_FIRMWARE_NAME, dev); + if (ret) { + dev_warn(dev, "Failed to load firmware\n"); + goto fallback; + } + + cell = nvmem_cell_get(dev, "calibration"); + if (IS_ERR(cell)) { + dev_warn(dev, "No calibration data specified\n"); + goto fallback; + } + + data = nvmem_cell_read(cell, &len); + if (IS_ERR(data)) { + dev_warn(dev, "Failed to read calibration data: %pe\n", data); + goto fallback; + } + + if (len != EFUSE_CALIB_SIZE) { + dev_warn(dev, "Invalid calibration data size %zu\n", len); + goto fallback; + } + + memcpy(priv->calib, data, EFUSE_CALIB_SIZE); + priv->pre_init = ufs_renesas_r8a779f0_pre_init; + goto out; + +fallback: + dev_info(dev, "Using ES1.0 init code\n"); + priv->pre_init = ufs_renesas_r8a779f0_es10_pre_init; + +out: + kfree(data); + if (!IS_ERR_OR_NULL(cell)) + nvmem_cell_put(cell); + return 0; } +static void ufs_renesas_exit(struct ufs_hba *hba) +{ + struct ufs_renesas_priv *priv = ufshcd_get_variant(hba); + + release_firmware(priv->fw); +} + static int ufs_renesas_set_dma_mask(struct ufs_hba *hba) { return dma_set_mask_and_coherent(hba->dev, DMA_BIT_MASK(32)); @@ -332,6 +520,7 @@ static int ufs_renesas_set_dma_mask(struct ufs_hba *hba) static const struct ufs_hba_variant_ops ufs_renesas_vops = { .name = "renesas", .init = ufs_renesas_init, + .exit = ufs_renesas_exit, .set_dma_mask = ufs_renesas_set_dma_mask, .setup_clocks = ufs_renesas_setup_clocks, .hce_enable_notify = ufs_renesas_hce_enable_notify, -- 2.51.0
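Note on using the new init path above: ufs_renesas_init() only selects ufs_renesas_r8a779f0_pre_init() when the SoC is not matched by the ES1.0/ES1.1 fallback list, request_firmware() finds r8a779f0_ufs.bin, and an nvmem cell named "calibration" yields exactly EFUSE_CALIB_SIZE (8) bytes; otherwise it prints "Using ES1.0 init code" and keeps the ES1.0 sequence. A minimal sketch of the device-tree wiring implied by the renesas,ufs binding update earlier in this series is shown below; only the 8-byte cell size, the "calibration" cell name and the &ufs_tune label come from the series, while the E-FUSE provider node, its compatible string, addresses and cell offset are placeholders, not a documented binding.

	/* Placeholder E-FUSE provider -- compatible, reg and offset are assumed */
	efuse: efuse@0 {
		compatible = "vendor,example-efuse";
		reg = <0x0 0x100>;
		#address-cells = <1>;
		#size-cells = <1>;

		/* 8 bytes, matching EFUSE_CALIB_SIZE in ufs-renesas.c */
		ufs_tune: calibration@0 {
			reg = <0x0 0x8>;
		};
	};

	/* Consumer side, as in the renesas,ufs.yaml example (UFS node label assumed) */
	&ufs {
		nvmem-cells = <&ufs_tune>;
		nvmem-cell-names = "calibration";
	};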