From 514eff7b0aa1c5eb645ddbb8676ef3e2d88a8b99 Mon Sep 17 00:00:00 2001
From: Vladimir Oltean
Date: Tue, 15 Apr 2025 00:30:20 +0300
Subject: [PATCH 01/16] net: dsa: avoid refcount warnings when
 ds->ops->tag_8021q_vlan_del() fails

This is very similar to the problem and solution from commit
232deb3f9567 ("net: dsa: avoid refcount warnings when
->port_{fdb,mdb}_del returns error"), except for the
dsa_port_do_tag_8021q_vlan_del() operation.

Fixes: c64b9c05045a ("net: dsa: tag_8021q: add proper cross-chip notifier support")
Signed-off-by: Vladimir Oltean
Link: https://patch.msgid.link/20250414213020.2959021-1-vladimir.oltean@nxp.com
Signed-off-by: Jakub Kicinski
---
 net/dsa/tag_8021q.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c
index 3ee53e28ec2e..53e03fd8071b 100644
--- a/net/dsa/tag_8021q.c
+++ b/net/dsa/tag_8021q.c
@@ -197,7 +197,7 @@ static int dsa_port_do_tag_8021q_vlan_del(struct dsa_port *dp, u16 vid)
 
 	err = ds->ops->tag_8021q_vlan_del(ds, port, vid);
 	if (err) {
-		refcount_inc(&v->refcount);
+		refcount_set(&v->refcount, 1);
 		return err;
 	}
-- 
2.51.0

From 2a5970d5aaff8f3e33ce3bfaa403ae88c40de40d Mon Sep 17 00:00:00 2001
From: Sagi Maimon
Date: Tue, 15 Apr 2025 08:31:31 +0300
Subject: [PATCH 02/16] ptp: ocp: fix start time alignment in
 ptp_ocp_signal_set

In ptp_ocp_signal_set, the start time for periodic signals is not
aligned to the next period boundary. The current code rounds up the
start time and divides by the period but fails to multiply back by the
period, causing misaligned signal starts.

Fix this by multiplying the rounded-up value by the period, so that the
start time lands on the next period boundary.

Fixes: 4bd46bb037f8e ("ptp: ocp: Use DIV64_U64_ROUND_UP for rounding.")
Signed-off-by: Sagi Maimon
Reviewed-by: Vadim Fedorenko
Link: https://patch.msgid.link/20250415053131.129413-1-maimon.sagi@gmail.com
Signed-off-by: Jakub Kicinski
---
 drivers/ptp/ptp_ocp.c | 1 +
 1 file changed, 1 insertion(+)

diff --git a/drivers/ptp/ptp_ocp.c b/drivers/ptp/ptp_ocp.c
index 7945c6be1f7c..faf6e027f89a 100644
--- a/drivers/ptp/ptp_ocp.c
+++ b/drivers/ptp/ptp_ocp.c
@@ -2067,6 +2067,7 @@ ptp_ocp_signal_set(struct ptp_ocp *bp, int gen, struct ptp_ocp_signal *s)
 	if (!s->start) {
 		/* roundup() does not work on 32-bit systems */
 		s->start = DIV64_U64_ROUND_UP(start_ns, s->period);
+		s->start *= s->period;
 		s->start = ktime_add(s->start, s->phase);
 	}
 
-- 
2.51.0

From 4798cfa2097f0833d54d8f5ce20ef14631917839 Mon Sep 17 00:00:00 2001
From: Jakub Kicinski
Date: Tue, 15 Apr 2025 08:15:52 -0700
Subject: [PATCH 03/16] net: don't try to ops lock uninitialized devs

We need to be careful when operating on dev while in rtnl_create_link().
Some devices (vxlan) initialize netdev_ops only later, in ->newlink.
Avoid using netdev_lock_ops(); the device isn't registered yet, so we
cannot legally call its ops or generate any notifications for it.

netdev_ops_assert_locked_or_invisible() is safe to use, as it checks
registration status first.
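[Editor's note: a minimal sketch of why such an assert can be safe on
unregistered devices. Illustrative only; the helper name below is
hypothetical and this is not the exact in-tree implementation.]

  /* Illustrative: assert the ops lock only for devices that are
   * already visible; an unregistered device cannot be reached by
   * other threads, so no lock is required for it.
   */
  static inline void assert_ops_locked_or_invisible(struct net_device *dev)
  {
          if (READ_ONCE(dev->reg_state) != NETREG_REGISTERED)
                  return; /* invisible: nothing can race with us */
          netdev_ops_assert_locked(dev);
  }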
Reported-by: syzbot+de1c7d68a10e3f123bdd@syzkaller.appspotmail.com
Fixes: 04efcee6ef8d ("net: hold instance lock during NETDEV_CHANGE")
Acked-by: Stanislav Fomichev
Reviewed-by: Kuniyuki Iwashima
Link: https://patch.msgid.link/20250415151552.768373-1-kuba@kernel.org
Signed-off-by: Jakub Kicinski
---
 net/core/dev.c       | 2 ++
 net/core/rtnetlink.c | 5 +----
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/net/core/dev.c b/net/core/dev.c
index 5fcbc66d865e..1be7cb73a602 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1520,6 +1520,8 @@ EXPORT_SYMBOL(netdev_features_change);
 
 void netif_state_change(struct net_device *dev)
 {
+	netdev_ops_assert_locked_or_invisible(dev);
+
 	if (dev->flags & IFF_UP) {
 		struct netdev_notifier_change_info change_info = {
 			.info.dev = dev,
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 39a5b72e861f..c5a7f41982a5 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3676,11 +3676,8 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
 			nla_len(tb[IFLA_BROADCAST]));
 	if (tb[IFLA_TXQLEN])
 		dev->tx_queue_len = nla_get_u32(tb[IFLA_TXQLEN]);
-	if (tb[IFLA_OPERSTATE]) {
-		netdev_lock_ops(dev);
+	if (tb[IFLA_OPERSTATE])
 		set_operstate(dev, nla_get_u8(tb[IFLA_OPERSTATE]));
-		netdev_unlock_ops(dev);
-	}
 	if (tb[IFLA_LINKMODE])
 		dev->link_mode = nla_get_u8(tb[IFLA_LINKMODE]);
 	if (tb[IFLA_GROUP])
-- 
2.51.0

From c7b67ddc3c999aa2f8d77be7ef1913298fe78f0e Mon Sep 17 00:00:00 2001
From: Hans Holmberg
Date: Wed, 9 Apr 2025 12:39:56 +0000
Subject: [PATCH 04/16] xfs: document zoned rt specifics in admin-guide

Document the lifetime, nolifetime and max_open_zones mount options
added for zoned rt file systems.

Also add documentation describing the max_open_zones sysfs attribute
exposed in /sys/fs/xfs/<dev>/zoned/

Fixes: 4e4d52075577 ("xfs: add the zoned space allocator")
Signed-off-by: Hans Holmberg
Reviewed-by: Christoph Hellwig
Reviewed-by: Darrick J. Wong
Signed-off-by: Carlos Maiolino
---
 Documentation/admin-guide/xfs.rst | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)

diff --git a/Documentation/admin-guide/xfs.rst b/Documentation/admin-guide/xfs.rst
index 7b0811d650f9..3e76276bd488 100644
--- a/Documentation/admin-guide/xfs.rst
+++ b/Documentation/admin-guide/xfs.rst
@@ -124,6 +124,14 @@ When mounting an XFS filesystem, the following options are accepted.
 	controls the size of each buffer and so is also relevant to this
 	case.
 
+  lifetime (default) or nolifetime
+	Enable data placement based on write life time hints provided
+	by the user. This turns on co-allocation of data of similar
+	life times when statistically favorable to reduce garbage
+	collection cost.
+
+	These options are only available for zoned rt file systems.
+
   logbsize=value
 	Set the size of each in-memory log buffer.  The size may be
 	specified in bytes, or in kilobytes with a "k" suffix.
@@ -143,6 +151,14 @@ When mounting an XFS filesystem, the following options are accepted.
 	optional, and the log section can be separate from the data
 	section or contained within it.
 
+  max_open_zones=value
+	Specify the max number of zones to keep open for writing on a
+	zoned rt device. Many open zones aids file data separation
+	but may impact performance on HDDs.
+
+	If ``max_open_zones`` is not specified, the value is determined
+	by the capabilities and the size of the zoned rt device.
+
   noalign
 	Data allocations will not be aligned at stripe unit boundaries.
	This is only relevant to filesystems created
@@ -546,6 +562,19 @@ The interesting knobs for XFS workqueues are as follows:
 Zoned Filesystems
 =================
 
+For zoned file systems, the following attribute is exposed in:
+
+ /sys/fs/xfs/<dev>/zoned/
+
+  max_open_zones		(Min: 1  Default: Varies  Max: UINTMAX)
+	This read-only attribute exposes the maximum number of open zones
+	available for data placement. The value is determined at mount time and
+	is limited by the capabilities of the backing zoned device, file system
+	size and the max_open_zones mount option.
+
+Zoned Filesystems
+=================
+
 For zoned file systems, the following attributes are exposed in:
 
  /sys/fs/xfs/<dev>/zoned/
-- 
2.51.0

From d2d31ea8cd80b9830cdab624e94f9d41178fc99d Mon Sep 17 00:00:00 2001
From: Florian Westphal
Date: Tue, 15 Apr 2025 15:53:48 +0200
Subject: [PATCH 05/16] netfilter: conntrack: fix erroneous removal of
 offload bit

The blamed commit exposes a possible issue with flow_offload_teardown():
We might remove the offload bit of a conntrack entry that has been
offloaded again.

1. conntrack entry c1 is offloaded via flow f1 (f1->ct == c1).
2. f1 times out and is pushed back to slowpath, c1 offload bit is
   removed. Due to a bug, f1 is not unlinked from the rhashtable right
   away.
3. a new packet arrives for the flow and re-offload is triggered, i.e.
   f2->ct == c1. This is because lookups in the flowtable skip entries
   with the teardown bit set.
4. Next flowtable gc cycle finds f1 again.
5. flow_offload_teardown() is called again for f1 and the c1 offload bit
   is removed again, even though we have f2 referencing the same entry.

This is harmless, but clearly not correct.

Fix the bug that exposes this: set 'teardown = true' to have the gc
callback unlink the flowtable entry from the table right away instead
of the unintentional defer to the next round.

Also prevent flow_offload_teardown() from fixing up the ct state more
than once: We could also be called from the data path or a notifier,
not only from the flowtable gc callback.

NF_FLOW_TEARDOWN can never be unset, so we can use it as a
synchronization point: if we did not see a 0 -> 1 transition, then
another CPU is already doing the ct state fixups for us.
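[Editor's note: the test_and_set_bit() pattern used here can be sketched
as a standalone C program - illustrative only, not the kernel
implementation.]

  #include <stdatomic.h>
  #include <stdio.h>

  static atomic_flag teardown = ATOMIC_FLAG_INIT;

  /* Only the caller that observes the 0 -> 1 transition runs the
   * fixup; everyone else sees the flag already set and skips it. */
  static void teardown_once(const char *who)
  {
          if (!atomic_flag_test_and_set(&teardown))
                  printf("%s: doing ct state fixup\n", who);
          else
                  printf("%s: fixup already done, skipping\n", who);
  }

  int main(void)
  {
          teardown_once("flowtable gc");  /* performs the fixup */
          teardown_once("data path");     /* skips it */
          return 0;
  }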
Fixes: 03428ca5cee9 ("netfilter: conntrack: rework offload nf_conn timeout extension logic")
Signed-off-by: Florian Westphal
Signed-off-by: Pablo Neira Ayuso
---
 net/netfilter/nf_flow_table_core.c | 10 ++++++----
 1 file changed, 6 insertions(+), 4 deletions(-)

diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 9d8361526f82..9441ac3d8c1a 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -383,8 +383,8 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
 void flow_offload_teardown(struct flow_offload *flow)
 {
 	clear_bit(IPS_OFFLOAD_BIT, &flow->ct->status);
-	set_bit(NF_FLOW_TEARDOWN, &flow->flags);
-	flow_offload_fixup_ct(flow);
+	if (!test_and_set_bit(NF_FLOW_TEARDOWN, &flow->flags))
+		flow_offload_fixup_ct(flow);
 }
 EXPORT_SYMBOL_GPL(flow_offload_teardown);
 
@@ -558,10 +558,12 @@ static void nf_flow_offload_gc_step(struct nf_flowtable *flow_table,
 
 	if (nf_flow_has_expired(flow) ||
 	    nf_ct_is_dying(flow->ct) ||
-	    nf_flow_custom_gc(flow_table, flow))
+	    nf_flow_custom_gc(flow_table, flow)) {
 		flow_offload_teardown(flow);
-	else if (!teardown)
+		teardown = true;
+	} else if (!teardown) {
 		nf_flow_table_extend_ct_timeout(flow->ct);
+	}
 
 	if (teardown) {
 		if (test_bit(NF_FLOW_HW, &flow->flags)) {
-- 
2.51.0

From 75bc744466444ef417b5f709f72b91c83301bcd1 Mon Sep 17 00:00:00 2001
From: Meghana Malladi
Date: Tue, 15 Apr 2025 14:35:41 +0530
Subject: [PATCH 06/16] net: ti: icssg-prueth: Fix kernel warning while
 bringing down network interface

During network interface initialization, the NIC driver needs to
register its Rx queue with XDP, to ensure the incoming XDP buffer
carries a pointer reference to this info, stored inside xdp_rxq_info.
While this struct isn't tied to an XDP prog, if there are any changes in
the Rx queue, the NIC driver needs to stop the Rx queue by unregistering
it with XDP before purging and reallocating memory.
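[Editor's note: the usual driver lifecycle looks roughly like this - a
hedged sketch of the common pattern, not this driver's exact code, with
error handling trimmed and 'rxq'/'pool' as placeholder names.]

  /* setup: register the Rx queue with XDP, then attach its page_pool */
  err = xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq->id, 0);
  if (!err)
          err = xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq,
                                           MEM_TYPE_PAGE_POOL, pool);

  /* teardown: unregistering also releases the attached memory model,
   * so the driver must not destroy the same page_pool a second time */
  xdp_rxq_info_unreg(&rxq->xdp_rxq);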
Drop the page_pool destroy during Rx channel reset, as this is already
handled by XDP during xdp_rxq_info_unreg (Rx queue unregister); failing
to do so will cause the following warning:

warning logs:
https://gist.github.com/MeghanaMalladiTI/eb627e5dc8de24e42d7d46572c13e576

Fixes: 46eeb90f03e0 ("net: ti: icssg-prueth: Use page_pool API for RX buffer allocation")
Signed-off-by: Meghana Malladi
Reviewed-by: Simon Horman
Reviewed-by: Roger Quadros
Reviewed-by: Jacob Keller
Link: https://patch.msgid.link/20250415090543.717991-2-m-malladi@ti.com
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/ti/icssg/icssg_common.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index 14002b026452..ec643fb69d30 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -1215,9 +1215,6 @@ void prueth_reset_rx_chan(struct prueth_rx_chn *chn,
 				      prueth_rx_cleanup);
 	if (disable)
 		k3_udma_glue_disable_rx_chn(chn->rx_chn);
-
-	page_pool_destroy(chn->pg_pool);
-	chn->pg_pool = NULL;
 }
 EXPORT_SYMBOL_GPL(prueth_reset_rx_chan);
 
-- 
2.51.0

From 8ed2fa661350f0b49edb765d18173b5c766c3686 Mon Sep 17 00:00:00 2001
From: Meghana Malladi
Date: Tue, 15 Apr 2025 14:35:42 +0530
Subject: [PATCH 07/16] net: ti: icssg-prueth: Fix possible NULL pointer
 dereference inside emac_xmit_xdp_frame()

There is an error check inside emac_xmit_xdp_frame(), the function
called when the driver wants to transmit an XDP frame, which checks
whether the allocated tx descriptor is NULL; if so, it exits and returns
ICSSG_XDP_CONSUMED, implying failure in transmission. In this case,
trying to free a descriptor which is NULL will result in a kernel crash
due to a NULL pointer dereference. Fix this error handling, and increase
the netdev tx_dropped stats in the caller of this function if the
function returns ICSSG_XDP_CONSUMED.
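[Editor's note: the general rule at play - error paths may only release
what was actually acquired - can be shown with a small standalone
example. Illustrative only, not the driver code.]

  #include <stdio.h>
  #include <stdlib.h>

  struct desc { int id; };

  /* Jumping to a cleanup label before the allocation succeeded would
   * free a NULL (or stale) pointer; return early instead. */
  static int xmit(int fail_alloc)
  {
          struct desc *d = fail_alloc ? NULL : malloc(sizeof(*d));

          if (!d)
                  return -1;  /* nothing acquired: no cleanup to run */

          /* ... map and queue the descriptor ... */
          free(d);            /* only paths that own 'd' release it */
          return 0;
  }

  int main(void)
  {
          printf("ok=%d, fail=%d\n", xmit(0), xmit(1));
          return 0;
  }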
Fixes: 62aa3246f462 ("net: ti: icssg-prueth: Add XDP support")
Reported-by: Dan Carpenter
Closes: https://lore.kernel.org/all/70d8dd76-0c76-42fc-8611-9884937c82f5@stanley.mountain/
Signed-off-by: Meghana Malladi
Reviewed-by: Simon Horman
Reviewed-by: Roger Quadros
Reviewed-by: Jacob Keller
Link: https://patch.msgid.link/20250415090543.717991-3-m-malladi@ti.com
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/ti/icssg/icssg_common.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/ti/icssg/icssg_common.c b/drivers/net/ethernet/ti/icssg/icssg_common.c
index ec643fb69d30..b4be76e13a2f 100644
--- a/drivers/net/ethernet/ti/icssg/icssg_common.c
+++ b/drivers/net/ethernet/ti/icssg/icssg_common.c
@@ -583,7 +583,7 @@ u32 emac_xmit_xdp_frame(struct prueth_emac *emac,
 	first_desc = k3_cppi_desc_pool_alloc(tx_chn->desc_pool);
 	if (!first_desc) {
 		netdev_dbg(ndev, "xdp tx: failed to allocate descriptor\n");
-		goto drop_free_descs; /* drop */
+		return ICSSG_XDP_CONSUMED; /* drop */
 	}
 
 	if (page) { /* already DMA mapped by page_pool */
@@ -671,8 +671,10 @@ static u32 emac_run_xdp(struct prueth_emac *emac, struct xdp_buff *xdp,
 		q_idx = smp_processor_id() % emac->tx_ch_num;
 
 		result = emac_xmit_xdp_frame(emac, xdpf, page, q_idx);
-		if (result == ICSSG_XDP_CONSUMED)
+		if (result == ICSSG_XDP_CONSUMED) {
+			ndev->stats.tx_dropped++;
 			goto drop;
+		}
 
 		dev_sw_netstats_rx_add(ndev, xdpf->len);
 		return result;
-- 
2.51.0

From 7349c9e9979333abfce42da5f9025598083b59c9 Mon Sep 17 00:00:00 2001
From: Meghana Malladi
Date: Tue, 15 Apr 2025 14:35:43 +0530
Subject: [PATCH 08/16] net: ti: icss-iep: Fix possible NULL pointer
 dereference for perout request

The ICSS IEP driver tracks perout and pps enable state with flags.
Currently, disabling the pps and perout signals during icss_iep_exit()
results in a NULL pointer dereference for perout.

To fix the NULL pointer dereference issue, modify
icss_iep_perout_enable_hw() to directly clear the IEP CMP registers when
disabling PPS or PEROUT, without referencing the ptp_perout_request
structure, as its contents are irrelevant in this case.
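[Editor's note: the reworked control flow amounts to handling the "off"
case before the request is ever dereferenced - a hedged sketch with
hypothetical helper names, not the exact driver code.]

  static int perout_enable_hw(struct iep *iep,
                              struct ptp_perout_request *req, int on)
  {
          if (!on) {
                  /* plain register writes; *req is never touched, so a
                   * NULL or stale request cannot be dereferenced */
                  disable_cmp_and_sync(iep);
                  return 0;
          }

          /* only the enable path reads the request contents */
          return program_cmp_and_sync(iep, req);
  }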
Fixes: 9b115361248d ("net: ti: icssg-prueth: Fix clearing of IEP_CMP_CFG registers during iep_init")
Reported-by: Dan Carpenter
Closes: https://lore.kernel.org/all/7b1c7c36-363a-4085-b26c-4f210bee1df6@stanley.mountain/
Signed-off-by: Meghana Malladi
Reviewed-by: Jacob Keller
Link: https://patch.msgid.link/20250415090543.717991-4-m-malladi@ti.com
Signed-off-by: Paolo Abeni
---
 drivers/net/ethernet/ti/icssg/icss_iep.c | 121 +++++++++++------------
 1 file changed, 58 insertions(+), 63 deletions(-)

diff --git a/drivers/net/ethernet/ti/icssg/icss_iep.c b/drivers/net/ethernet/ti/icssg/icss_iep.c
index b4a34c57b7b4..2a1c43316f46 100644
--- a/drivers/net/ethernet/ti/icssg/icss_iep.c
+++ b/drivers/net/ethernet/ti/icssg/icss_iep.c
@@ -412,6 +412,22 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
 	int ret;
 	u64 cmp;
 
+	if (!on) {
+		/* Disable CMP 1 */
+		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+				   IEP_CMP_CFG_CMP_EN(1), 0);
+
+		/* clear CMP regs */
+		regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
+		if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+			regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
+
+		/* Disable sync */
+		regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
+
+		return 0;
+	}
+
 	/* Calculate width of the signal for PPS/PEROUT handling */
 	ts.tv_sec = req->on.sec;
 	ts.tv_nsec = req->on.nsec;
@@ -430,64 +446,39 @@ static int icss_iep_perout_enable_hw(struct icss_iep *iep,
 		if (ret)
 			return ret;
 
-		if (on) {
-			/* Configure CMP */
-			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
-			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
-				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
-			/* Configure SYNC, based on req on width */
-			regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
-				     div_u64(ns_width, iep->def_inc));
-			regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
-			regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
-				     div_u64(ns_start, iep->def_inc));
-			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
-			/* Enable CMP 1 */
-			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
-					   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
-		} else {
-			/* Disable CMP 1 */
-			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
-					   IEP_CMP_CFG_CMP_EN(1), 0);
-
-			/* clear regs */
-			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
-			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
-				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
-		}
+		/* Configure CMP */
+		regmap_write(iep->map, ICSS_IEP_CMP1_REG0, lower_32_bits(cmp));
+		if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
+			regmap_write(iep->map, ICSS_IEP_CMP1_REG1, upper_32_bits(cmp));
+		/* Configure SYNC, based on req on width */
+		regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+			     div_u64(ns_width, iep->def_inc));
+		regmap_write(iep->map, ICSS_IEP_SYNC0_PERIOD_REG, 0);
+		regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+			     div_u64(ns_start, iep->def_inc));
+		regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0); /* one-shot mode */
+		/* Enable CMP 1 */
+		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+				   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
 	} else {
-		if (on) {
-			u64 start_ns;
-
-			iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
-				      req->period.nsec;
-			start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
-				   + req->period.nsec;
-			icss_iep_update_to_next_boundary(iep, start_ns);
-
-			regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
-				     div_u64(ns_width, iep->def_inc));
-			regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
-				     div_u64(ns_start, iep->def_inc));
-			/* Enable Sync in single shot mode */
-			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
-				     IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
-			/* Enable CMP 1 */
-			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
-					   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
-		} else {
-			/* Disable CMP 1 */
-			regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
-					   IEP_CMP_CFG_CMP_EN(1), 0);
-
-			/* clear CMP regs */
-			regmap_write(iep->map, ICSS_IEP_CMP1_REG0, 0);
-			if (iep->plat_data->flags & ICSS_IEP_64BIT_COUNTER_SUPPORT)
-				regmap_write(iep->map, ICSS_IEP_CMP1_REG1, 0);
-
-			/* Disable sync */
-			regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG, 0);
-		}
+		u64 start_ns;
+
+		iep->period = ((u64)req->period.sec * NSEC_PER_SEC) +
+			      req->period.nsec;
+		start_ns = ((u64)req->period.sec * NSEC_PER_SEC)
+			   + req->period.nsec;
+		icss_iep_update_to_next_boundary(iep, start_ns);
+
+		regmap_write(iep->map, ICSS_IEP_SYNC_PWIDTH_REG,
+			     div_u64(ns_width, iep->def_inc));
+		regmap_write(iep->map, ICSS_IEP_SYNC_START_REG,
+			     div_u64(ns_start, iep->def_inc));
+		/* Enable Sync in single shot mode */
+		regmap_write(iep->map, ICSS_IEP_SYNC_CTRL_REG,
+			     IEP_SYNC_CTRL_SYNC_N_EN(0) | IEP_SYNC_CTRL_SYNC_EN);
+		/* Enable CMP 1 */
+		regmap_update_bits(iep->map, ICSS_IEP_CMP_CFG_REG,
+				   IEP_CMP_CFG_CMP_EN(1), IEP_CMP_CFG_CMP_EN(1));
 	}
 
 	return 0;
@@ -498,11 +489,21 @@ static int icss_iep_perout_enable(struct icss_iep *iep,
 {
 	int ret = 0;
 
+	if (!on)
+		goto disable;
+
 	/* Reject requests with unsupported flags */
 	if (req->flags & ~(PTP_PEROUT_DUTY_CYCLE |
 			   PTP_PEROUT_PHASE))
 		return -EOPNOTSUPP;
 
+	/* Set default "on" time (1ms) for the signal if not passed by the app */
+	if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) {
+		req->on.sec = 0;
+		req->on.nsec = NSEC_PER_MSEC;
+	}
+
+disable:
 	mutex_lock(&iep->ptp_clk_mutex);
 
 	if (iep->pps_enabled) {
@@ -513,12 +514,6 @@ static int icss_iep_perout_enable(struct icss_iep *iep,
 	if (iep->perout_enabled == !!on)
 		goto exit;
 
-	/* Set default "on" time (1ms) for the signal if not passed by the app */
-	if (!(req->flags & PTP_PEROUT_DUTY_CYCLE)) {
-		req->on.sec = 0;
-		req->on.nsec = NSEC_PER_MSEC;
-	}
-
 	ret = icss_iep_perout_enable_hw(iep, req, on);
 	if (!ret)
 		iep->perout_enabled = !!on;
-- 
2.51.0

From 951a04ab3a2db4029debfa48d380ef834b93207e Mon Sep 17 00:00:00 2001
From: Tamura Dai
Date: Thu, 17 Apr 2025 10:16:05 +0900
Subject: [PATCH 09/16] spi: spi-imx: Add check for spi_imx_setupxfer()

Add a check for the return value of spi_imx_setupxfer(). The spi_imx->rx
and spi_imx->tx function pointers can be NULL when spi_imx_setupxfer()
returns an error, leading to a NULL pointer dereference:
Unable to handle kernel NULL pointer dereference at virtual address 0000000000000000
Call trace:
 0x0
 spi_imx_pio_transfer+0x50/0xd8
 spi_imx_transfer_one+0x18c/0x858
 spi_transfer_one_message+0x43c/0x790
 __spi_pump_transfer_message+0x238/0x5d4
 __spi_sync+0x2b0/0x454
 spi_write_then_read+0x11c/0x200

Signed-off-by: Tamura Dai
Reviewed-by: Carlos Song
Link: https://patch.msgid.link/20250417011700.14436-1-kirinode0@gmail.com
Signed-off-by: Mark Brown
---
 drivers/spi/spi-imx.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 832d6e9009eb..c93d80a4d734 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -1695,9 +1695,12 @@ static int spi_imx_transfer_one(struct spi_controller *controller,
 				struct spi_device *spi,
 				struct spi_transfer *transfer)
 {
+	int ret;
 	struct spi_imx_data *spi_imx = spi_controller_get_devdata(spi->controller);
 
-	spi_imx_setupxfer(spi, transfer);
+	ret = spi_imx_setupxfer(spi, transfer);
+	if (ret < 0)
+		return ret;
 	transfer->effective_speed_hz = spi_imx->spi_bus_clk;
 
 	/* flush rxfifo before transfer */
-- 
2.51.0

From 6bc2b6c6f16d8e60de518d26da1bc6bc436cf71d Mon Sep 17 00:00:00 2001
From: Bo-Cun Chen
Date: Wed, 16 Apr 2025 01:50:46 +0100
Subject: [PATCH 10/16] net: ethernet: mtk_eth_soc: reapply mdc divider on reset

In the current code, the MDC divider is reset to the default setting of
2.5MHz after a NETSYS SER. Therefore, we need to reapply the MDC divider
configuration in mtk_hw_init() after reset.

Fixes: c0a440031d431 ("net: ethernet: mtk_eth_soc: set MDIO bus clock frequency")
Signed-off-by: Bo-Cun Chen
Signed-off-by: Daniel Golle
Link: https://patch.msgid.link/8ab7381447e6cdcb317d5b5a6ddd90a1734efcb0.1744764277.git.daniel@makrotopia.org
Signed-off-by: Jakub Kicinski
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 39 +++++++++++++--------
 drivers/net/ethernet/mediatek/mtk_eth_soc.h |  1 +
 2 files changed, 25 insertions(+), 15 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 43197b28b3e7..1a235283b0e9 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -871,9 +871,25 @@ static const struct phylink_mac_ops mtk_phylink_ops = {
 	.mac_enable_tx_lpi = mtk_mac_enable_tx_lpi,
 };
 
+static void mtk_mdio_config(struct mtk_eth *eth)
+{
+	u32 val;
+
+	/* Configure MDC Divider */
+	val = FIELD_PREP(PPSC_MDC_CFG, eth->mdc_divider);
+
+	/* Configure MDC Turbo Mode */
+	if (mtk_is_netsys_v3_or_greater(eth))
+		mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
+	else
+		val |= PPSC_MDC_TURBO;
+
+	mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
+}
+
 static int mtk_mdio_init(struct mtk_eth *eth)
 {
-	unsigned int max_clk = 2500000, divider;
+	unsigned int max_clk = 2500000;
 	struct device_node *mii_np;
 	int ret;
 	u32 val;
@@ -908,20 +924,9 @@ static int mtk_mdio_init(struct mtk_eth *eth)
 		}
 		max_clk = val;
 	}
-	divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
-
-	/* Configure MDC Turbo Mode */
-	if (mtk_is_netsys_v3_or_greater(eth))
-		mtk_m32(eth, 0, MISC_MDC_TURBO, MTK_MAC_MISC_V3);
-
-	/* Configure MDC Divider */
-	val = FIELD_PREP(PPSC_MDC_CFG, divider);
-	if (!mtk_is_netsys_v3_or_greater(eth))
-		val |= PPSC_MDC_TURBO;
-	mtk_m32(eth, PPSC_MDC_CFG, val, MTK_PPSC);
-
-	dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / divider);
-
+	eth->mdc_divider = min_t(unsigned int, DIV_ROUND_UP(MDC_MAX_FREQ, max_clk), 63);
+	mtk_mdio_config(eth);
+	dev_dbg(eth->dev, "MDC is running on %d Hz\n", MDC_MAX_FREQ / eth->mdc_divider);
 
 	ret = of_mdiobus_register(eth->mii_bus, mii_np);
 
 err_put_node:
@@ -3974,6 +3979,10 @@ static int mtk_hw_init(struct mtk_eth *eth, bool reset)
 	else
 		mtk_hw_reset(eth);
 
+	/* No MT7628/88 support yet */
+	if (reset && !MTK_HAS_CAPS(eth->soc->caps, MTK_SOC_MT7628))
+		mtk_mdio_config(eth);
+
 	if (mtk_is_netsys_v3_or_greater(eth)) {
 		/* Set FE to PDMAv2 if necessary */
 		val = mtk_r32(eth, MTK_FE_GLO_MISC);
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
index 90a377ab4359..39709649ea8d 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1271,6 +1271,7 @@ struct mtk_eth {
 	struct clk		*clks[MTK_CLK_MAX];
 
 	struct mii_bus		*mii_bus;
+	unsigned int		mdc_divider;
 	struct work_struct	pending_work;
 	unsigned long		state;
-- 
2.51.0

From 6b02eb372c6776c9abb8bc81cf63f96039c24664 Mon Sep 17 00:00:00 2001
From: Bo-Cun Chen
Date: Wed, 16 Apr 2025 01:51:07 +0100
Subject: [PATCH 11/16] net: ethernet: mtk_eth_soc: correct the max weight of
 the queue limit for 100Mbps

Without this patch, the maximum weight of the queue limit will be
incorrect when linked at 100Mbps, due to an apparent typo.

Fixes: f63959c7eec31 ("net: ethernet: mtk_eth_soc: implement multi-queue support for per-port queues")
Signed-off-by: Bo-Cun Chen
Signed-off-by: Daniel Golle
Link: https://patch.msgid.link/74111ba0bdb13743313999ed467ce564e8189006.1744764277.git.daniel@makrotopia.org
Signed-off-by: Jakub Kicinski
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 1a235283b0e9..5a3cfb8908a1 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -734,7 +734,7 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
 		case SPEED_100:
 			val |= MTK_QTX_SCH_MAX_RATE_EN |
 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 103) |
-			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3);
+			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 3) |
 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
 			break;
 		case SPEED_1000:
@@ -757,7 +757,7 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
 		case SPEED_100:
 			val |= MTK_QTX_SCH_MAX_RATE_EN |
 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
-			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5);
+			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 1);
 			break;
 		case SPEED_1000:
-- 
2.51.0

From 1b66124135f5f8640bd540fadda4b20cdd23114b Mon Sep 17 00:00:00 2001
From: Bo-Cun Chen
Date: Wed, 16 Apr 2025 01:51:25 +0100
Subject: [PATCH 12/16] net: ethernet: mtk_eth_soc: revise QDMA packet
 scheduler settings

The QDMA packet scheduler suffers from a performance issue. Fix this by
picking up changes from MediaTek's SDK, which switch it from a Leaky
Bucket to a Token Bucket shaper and fix the SPEED_1000 configuration.
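[Editor's note: for background, a token bucket releases packets only
while credit accumulated at the configured rate is available, which
tolerates short bursts better than a leaky bucket's fixed drain. A
generic sketch, illustrative only and unrelated to the hardware's actual
scheduler.]

  #include <stdbool.h>
  #include <stdint.h>

  struct tbucket {
          uint64_t tokens;      /* current credit, in bytes */
          uint64_t max_tokens;  /* bucket depth, i.e. burst allowance */
          uint64_t rate;        /* refill per tick, in bytes */
  };

  static void tb_tick(struct tbucket *tb)
  {
          tb->tokens += tb->rate;
          if (tb->tokens > tb->max_tokens)
                  tb->tokens = tb->max_tokens;  /* cap the burst */
  }

  /* transmit only when enough credit has accumulated */
  static bool tb_transmit(struct tbucket *tb, uint64_t pkt_len)
  {
          if (tb->tokens < pkt_len)
                  return false;  /* hold the packet for a later tick */
          tb->tokens -= pkt_len;
          return true;
  }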
Fixes: 160d3a9b1929 ("net: ethernet: mtk_eth_soc: introduce MTK_NETSYS_V2 support")
Signed-off-by: Bo-Cun Chen
Signed-off-by: Daniel Golle
Link: https://patch.msgid.link/18040f60f9e2f5855036b75b28c4332a2d2ebdd8.1744764277.git.daniel@makrotopia.org
Signed-off-by: Jakub Kicinski
---
 drivers/net/ethernet/mediatek/mtk_eth_soc.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 5a3cfb8908a1..bdb98c9d8b1c 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -762,8 +762,8 @@ static void mtk_set_queue_speed(struct mtk_eth *eth, unsigned int idx,
 			break;
 		case SPEED_1000:
 			val |= MTK_QTX_SCH_MAX_RATE_EN |
-			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 10) |
-			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 5) |
+			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_MAN, 1) |
+			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_EXP, 6) |
 			       FIELD_PREP(MTK_QTX_SCH_MAX_RATE_WEIGHT, 10);
 			break;
 		default:
@@ -3320,7 +3320,7 @@ static int mtk_start_dma(struct mtk_eth *eth)
 		if (mtk_is_netsys_v2_or_greater(eth))
 			val |= MTK_MUTLI_CNT | MTK_RESV_BUF |
 			       MTK_WCOMP_EN | MTK_DMAD_WR_WDONE |
-			       MTK_CHK_DDONE_EN | MTK_LEAKY_BUCKET_EN;
+			       MTK_CHK_DDONE_EN;
 		else
 			val |= MTK_RX_BT_32DWORDS;
 		mtk_w32(eth, val, reg_map->qdma.glo_cfg);
-- 
2.51.0

From 261592ba06aa44001ab95fd47bafa4225bab25cf Mon Sep 17 00:00:00 2001
From: Kent Overstreet
Date: Thu, 17 Apr 2025 14:09:56 -0400
Subject: [PATCH 13/16] bcachefs: Fix snapshotting a subvolume, then renaming
 it

Subvolume roots and the dirents that point to them are special; they
don't obey the normal snapshot versioning rules because they cross
snapshot boundaries.

We don't keep around older versions of subvolume dirents on rename - we
don't need to, because subvolume dirents are only visible in the parent
subvolume, and we wouldn't be able to match up the different dirent and
inode versions due to crossing the snapshot ID boundary.

That means that when we rename a subvolume that's been snapshotted, the
older version of the subvolume root will become dangling - it won't have
a dirent that points to it.

That's expected, we just need to tell fsck that this is ok.

Fixes: https://github.com/koverstreet/bcachefs/issues/856
Signed-off-by: Kent Overstreet
---
 fs/bcachefs/fsck.c | 44 +++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 43 insertions(+), 1 deletion(-)

diff --git a/fs/bcachefs/fsck.c b/fs/bcachefs/fsck.c
index 18308f3d64a1..7b25cedd3e40 100644
--- a/fs/bcachefs/fsck.c
+++ b/fs/bcachefs/fsck.c
@@ -321,6 +321,31 @@ static inline bool inode_should_reattach(struct bch_inode_unpacked *inode)
 	    inode->bi_subvol == BCACHEFS_ROOT_SUBVOL)
 		return false;
 
+	/*
+	 * Subvolume roots are special: older versions of subvolume roots may be
+	 * disconnected, it's only the newest version that matters.
+	 *
+	 * We only keep a single dirent pointing to a subvolume root, i.e.
+	 * older versions of snapshots will not have a different dirent pointing
+	 * to the same subvolume root.
+	 *
+	 * This is because dirents that point to subvolumes are only visible in
+	 * the parent subvolume - versioning is not needed - and keeping them
+	 * around would break fsck, because when we're crossing subvolumes we
+	 * don't have a consistent snapshot ID to do check the inode <-> dirent
+	 * relationships.
+	 *
+	 * Thus, a subvolume root that's been renamed after a snapshot will have
+	 * a disconnected older version - that's expected.
+	 * Note that taking a snapshot always updates the root inode (to update
+	 * the dirent backpointer), so a subvolume root inode with
+	 * BCH_INODE_has_child_snapshot is never visible.
+	 */
+	if (inode->bi_subvol &&
+	    (inode->bi_flags & BCH_INODE_has_child_snapshot))
+		return false;
+
 	return !inode->bi_dir && !(inode->bi_flags & BCH_INODE_unlinked);
 }
 
@@ -1007,6 +1032,23 @@ static int check_inode_dirent_inode(struct btree_trans *trans,
 	if (ret && !bch2_err_matches(ret, ENOENT))
 		return ret;
 
+	if ((ret || dirent_points_to_inode_nowarn(d, inode)) &&
+	    inode->bi_subvol &&
+	    (inode->bi_flags & BCH_INODE_has_child_snapshot)) {
+		/* Older version of a renamed subvolume root: we won't have a
+		 * correct dirent for it. That's expected, see
+		 * inode_should_reattach().
+		 *
+		 * We don't clear the backpointer field when doing the rename
+		 * because there might be arbitrarily many versions in older
+		 * snapshots.
+		 */
+		inode->bi_dir = 0;
+		inode->bi_dir_offset = 0;
+		*write_inode = true;
+		goto out;
+	}
+
 	if (fsck_err_on(ret,
 			trans, inode_points_to_missing_dirent,
 			"inode points to missing dirent\n%s",
@@ -1027,7 +1069,7 @@ static int check_inode_dirent_inode(struct btree_trans *trans,
 		inode->bi_dir_offset = 0;
 		*write_inode = true;
 	}
-
+out:
 	ret = 0;
 fsck_err:
 	bch2_trans_iter_exit(trans, &dirent_iter);
-- 
2.51.0

From 4067196a52278156d18d8d6fa7f43970611b1b49 Mon Sep 17 00:00:00 2001
From: "Kirill A. Shutemov"
Date: Sat, 29 Mar 2025 19:10:29 +0200
Subject: [PATCH 14/16] mm/page_alloc: fix deadlock on cpu_hotplug_lock in
 __accept_page()

When the last page in the zone is accepted, __accept_page() calls
static_branch_dec(). This function takes cpu_hotplug_lock, which can
lead to a deadlock if the allocation occurs during the CPU bringup path,
as _cpu_up() also takes the lock.

To prevent this deadlock, defer static_branch_dec() to a workqueue.

Call static_branch_dec() directly only when workqueues are not yet
initialized. Workqueues are initialized before CPU bringup, so this will
not conflict with the first scenario.

Link: https://lkml.kernel.org/r/20250329171030.3942298-1-kirill.shutemov@linux.intel.com
Fixes: 55ad43e8ba0f ("mm: add a helper to accept page")
Signed-off-by: Kirill A. Shutemov
Reported-by: Srikanth Aithal
Tested-by: Srikanth Aithal
Cc: Dave Hansen
Cc: Ashish Kalra
Cc: David Hildenbrand
Cc: "Edgecombe, Rick P"
Cc: Mel Gorman
Cc: "Mike Rapoport (IBM)"
Cc: Thomas Lendacky
Cc: Vlastimil Babka
Signed-off-by: Andrew Morton
---
 include/linux/mmzone.h |  3 +++
 mm/internal.h          |  1 +
 mm/mm_init.c           |  1 +
 mm/page_alloc.c        | 28 ++++++++++++++++++++++++++--
 4 files changed, 31 insertions(+), 2 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 25e80b2ca7f4..4c95fcc9e9df 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -967,6 +967,9 @@ struct zone {
 #ifdef CONFIG_UNACCEPTED_MEMORY
 	/* Pages to be accepted. All pages on the list are MAX_PAGE_ORDER */
 	struct list_head	unaccepted_pages;
+
+	/* To be called once the last page in the zone is accepted */
+	struct work_struct	unaccepted_cleanup;
 #endif
 
 	/* zone flags, see below */
diff --git a/mm/internal.h b/mm/internal.h
index 50c2f590b2d0..e9695baa5922 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -1595,6 +1595,7 @@ unsigned long move_page_tables(struct pagetable_move_control *pmc);
 
 #ifdef CONFIG_UNACCEPTED_MEMORY
 void accept_page(struct page *page);
+void unaccepted_cleanup_work(struct work_struct *work);
 #else /* CONFIG_UNACCEPTED_MEMORY */
 static inline void accept_page(struct page *page)
 {
diff --git a/mm/mm_init.c b/mm/mm_init.c
index 84f14fa12d0d..9659689b8ace 100644
--- a/mm/mm_init.c
+++ b/mm/mm_init.c
@@ -1441,6 +1441,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
 
 #ifdef CONFIG_UNACCEPTED_MEMORY
 	INIT_LIST_HEAD(&zone->unaccepted_pages);
+	INIT_WORK(&zone->unaccepted_cleanup, unaccepted_cleanup_work);
 #endif
 }
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1715e34b91af..e506e365d6f1 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -7191,6 +7191,11 @@ static DEFINE_STATIC_KEY_FALSE(zones_with_unaccepted_pages);
 
 static bool lazy_accept = true;
 
+void unaccepted_cleanup_work(struct work_struct *work)
+{
+	static_branch_dec(&zones_with_unaccepted_pages);
+}
+
 static int __init accept_memory_parse(char *p)
 {
 	if (!strcmp(p, "lazy")) {
@@ -7229,8 +7234,27 @@ static void __accept_page(struct zone *zone, unsigned long *flags,
 
 	__free_pages_ok(page, MAX_PAGE_ORDER, FPI_TO_TAIL);
 
-	if (last)
-		static_branch_dec(&zones_with_unaccepted_pages);
+	if (last) {
+		/*
+		 * There are two corner cases:
+		 *
+		 * - If allocation occurs during the CPU bring up,
+		 *   static_branch_dec() cannot be used directly as
+		 *   it causes a deadlock on cpu_hotplug_lock.
+		 *
+		 *   Instead, use schedule_work() to prevent deadlock.
+		 *
+		 * - If allocation occurs before workqueues are initialized,
+		 *   static_branch_dec() should be called directly.
+		 *
+		 *   Workqueues are initialized before CPU bring up, so this
+		 *   will not conflict with the first scenario.
+		 */
+		if (system_wq)
+			schedule_work(&zone->unaccepted_cleanup);
+		else
+			unaccepted_cleanup_work(&zone->unaccepted_cleanup);
+	}
 }
 
 void accept_page(struct page *page)
-- 
2.51.0

From 98b1917cdef92c29fc9a14060d5606c619050c2c Mon Sep 17 00:00:00 2001
From: David Hildenbrand
Date: Thu, 10 Apr 2025 11:10:20 +0200
Subject: [PATCH 15/16] fs/dax: fix folio splitting issue by resetting old
 folio order + _nr_pages

Alison reports an issue with fsdax when large extents end up using large
ZONE_DEVICE folios:

[ 417.796271] BUG: kernel NULL pointer dereference, address: 0000000000000b00
[ 417.796982] #PF: supervisor read access in kernel mode
[ 417.797540] #PF: error_code(0x0000) - not-present page
[ 417.798123] PGD 2a5c5067 P4D 2a5c5067 PUD 2a5c6067 PMD 0
[ 417.798690] Oops: Oops: 0000 [#1] SMP NOPTI
[ 417.799178] CPU: 5 UID: 0 PID: 1515 Comm: mmap Tainted: ...
[ 417.800150] Tainted: [O]=OOT_MODULE
[ 417.800583] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 0.0.0 02/06/2015
[ 417.801358] RIP: 0010:__lruvec_stat_mod_folio+0x7e/0x250
[ 417.801948] Code: ...
[ 417.803662] RSP: 0000:ffffc90002be3a08 EFLAGS: 00010206
[ 417.804234] RAX: 0000000000000000 RBX: 0000000000000200 RCX: 0000000000000002
[ 417.804984] RDX: ffffffff815652d7 RSI: 0000000000000000 RDI: ffffffff82a2beae
[ 417.805689] RBP: ffffc90002be3a28 R08: 0000000000000000 R09: 0000000000000000
[ 417.806384] R10: ffffea0007000040 R11: ffff888376ffe000 R12: 0000000000000001
[ 417.807099] R13: 0000000000000012 R14: ffff88807fe4ab40 R15: ffff888029210580
[ 417.807801] FS: 00007f339fa7a740(0000) GS:ffff8881fa9b9000(0000) knlGS:0000000000000000
[ 417.808570] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
[ 417.809193] CR2: 0000000000000b00 CR3: 000000002a4f0004 CR4: 0000000000370ef0
[ 417.809925] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
[ 417.810622] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
[ 417.811353] Call Trace:
[ 417.811709]  <TASK>
[ 417.812038]  folio_add_file_rmap_ptes+0x143/0x230
[ 417.812566]  insert_page_into_pte_locked+0x1ee/0x3c0
[ 417.813132]  insert_page+0x78/0xf0
[ 417.813558]  vmf_insert_page_mkwrite+0x55/0xa0
[ 417.814088]  dax_fault_iter+0x484/0x7b0
[ 417.814542]  dax_iomap_pte_fault+0x1ca/0x620
[ 417.815055]  dax_iomap_fault+0x39/0x40
[ 417.815499]  __xfs_write_fault+0x139/0x380
[ 417.815995]  ? __handle_mm_fault+0x5e5/0x1a60
[ 417.816483]  xfs_write_fault+0x41/0x50
[ 417.816966]  xfs_filemap_fault+0x3b/0xe0
[ 417.817424]  __do_fault+0x31/0x180
[ 417.817859]  __handle_mm_fault+0xee1/0x1a60
[ 417.818325]  ? debug_smp_processor_id+0x17/0x20
[ 417.818844]  handle_mm_fault+0xe1/0x2b0
[...]

The issue is that when we split a large ZONE_DEVICE folio to order-0
ones, we don't reset the order/_nr_pages. As folio->_nr_pages overlays
page[1]->memcg_data, once page[1] is a folio, it suddenly looks like it
has folio->memcg_data set. And we never manually initialize
folio->memcg_data in fsdax code, because we never expect it to be set at
all.

When __lruvec_stat_mod_folio() then stumbles over such a folio, it tries
to use folio->memcg_data (because it's non-NULL), but it does not
actually point at a memcg, resulting in the problem.

Alison also observed that these folios sometimes have "locked" set,
which is rather concerning (folios locked from the beginning ...). The
reason is that the order for large folios is stored in page[1]->flags,
which become the folio->flags of a new small folio.

Let's fix it by adding a folio helper to clear order/_nr_pages for
splitting purposes.

Maybe we should reinitialize other large folio flags / folio members as
well when splitting, because they might similarly cause harm once
page[1] becomes a folio? At least other flags in PAGE_FLAGS_SECOND
should not be set for fsdax, so at least page[1]->flags might be as
expected with this fix.

From a quick glimpse, initializing ->mapping, ->pgmap and ->share should
re-initialize most things from a previous page[1] used by large folios
that fsdax cares about. For example, folio->private might not get
reinitialized, but maybe that's not relevant -- no traces of its use in
fsdax code. Needs a closer look.

Another thing that should be considered in the future is performing
similar checks as we perform in free_tail_page_prepare() -- checking
pincount etc. -- when freeing a large fsdax folio.
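[Editor's note: the underlying hazard - one field overlaying another in
the first tail page - can be reproduced with a plain union. Purely
illustrative; the real struct folio layout is more involved.]

  #include <stdio.h>

  /* Two roles for the same storage, like _nr_pages overlaying
   * memcg_data in page[1]: stale data from one role is misread by
   * the other unless it is reset when the role changes. */
  union tail_word {
          unsigned long nr_pages;    /* valid while part of a large folio */
          unsigned long memcg_data;  /* valid once page[1] is its own folio */
  };

  int main(void)
  {
          union tail_word w = { .nr_pages = 512 };

          /* split to order-0 without resetting: looks like memcg_data */
          printf("memcg_data after split: %#lx\n", w.memcg_data);

          w.nr_pages = 0;  /* what folio_reset_order() achieves */
          printf("after reset: %#lx\n", w.memcg_data);
          return 0;
  }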
Link: https://lkml.kernel.org/r/20250410091020.119116-1-david@redhat.com
Fixes: 4996fc547f5b ("mm: let _folio_nr_pages overlay memcg_data in first tail page")
Fixes: 38607c62b34b ("fs/dax: properly refcount fs dax pages")
Signed-off-by: David Hildenbrand
Reported-by: Alison Schofield
Closes: https://lkml.kernel.org/r/Z_W9Oeg-D9FhImf3@aschofie-mobl2.lan
Tested-by: Alison Schofield
Reviewed-by: Dan Williams
Tested-by: "Darrick J. Wong"
Cc: Alexander Viro
Cc: Christian Brauner
Cc: Jan Kara
Cc: Matthew Wilcox
Cc: Alistair Popple
Cc: Christoph Hellwig
Signed-off-by: Andrew Morton
---
 fs/dax.c           |  1 +
 include/linux/mm.h | 17 +++++++++++++++++
 2 files changed, 18 insertions(+)

diff --git a/fs/dax.c b/fs/dax.c
index af5045b0f476..676303419e9e 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -396,6 +396,7 @@ static inline unsigned long dax_folio_put(struct folio *folio)
 	order = folio_order(folio);
 	if (!order)
 		return 0;
+	folio_reset_order(folio);
 
 	for (i = 0; i < (1UL << order); i++) {
 		struct dev_pagemap *pgmap = page_pgmap(&folio->page);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index b7f13f087954..bf55206935c4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1218,6 +1218,23 @@ static inline unsigned int folio_order(const struct folio *folio)
 	return folio_large_order(folio);
 }
 
+/**
+ * folio_reset_order - Reset the folio order and derived _nr_pages
+ * @folio: The folio.
+ *
+ * Reset the order and derived _nr_pages to 0. Must only be used in the
+ * process of splitting large folios.
+ */
+static inline void folio_reset_order(struct folio *folio)
+{
+	if (WARN_ON_ONCE(!folio_test_large(folio)))
+		return;
+	folio->_flags_1 &= ~0xffUL;
+#ifdef NR_PAGES_IN_LARGE_FOLIO
+	folio->_nr_pages = 0;
+#endif
+}
+
 #include <asm/memory_model.h>
 
 /*
-- 
2.51.0

From 8ad5ac8f4fc4848d17db809038773ee0bee76b0b Mon Sep 17 00:00:00 2001
From: Vlastimil Babka
Date: Thu, 10 Apr 2025 11:00:22 +0200
Subject: [PATCH 16/16] MAINTAINERS: update SLAB ALLOCATOR maintainers

With permission, reduce the number of maintainers. Create a CREDITS
entry for Joonsoo (Pekka already has one). Thanks for all the work!

Link: https://lkml.kernel.org/r/20250410090021.72296-3-vbabka@suse.cz
Signed-off-by: Vlastimil Babka
Acked-by: Harry Yoo
Acked-by: Christoph Lameter (Ampere)
Acked-by: David Rientjes
Cc: Pekka Enberg
Cc: Joonsoo Kim
Cc: Roman Gushchin
Cc: Brendan Jackman
Cc: Johannes Weiner
Cc: Michal Hocko
Cc: Suren Baghdasaryan
Cc: Zi Yan
Signed-off-by: Andrew Morton
---
 CREDITS     | 4 ++++
 MAINTAINERS | 2 --
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/CREDITS b/CREDITS
index 1b77fba6c27e..f74d230992d6 100644
--- a/CREDITS
+++ b/CREDITS
@@ -2071,6 +2071,10 @@ S: 660 Harvard Ave. #7
 S: Santa Clara, CA 95051
 S: USA
 
+N: Joonsoo Kim
+E: iamjoonsoo.kim@lge.com
+D: Slab allocators
+
 N: Kukjin Kim
 E: kgene@kernel.org
 D: Samsung S3C, S5P and Exynos ARM architectures
diff --git a/MAINTAINERS b/MAINTAINERS
index 8c7d796131a8..16c9e10622df 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -22249,9 +22249,7 @@ F: drivers/nvmem/layouts/sl28vpd.c
 
 SLAB ALLOCATOR
 M: Christoph Lameter
-M: Pekka Enberg
 M: David Rientjes
-M: Joonsoo Kim
 M: Andrew Morton
 M: Vlastimil Babka
 R: Roman Gushchin
-- 
2.51.0