From fcbb93f1e48a150159534a1e6ec19e6fdf9196df Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 24 Mar 2025 13:51:20 +0200 Subject: [PATCH 01/16] drm/display: dp: change drm_dp_dpcd_read_link_status() return value drm_dp_dpcd_read_link_status() follows the "return error code or number of bytes read" protocol, with the code returning less bytes than requested in case of some errors. However most of the drivers interpreted that as "return error code in case of any error". Switch drm_dp_dpcd_read_link_status() to drm_dp_dpcd_read_data() and make it follow that protocol too. Acked-by: Jani Nikula Reviewed-by: Lyude Paul Signed-off-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20250324-drm-rework-dpcd-access-v4-2-e80ff89593df@oss.qualcomm.com Signed-off-by: Dmitry Baryshkov --- drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 8 +++---- .../drm/bridge/cadence/cdns-mhdp8546-core.c | 2 +- drivers/gpu/drm/display/drm_dp_helper.c | 7 +++--- drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c | 4 ++-- drivers/gpu/drm/msm/dp/dp_ctrl.c | 24 ++++--------------- drivers/gpu/drm/msm/dp/dp_link.c | 18 +++++++------- drivers/gpu/drm/radeon/atombios_dp.c | 8 +++---- 7 files changed, 28 insertions(+), 43 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c index 521b9faab180..492813ab1b54 100644 --- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c +++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c @@ -458,8 +458,8 @@ bool amdgpu_atombios_dp_needs_link_train(struct amdgpu_connector *amdgpu_connect u8 link_status[DP_LINK_STATUS_SIZE]; struct amdgpu_connector_atom_dig *dig = amdgpu_connector->con_priv; - if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, link_status) - <= 0) + if (drm_dp_dpcd_read_link_status(&amdgpu_connector->ddc_bus->aux, + link_status) < 0) return false; if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count)) return false; @@ -616,7 +616,7 @@ amdgpu_atombios_dp_link_train_cr(struct amdgpu_atombios_dp_link_train_info *dp_i drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, - dp_info->link_status) <= 0) { + dp_info->link_status) < 0) { DRM_ERROR("displayport link status failed\n"); break; } @@ -681,7 +681,7 @@ amdgpu_atombios_dp_link_train_ce(struct amdgpu_atombios_dp_link_train_info *dp_i drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, - dp_info->link_status) <= 0) { + dp_info->link_status) < 0) { DRM_ERROR("displayport link status failed\n"); break; } diff --git a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c index ae1bd58975ce..96a983c970ea 100644 --- a/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c +++ b/drivers/gpu/drm/bridge/cadence/cdns-mhdp8546-core.c @@ -2306,7 +2306,7 @@ static int cdns_mhdp_update_link_status(struct cdns_mhdp_device *mhdp) * If everything looks fine, just return, as we don't handle * DP IRQs. 
*/ - if (ret > 0 && + if (!ret && drm_dp_channel_eq_ok(status, mhdp->link.num_lanes) && drm_dp_clock_recovery_ok(status, mhdp->link.num_lanes)) goto out; diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index e43a8f4a252d..410be0be233a 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -778,14 +778,13 @@ EXPORT_SYMBOL(drm_dp_dpcd_write); * @aux: DisplayPort AUX channel * @status: buffer to store the link status in (must be at least 6 bytes) * - * Returns the number of bytes transferred on success or a negative error - * code on failure. + * Returns a negative error code on failure or 0 on success. */ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux, u8 status[DP_LINK_STATUS_SIZE]) { - return drm_dp_dpcd_read(aux, DP_LANE0_1_STATUS, status, - DP_LINK_STATUS_SIZE); + return drm_dp_dpcd_read_data(aux, DP_LANE0_1_STATUS, status, + DP_LINK_STATUS_SIZE); } EXPORT_SYMBOL(drm_dp_dpcd_read_link_status); diff --git a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c index f6355c16cc0a..a3b78b0fd53e 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c +++ b/drivers/gpu/drm/hisilicon/hibmc/dp/dp_link.c @@ -188,7 +188,7 @@ static int hibmc_dp_link_training_cr(struct hibmc_dp_dev *dp) drm_dp_link_train_clock_recovery_delay(&dp->aux, dp->dpcd); ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status); - if (ret != DP_LINK_STATUS_SIZE) { + if (ret) { drm_err(dp->dev, "Get lane status failed\n"); return ret; } @@ -236,7 +236,7 @@ static int hibmc_dp_link_training_channel_eq(struct hibmc_dp_dev *dp) drm_dp_link_train_channel_eq_delay(&dp->aux, dp->dpcd); ret = drm_dp_dpcd_read_link_status(&dp->aux, lane_status); - if (ret != DP_LINK_STATUS_SIZE) { + if (ret) { drm_err(dp->dev, "get lane status failed\n"); break; } diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c index 9c463ae2f8fa..925b176b66c7 100644 --- a/drivers/gpu/drm/msm/dp/dp_ctrl.c +++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c @@ -1099,20 +1099,6 @@ static bool msm_dp_ctrl_train_pattern_set(struct msm_dp_ctrl_private *ctrl, return ret == 1; } -static int msm_dp_ctrl_read_link_status(struct msm_dp_ctrl_private *ctrl, - u8 *link_status) -{ - int ret = 0, len; - - len = drm_dp_dpcd_read_link_status(ctrl->aux, link_status); - if (len != DP_LINK_STATUS_SIZE) { - DRM_ERROR("DP link status read failed, err: %d\n", len); - ret = -EINVAL; - } - - return ret; -} - static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl, int *training_step) { @@ -1139,7 +1125,7 @@ static int msm_dp_ctrl_link_train_1(struct msm_dp_ctrl_private *ctrl, for (tries = 0; tries < maximum_retries; tries++) { drm_dp_link_train_clock_recovery_delay(ctrl->aux, ctrl->panel->dpcd); - ret = msm_dp_ctrl_read_link_status(ctrl, link_status); + ret = drm_dp_dpcd_read_link_status(ctrl->aux, link_status); if (ret) return ret; @@ -1251,7 +1237,7 @@ static int msm_dp_ctrl_link_train_2(struct msm_dp_ctrl_private *ctrl, for (tries = 0; tries <= maximum_retries; tries++) { drm_dp_link_train_channel_eq_delay(ctrl->aux, ctrl->panel->dpcd); - ret = msm_dp_ctrl_read_link_status(ctrl, link_status); + ret = drm_dp_dpcd_read_link_status(ctrl->aux, link_status); if (ret) return ret; @@ -1804,7 +1790,7 @@ static bool msm_dp_ctrl_channel_eq_ok(struct msm_dp_ctrl_private *ctrl) u8 link_status[DP_LINK_STATUS_SIZE]; int num_lanes = ctrl->link->link_params.num_lanes; - msm_dp_ctrl_read_link_status(ctrl, link_status); + 
drm_dp_dpcd_read_link_status(ctrl->aux, link_status); return drm_dp_channel_eq_ok(link_status, num_lanes); } @@ -1862,7 +1848,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl) if (!msm_dp_catalog_link_is_connected(ctrl->catalog)) break; - msm_dp_ctrl_read_link_status(ctrl, link_status); + drm_dp_dpcd_read_link_status(ctrl->aux, link_status); rc = msm_dp_ctrl_link_rate_down_shift(ctrl); if (rc < 0) { /* already in RBR = 1.6G */ @@ -1887,7 +1873,7 @@ int msm_dp_ctrl_on_link(struct msm_dp_ctrl *msm_dp_ctrl) if (!msm_dp_catalog_link_is_connected(ctrl->catalog)) break; - msm_dp_ctrl_read_link_status(ctrl, link_status); + drm_dp_dpcd_read_link_status(ctrl->aux, link_status); if (!drm_dp_clock_recovery_ok(link_status, ctrl->link->link_params.num_lanes)) diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c index 1a1fbb2d7d4f..92a9077959b3 100644 --- a/drivers/gpu/drm/msm/dp/dp_link.c +++ b/drivers/gpu/drm/msm/dp/dp_link.c @@ -714,21 +714,21 @@ end: static int msm_dp_link_parse_sink_status_field(struct msm_dp_link_private *link) { - int len; + int ret; link->prev_sink_count = link->msm_dp_link.sink_count; - len = drm_dp_read_sink_count(link->aux); - if (len < 0) { + ret = drm_dp_read_sink_count(link->aux); + if (ret < 0) { DRM_ERROR("DP parse sink count failed\n"); - return len; + return ret; } - link->msm_dp_link.sink_count = len; + link->msm_dp_link.sink_count = ret; - len = drm_dp_dpcd_read_link_status(link->aux, - link->link_status); - if (len < DP_LINK_STATUS_SIZE) { + ret = drm_dp_dpcd_read_link_status(link->aux, + link->link_status); + if (ret < 0) { DRM_ERROR("DP link status read failed\n"); - return len; + return ret; } return msm_dp_link_parse_request(link); diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index fa78824931cc..3f3c360dce4b 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -501,8 +501,8 @@ bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector) u8 link_status[DP_LINK_STATUS_SIZE]; struct radeon_connector_atom_dig *dig = radeon_connector->con_priv; - if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, link_status) - <= 0) + if (drm_dp_dpcd_read_link_status(&radeon_connector->ddc_bus->aux, + link_status) < 0) return false; if (drm_dp_channel_eq_ok(link_status, dig->dp_lane_count)) return false; @@ -678,7 +678,7 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info) drm_dp_link_train_clock_recovery_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, - dp_info->link_status) <= 0) { + dp_info->link_status) < 0) { DRM_ERROR("displayport link status failed\n"); break; } @@ -741,7 +741,7 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info) drm_dp_link_train_channel_eq_delay(dp_info->aux, dp_info->dpcd); if (drm_dp_dpcd_read_link_status(dp_info->aux, - dp_info->link_status) <= 0) { + dp_info->link_status) < 0) { DRM_ERROR("displayport link status failed\n"); break; } -- 2.50.1 From af67978ee37e543e62d6d3f7eba58f8f259423a7 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 24 Mar 2025 13:51:21 +0200 Subject: [PATCH 02/16] drm/display: dp: use new DCPD access helpers Switch drm_dp_helper.c to use new set of DPCD read / write helpers. 
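The change in calling convention looks roughly like this (a sketch only, assuming a caller with "struct drm_dp_aux *aux", "u8 dpcd[DP_RECEIVER_CAP_SIZE]" and "int ret" in scope; the actual conversions are in the diff below):

	/*
	 * Old helpers return the number of bytes transferred, so call
	 * sites have to compare the result against the requested length.
	 */
	ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
	if (ret != DP_RECEIVER_CAP_SIZE)
		return ret < 0 ? ret : -EIO;

	/*
	 * New helpers return 0 on success or a negative error code, so a
	 * plain error check is enough.
	 */
	ret = drm_dp_dpcd_read_data(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE);
	if (ret < 0)
		return ret;

The single-byte variants drm_dp_dpcd_read_byte() / drm_dp_dpcd_write_byte() follow the same 0-or-negative-errno convention, replacing the "1 byte or error" return of drm_dp_dpcd_readb() / drm_dp_dpcd_writeb().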
Reviewed-by: Lyude Paul Acked-by: Jani Nikula Signed-off-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20250324-drm-rework-dpcd-access-v4-3-e80ff89593df@oss.qualcomm.com Signed-off-by: Dmitry Baryshkov --- drivers/gpu/drm/display/drm_dp_helper.c | 296 ++++++++++-------------- 1 file changed, 116 insertions(+), 180 deletions(-) diff --git a/drivers/gpu/drm/display/drm_dp_helper.c b/drivers/gpu/drm/display/drm_dp_helper.c index 410be0be233a..e2439c8a7fef 100644 --- a/drivers/gpu/drm/display/drm_dp_helper.c +++ b/drivers/gpu/drm/display/drm_dp_helper.c @@ -327,7 +327,7 @@ static int __read_delay(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_CAP_SI if (offset < DP_RECEIVER_CAP_SIZE) { rd_interval = dpcd[offset]; } else { - if (drm_dp_dpcd_readb(aux, offset, &rd_interval) != 1) { + if (drm_dp_dpcd_read_byte(aux, offset, &rd_interval) < 0) { drm_dbg_kms(aux->drm_dev, "%s: failed rd interval read\n", aux->name); /* arbitrary default delay */ @@ -358,7 +358,7 @@ int drm_dp_128b132b_read_aux_rd_interval(struct drm_dp_aux *aux) int unit; u8 val; - if (drm_dp_dpcd_readb(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) != 1) { + if (drm_dp_dpcd_read_byte(aux, DP_128B132B_TRAINING_AUX_RD_INTERVAL, &val) < 0) { drm_err(aux->drm_dev, "%s: failed rd interval read\n", aux->name); /* default to max */ @@ -807,30 +807,20 @@ int drm_dp_dpcd_read_phy_link_status(struct drm_dp_aux *aux, { int ret; - if (dp_phy == DP_PHY_DPRX) { - ret = drm_dp_dpcd_read(aux, - DP_LANE0_1_STATUS, - link_status, - DP_LINK_STATUS_SIZE); - - if (ret < 0) - return ret; + if (dp_phy == DP_PHY_DPRX) + return drm_dp_dpcd_read_data(aux, + DP_LANE0_1_STATUS, + link_status, + DP_LINK_STATUS_SIZE); - WARN_ON(ret != DP_LINK_STATUS_SIZE); - - return 0; - } - - ret = drm_dp_dpcd_read(aux, - DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy), - link_status, - DP_LINK_STATUS_SIZE - 1); + ret = drm_dp_dpcd_read_data(aux, + DP_LANE0_1_STATUS_PHY_REPEATER(dp_phy), + link_status, + DP_LINK_STATUS_SIZE - 1); if (ret < 0) return ret; - WARN_ON(ret != DP_LINK_STATUS_SIZE - 1); - /* Convert the LTTPR to the sink PHY link status layout */ memmove(&link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS + 1], &link_status[DP_SINK_STATUS - DP_LANE0_1_STATUS], @@ -846,7 +836,7 @@ static int read_payload_update_status(struct drm_dp_aux *aux) int ret; u8 status; - ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); + ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); if (ret < 0) return ret; @@ -873,21 +863,21 @@ int drm_dp_dpcd_write_payload(struct drm_dp_aux *aux, int ret; int retries = 0; - drm_dp_dpcd_writeb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, - DP_PAYLOAD_TABLE_UPDATED); + drm_dp_dpcd_write_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, + DP_PAYLOAD_TABLE_UPDATED); payload_alloc[0] = vcpid; payload_alloc[1] = start_time_slot; payload_alloc[2] = time_slot_count; - ret = drm_dp_dpcd_write(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); - if (ret != 3) { + ret = drm_dp_dpcd_write_data(aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "failed to write payload allocation %d\n", ret); goto fail; } retry: - ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); + ret = drm_dp_dpcd_read_byte(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status); if (ret < 0) { drm_dbg_kms(aux->drm_dev, "failed to read payload table status %d\n", ret); goto fail; @@ -1043,15 +1033,15 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, { u8 link_edid_read = 0, auto_test_req = 0, test_resp = 
0; - if (drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR, - &auto_test_req, 1) < 1) { + if (drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR, + &auto_test_req) < 0) { drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n", aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR); return false; } auto_test_req &= DP_AUTOMATED_TEST_REQUEST; - if (drm_dp_dpcd_read(aux, DP_TEST_REQUEST, &link_edid_read, 1) < 1) { + if (drm_dp_dpcd_read_byte(aux, DP_TEST_REQUEST, &link_edid_read) < 0) { drm_err(aux->drm_dev, "%s: DPCD failed read at register 0x%x\n", aux->name, DP_TEST_REQUEST); return false; @@ -1064,23 +1054,23 @@ bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux, return false; } - if (drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR, - &auto_test_req, 1) < 1) { + if (drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR, + auto_test_req) < 0) { drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n", aux->name, DP_DEVICE_SERVICE_IRQ_VECTOR); return false; } /* send back checksum for the last edid extension block data */ - if (drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM, - &real_edid_checksum, 1) < 1) { + if (drm_dp_dpcd_write_byte(aux, DP_TEST_EDID_CHECKSUM, + real_edid_checksum) < 0) { drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n", aux->name, DP_TEST_EDID_CHECKSUM); return false; } test_resp |= DP_TEST_EDID_CHECKSUM_WRITE; - if (drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, &test_resp, 1) < 1) { + if (drm_dp_dpcd_write_byte(aux, DP_TEST_RESPONSE, test_resp) < 0) { drm_err(aux->drm_dev, "%s: DPCD failed write at register 0x%x\n", aux->name, DP_TEST_RESPONSE); return false; @@ -1117,12 +1107,10 @@ static int drm_dp_read_extended_dpcd_caps(struct drm_dp_aux *aux, DP_EXTENDED_RECEIVER_CAP_FIELD_PRESENT)) return 0; - ret = drm_dp_dpcd_read(aux, DP_DP13_DPCD_REV, &dpcd_ext, - sizeof(dpcd_ext)); + ret = drm_dp_dpcd_read_data(aux, DP_DP13_DPCD_REV, &dpcd_ext, + sizeof(dpcd_ext)); if (ret < 0) return ret; - if (ret != sizeof(dpcd_ext)) - return -EIO; if (dpcd[DP_DPCD_REV] > dpcd_ext[DP_DPCD_REV]) { drm_dbg_kms(aux->drm_dev, @@ -1159,10 +1147,10 @@ int drm_dp_read_dpcd_caps(struct drm_dp_aux *aux, { int ret; - ret = drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE); + ret = drm_dp_dpcd_read_data(aux, DP_DPCD_REV, dpcd, DP_RECEIVER_CAP_SIZE); if (ret < 0) return ret; - if (ret != DP_RECEIVER_CAP_SIZE || dpcd[DP_DPCD_REV] == 0) + if (dpcd[DP_DPCD_REV] == 0) return -EIO; ret = drm_dp_read_extended_dpcd_caps(aux, dpcd); @@ -1212,11 +1200,9 @@ int drm_dp_read_downstream_info(struct drm_dp_aux *aux, if (dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DETAILED_CAP_INFO_AVAILABLE) len *= 4; - ret = drm_dp_dpcd_read(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len); + ret = drm_dp_dpcd_read_data(aux, DP_DOWNSTREAM_PORT_0, downstream_ports, len); if (ret < 0) return ret; - if (ret != len) - return -EIO; drm_dbg_kms(aux->drm_dev, "%s: DPCD DFP: %*ph\n", aux->name, len, downstream_ports); @@ -1573,7 +1559,7 @@ EXPORT_SYMBOL(drm_dp_downstream_mode); */ int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]) { - return drm_dp_dpcd_read(aux, DP_BRANCH_ID, id, 6); + return drm_dp_dpcd_read_data(aux, DP_BRANCH_ID, id, 6); } EXPORT_SYMBOL(drm_dp_downstream_id); @@ -1638,13 +1624,13 @@ void drm_dp_downstream_debug(struct seq_file *m, drm_dp_downstream_id(aux, id); seq_printf(m, "\t\tID: %s\n", id); - len = drm_dp_dpcd_read(aux, DP_BRANCH_HW_REV, &rev[0], 1); - if (len > 0) + len = drm_dp_dpcd_read_data(aux, DP_BRANCH_HW_REV, &rev[0], 1); + if (!len) seq_printf(m, 
"\t\tHW: %d.%d\n", (rev[0] & 0xf0) >> 4, rev[0] & 0xf); - len = drm_dp_dpcd_read(aux, DP_BRANCH_SW_REV, rev, 2); - if (len > 0) + len = drm_dp_dpcd_read_data(aux, DP_BRANCH_SW_REV, rev, 2); + if (!len) seq_printf(m, "\t\tSW: %d.%d\n", rev[0], rev[1]); if (detailed_cap_info) { @@ -1782,11 +1768,9 @@ int drm_dp_read_sink_count(struct drm_dp_aux *aux) u8 count; int ret; - ret = drm_dp_dpcd_readb(aux, DP_SINK_COUNT, &count); + ret = drm_dp_dpcd_read_byte(aux, DP_SINK_COUNT, &count); if (ret < 0) return ret; - if (ret != 1) - return -EIO; return DP_GET_SINK_COUNT(count); } @@ -2175,13 +2159,13 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc) u8 buf, count; int ret; - ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf); if (ret < 0) return ret; WARN_ON(!(buf & DP_TEST_SINK_START)); - ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK_MISC, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK_MISC, &buf); if (ret < 0) return ret; @@ -2195,11 +2179,7 @@ static int drm_dp_aux_get_crc(struct drm_dp_aux *aux, u8 *crc) * At DP_TEST_CRC_R_CR, there's 6 bytes containing CRC data, 2 bytes * per component (RGB or CrYCb). */ - ret = drm_dp_dpcd_read(aux, DP_TEST_CRC_R_CR, crc, 6); - if (ret < 0) - return ret; - - return 0; + return drm_dp_dpcd_read_data(aux, DP_TEST_CRC_R_CR, crc, 6); } static void drm_dp_aux_crc_work(struct work_struct *work) @@ -2398,11 +2378,11 @@ int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc) u8 buf; int ret; - ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf); if (ret < 0) return ret; - ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START); + ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf | DP_TEST_SINK_START); if (ret < 0) return ret; @@ -2425,11 +2405,11 @@ int drm_dp_stop_crc(struct drm_dp_aux *aux) u8 buf; int ret; - ret = drm_dp_dpcd_readb(aux, DP_TEST_SINK, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_TEST_SINK, &buf); if (ret < 0) return ret; - ret = drm_dp_dpcd_writeb(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START); + ret = drm_dp_dpcd_write_byte(aux, DP_TEST_SINK, buf & ~DP_TEST_SINK_START); if (ret < 0) return ret; @@ -2515,11 +2495,7 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch) static int drm_dp_read_ident(struct drm_dp_aux *aux, unsigned int offset, struct drm_dp_dpcd_ident *ident) { - int ret; - - ret = drm_dp_dpcd_read(aux, offset, ident, sizeof(*ident)); - - return ret < 0 ? 
ret : 0; + return drm_dp_dpcd_read_data(aux, offset, ident, sizeof(*ident)); } static void drm_dp_dump_desc(struct drm_dp_aux *aux, @@ -2777,13 +2753,11 @@ static int drm_dp_read_lttpr_regs(struct drm_dp_aux *aux, int ret; for (offset = 0; offset < buf_size; offset += block_size) { - ret = drm_dp_dpcd_read(aux, - address + offset, - &buf[offset], block_size); + ret = drm_dp_dpcd_read_data(aux, + address + offset, + &buf[offset], block_size); if (ret < 0) return ret; - - WARN_ON(ret != block_size); } return 0; @@ -2998,12 +2972,12 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux, int err; u8 rate, lanes; - err = drm_dp_dpcd_readb(aux, DP_TEST_LINK_RATE, &rate); + err = drm_dp_dpcd_read_byte(aux, DP_TEST_LINK_RATE, &rate); if (err < 0) return err; data->link_rate = drm_dp_bw_code_to_link_rate(rate); - err = drm_dp_dpcd_readb(aux, DP_TEST_LANE_COUNT, &lanes); + err = drm_dp_dpcd_read_byte(aux, DP_TEST_LANE_COUNT, &lanes); if (err < 0) return err; data->num_lanes = lanes & DP_MAX_LANE_COUNT_MASK; @@ -3011,22 +2985,22 @@ int drm_dp_get_phy_test_pattern(struct drm_dp_aux *aux, if (lanes & DP_ENHANCED_FRAME_CAP) data->enhanced_frame_cap = true; - err = drm_dp_dpcd_readb(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern); + err = drm_dp_dpcd_read_byte(aux, DP_PHY_TEST_PATTERN, &data->phy_pattern); if (err < 0) return err; switch (data->phy_pattern) { case DP_PHY_TEST_PATTERN_80BIT_CUSTOM: - err = drm_dp_dpcd_read(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0, - &data->custom80, sizeof(data->custom80)); + err = drm_dp_dpcd_read_data(aux, DP_TEST_80BIT_CUSTOM_PATTERN_7_0, + &data->custom80, sizeof(data->custom80)); if (err < 0) return err; break; case DP_PHY_TEST_PATTERN_CP2520: - err = drm_dp_dpcd_read(aux, DP_TEST_HBR2_SCRAMBLER_RESET, - &data->hbr2_reset, - sizeof(data->hbr2_reset)); + err = drm_dp_dpcd_read_data(aux, DP_TEST_HBR2_SCRAMBLER_RESET, + &data->hbr2_reset, + sizeof(data->hbr2_reset)); if (err < 0) return err; } @@ -3053,15 +3027,15 @@ int drm_dp_set_phy_test_pattern(struct drm_dp_aux *aux, if (dp_rev < 0x12) { test_pattern = (test_pattern << 2) & DP_LINK_QUAL_PATTERN_11_MASK; - err = drm_dp_dpcd_writeb(aux, DP_TRAINING_PATTERN_SET, - test_pattern); + err = drm_dp_dpcd_write_byte(aux, DP_TRAINING_PATTERN_SET, + test_pattern); if (err < 0) return err; } else { for (i = 0; i < data->num_lanes; i++) { - err = drm_dp_dpcd_writeb(aux, - DP_LINK_QUAL_LANE0_SET + i, - test_pattern); + err = drm_dp_dpcd_write_byte(aux, + DP_LINK_QUAL_LANE0_SET + i, + test_pattern); if (err < 0) return err; } @@ -3268,8 +3242,8 @@ bool drm_dp_as_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_C if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13) return false; - if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1, - &rx_feature) != 1) { + if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1, + &rx_feature) < 0) { drm_dbg_dp(aux->drm_dev, "Failed to read DP_DPRX_FEATURE_ENUMERATION_LIST_CONT_1\n"); return false; @@ -3293,7 +3267,7 @@ bool drm_dp_vsc_sdp_supported(struct drm_dp_aux *aux, const u8 dpcd[DP_RECEIVER_ if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_13) return false; - if (drm_dp_dpcd_readb(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) != 1) { + if (drm_dp_dpcd_read_byte(aux, DP_DPRX_FEATURE_ENUMERATION_LIST, &rx_feature) < 0) { drm_dbg_dp(aux->drm_dev, "failed to read DP_DPRX_FEATURE_ENUMERATION_LIST\n"); return false; } @@ -3424,16 +3398,13 @@ EXPORT_SYMBOL(drm_dp_get_pcon_max_frl_bw); */ int drm_dp_pcon_frl_prepare(struct drm_dp_aux *aux, bool enable_frl_ready_hpd) { - int 
ret; u8 buf = DP_PCON_ENABLE_SOURCE_CTL_MODE | DP_PCON_ENABLE_LINK_FRL_MODE; if (enable_frl_ready_hpd) buf |= DP_PCON_ENABLE_HPD_READY; - ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); - - return ret; + return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); } EXPORT_SYMBOL(drm_dp_pcon_frl_prepare); @@ -3448,7 +3419,7 @@ bool drm_dp_pcon_is_frl_ready(struct drm_dp_aux *aux) int ret; u8 buf; - ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf); if (ret < 0) return false; @@ -3477,7 +3448,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps, int ret; u8 buf; - ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf); if (ret < 0) return ret; @@ -3512,11 +3483,7 @@ int drm_dp_pcon_frl_configure_1(struct drm_dp_aux *aux, int max_frl_gbps, return -EINVAL; } - ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); - if (ret < 0) - return ret; - - return 0; + return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); } EXPORT_SYMBOL(drm_dp_pcon_frl_configure_1); @@ -3542,7 +3509,7 @@ int drm_dp_pcon_frl_configure_2(struct drm_dp_aux *aux, int max_frl_mask, else buf &= ~DP_PCON_FRL_LINK_TRAIN_EXTENDED; - ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf); + return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_2, buf); if (ret < 0) return ret; @@ -3558,13 +3525,7 @@ EXPORT_SYMBOL(drm_dp_pcon_frl_configure_2); */ int drm_dp_pcon_reset_frl_config(struct drm_dp_aux *aux) { - int ret; - - ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0); - if (ret < 0) - return ret; - - return 0; + return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, 0x0); } EXPORT_SYMBOL(drm_dp_pcon_reset_frl_config); @@ -3579,7 +3540,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux) int ret; u8 buf = 0; - ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, &buf); if (ret < 0) return ret; if (!(buf & DP_PCON_ENABLE_SOURCE_CTL_MODE)) { @@ -3588,11 +3549,7 @@ int drm_dp_pcon_frl_enable(struct drm_dp_aux *aux) return -EINVAL; } buf |= DP_PCON_ENABLE_HDMI_LINK; - ret = drm_dp_dpcd_writeb(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); - if (ret < 0) - return ret; - - return 0; + return drm_dp_dpcd_write_byte(aux, DP_PCON_HDMI_LINK_CONFIG_1, buf); } EXPORT_SYMBOL(drm_dp_pcon_frl_enable); @@ -3607,7 +3564,7 @@ bool drm_dp_pcon_hdmi_link_active(struct drm_dp_aux *aux) u8 buf; int ret; - ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_TX_LINK_STATUS, &buf); if (ret < 0) return false; @@ -3632,7 +3589,7 @@ int drm_dp_pcon_hdmi_link_mode(struct drm_dp_aux *aux, u8 *frl_trained_mask) int mode; int ret; - ret = drm_dp_dpcd_readb(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_POST_FRL_STATUS, &buf); if (ret < 0) return ret; @@ -3661,7 +3618,7 @@ void drm_dp_pcon_hdmi_frl_link_error_count(struct drm_dp_aux *aux, struct drm_hdmi_info *hdmi = &connector->display_info.hdmi; for (i = 0; i < hdmi->max_lanes; i++) { - if (drm_dp_dpcd_readb(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0) + if (drm_dp_dpcd_read_byte(aux, DP_PCON_HDMI_ERROR_STATUS_LN0 + i, &buf) < 0) return; error_count = buf & DP_PCON_HDMI_ERROR_COUNT_MASK; @@ -3796,7 +3753,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 
pps_buf_config) u8 buf; int ret; - ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf); if (ret < 0) return ret; @@ -3807,11 +3764,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config) buf |= pps_buf_config << 2; } - ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf); - if (ret < 0) - return ret; - - return 0; + return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf); } /** @@ -3823,13 +3776,7 @@ int drm_dp_pcon_configure_dsc_enc(struct drm_dp_aux *aux, u8 pps_buf_config) */ int drm_dp_pcon_pps_default(struct drm_dp_aux *aux) { - int ret; - - ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED); - if (ret < 0) - return ret; - - return 0; + return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_DISABLED); } EXPORT_SYMBOL(drm_dp_pcon_pps_default); @@ -3845,15 +3792,11 @@ int drm_dp_pcon_pps_override_buf(struct drm_dp_aux *aux, u8 pps_buf[128]) { int ret; - ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, &pps_buf, 128); + ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVERRIDE_BASE, &pps_buf, 128); if (ret < 0) return ret; - ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER); - if (ret < 0) - return ret; - - return 0; + return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER); } EXPORT_SYMBOL(drm_dp_pcon_pps_override_buf); @@ -3870,21 +3813,17 @@ int drm_dp_pcon_pps_override_param(struct drm_dp_aux *aux, u8 pps_param[6]) { int ret; - ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2); - if (ret < 0) - return ret; - ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2); + ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_HEIGHT, &pps_param[0], 2); if (ret < 0) return ret; - ret = drm_dp_dpcd_write(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2); + ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_SLICE_WIDTH, &pps_param[2], 2); if (ret < 0) return ret; - - ret = drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER); + ret = drm_dp_dpcd_write_data(aux, DP_PCON_HDMI_PPS_OVRD_BPP, &pps_param[4], 2); if (ret < 0) return ret; - return 0; + return drm_dp_pcon_configure_dsc_enc(aux, DP_PCON_ENC_PPS_OVERRIDE_EN_BUFFER); } EXPORT_SYMBOL(drm_dp_pcon_pps_override_param); @@ -3900,7 +3839,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc) int ret; u8 buf; - ret = drm_dp_dpcd_readb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf); + ret = drm_dp_dpcd_read_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, &buf); if (ret < 0) return ret; @@ -3909,11 +3848,7 @@ int drm_dp_pcon_convert_rgb_to_ycbcr(struct drm_dp_aux *aux, u8 color_spc) else buf &= ~DP_CONVERSION_RGB_YCBCR_MASK; - ret = drm_dp_dpcd_writeb(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf); - if (ret < 0) - return ret; - - return 0; + return drm_dp_dpcd_write_byte(aux, DP_PROTOCOL_CONVERTER_CONTROL_2, buf); } EXPORT_SYMBOL(drm_dp_pcon_convert_rgb_to_ycbcr); @@ -3945,12 +3880,12 @@ int drm_edp_backlight_set_level(struct drm_dp_aux *aux, const struct drm_edp_bac buf[0] = level; } - ret = drm_dp_dpcd_write(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf)); - if (ret != sizeof(buf)) { + ret = drm_dp_dpcd_write_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, sizeof(buf)); + if (ret < 0) { drm_err(aux->drm_dev, "%s: Failed to write aux backlight level: %d\n", aux->name, ret); - return ret < 0 ? 
ret : -EIO; + return ret; } return 0; @@ -3968,22 +3903,22 @@ drm_edp_backlight_set_enable(struct drm_dp_aux *aux, const struct drm_edp_backli if (!bl->aux_enable) return 0; - ret = drm_dp_dpcd_readb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf); - if (ret != 1) { + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, &buf); + if (ret < 0) { drm_err(aux->drm_dev, "%s: Failed to read eDP display control register: %d\n", aux->name, ret); - return ret < 0 ? ret : -EIO; + return ret; } if (enable) buf |= DP_EDP_BACKLIGHT_ENABLE; else buf &= ~DP_EDP_BACKLIGHT_ENABLE; - ret = drm_dp_dpcd_writeb(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf); - if (ret != 1) { + ret = drm_dp_dpcd_write_byte(aux, DP_EDP_DISPLAY_CONTROL_REGISTER, buf); + if (ret < 0) { drm_err(aux->drm_dev, "%s: Failed to write eDP display control register: %d\n", aux->name, ret); - return ret < 0 ? ret : -EIO; + return ret; } return 0; @@ -4019,15 +3954,16 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli dpcd_buf = DP_EDP_BACKLIGHT_CONTROL_MODE_PWM; if (bl->pwmgen_bit_count) { - ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count); - if (ret != 1) + ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, bl->pwmgen_bit_count); + if (ret < 0) drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n", aux->name, ret); } if (bl->pwm_freq_pre_divider) { - ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_FREQ_SET, bl->pwm_freq_pre_divider); - if (ret != 1) + ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_FREQ_SET, + bl->pwm_freq_pre_divider); + if (ret < 0) drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight frequency: %d\n", aux->name, ret); @@ -4035,8 +3971,8 @@ int drm_edp_backlight_enable(struct drm_dp_aux *aux, const struct drm_edp_backli dpcd_buf |= DP_EDP_BACKLIGHT_FREQ_AUX_SET_ENABLE; } - ret = drm_dp_dpcd_writeb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf); - if (ret != 1) { + ret = drm_dp_dpcd_write_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, dpcd_buf); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux backlight mode: %d\n", aux->name, ret); return ret < 0 ? ret : -EIO; @@ -4091,8 +4027,8 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf if (!bl->aux_set) return 0; - ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn); - if (ret != 1) { + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, &pn); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap: %d\n", aux->name, ret); return -ENODEV; @@ -4125,14 +4061,14 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf * - FxP is within 25% of desired value. * Note: 25% is arbitrary value and may need some tweak. 
*/ - ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min); - if (ret != 1) { + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap min: %d\n", aux->name, ret); return 0; } - ret = drm_dp_dpcd_readb(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max); - if (ret != 1) { + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to read pwmgen bit count cap max: %d\n", aux->name, ret); return 0; @@ -4157,8 +4093,8 @@ drm_edp_backlight_probe_max(struct drm_dp_aux *aux, struct drm_edp_backlight_inf break; } - ret = drm_dp_dpcd_writeb(aux, DP_EDP_PWMGEN_BIT_COUNT, pn); - if (ret != 1) { + ret = drm_dp_dpcd_write_byte(aux, DP_EDP_PWMGEN_BIT_COUNT, pn); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to write aux pwmgen bit count: %d\n", aux->name, ret); return 0; @@ -4183,8 +4119,8 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i u8 buf[2]; u8 mode_reg; - ret = drm_dp_dpcd_readb(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg); - if (ret != 1) { + ret = drm_dp_dpcd_read_byte(aux, DP_EDP_BACKLIGHT_MODE_SET_REGISTER, &mode_reg); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight mode: %d\n", aux->name, ret); return ret < 0 ? ret : -EIO; @@ -4197,11 +4133,11 @@ drm_edp_backlight_probe_state(struct drm_dp_aux *aux, struct drm_edp_backlight_i if (*current_mode == DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD) { int size = 1 + bl->lsb_reg_used; - ret = drm_dp_dpcd_read(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size); - if (ret != size) { + ret = drm_dp_dpcd_read_data(aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB, buf, size); + if (ret < 0) { drm_dbg_kms(aux->drm_dev, "%s: Failed to read backlight level: %d\n", aux->name, ret); - return ret < 0 ? ret : -EIO; + return ret; } if (bl->lsb_reg_used) @@ -4346,8 +4282,8 @@ int drm_panel_dp_aux_backlight(struct drm_panel *panel, struct drm_dp_aux *aux) if (!panel || !panel->dev || !aux) return -EINVAL; - ret = drm_dp_dpcd_read(aux, DP_EDP_DPCD_REV, edp_dpcd, - EDP_DISPLAY_CTL_CAP_SIZE); + ret = drm_dp_dpcd_read_data(aux, DP_EDP_DPCD_REV, edp_dpcd, + EDP_DISPLAY_CTL_CAP_SIZE); if (ret < 0) return ret; -- 2.50.1 From 97f37939881327e118d6252289973c186377a075 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 24 Mar 2025 13:51:22 +0200 Subject: [PATCH 03/16] drm/display: dp-cec: use new DCPD access helpers Switch drm_dp_cec.c to use new set of DPCD read / write helpers. Reviewed-by: Lyude Paul Acked-by: Jani Nikula Signed-off-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20250324-drm-rework-dpcd-access-v4-4-e80ff89593df@oss.qualcomm.com Signed-off-by: Dmitry Baryshkov --- drivers/gpu/drm/display/drm_dp_cec.c | 37 ++++++++++++++-------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/drivers/gpu/drm/display/drm_dp_cec.c b/drivers/gpu/drm/display/drm_dp_cec.c index 56a4965e518c..ed31471bd0e2 100644 --- a/drivers/gpu/drm/display/drm_dp_cec.c +++ b/drivers/gpu/drm/display/drm_dp_cec.c @@ -96,7 +96,7 @@ static int drm_dp_cec_adap_enable(struct cec_adapter *adap, bool enable) u32 val = enable ? DP_CEC_TUNNELING_ENABLE : 0; ssize_t err = 0; - err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val); + err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val); return (enable && err < 0) ? 
err : 0; } @@ -112,7 +112,7 @@ static int drm_dp_cec_adap_log_addr(struct cec_adapter *adap, u8 addr) la_mask |= adap->log_addrs.log_addr_mask | (1 << addr); mask[0] = la_mask & 0xff; mask[1] = la_mask >> 8; - err = drm_dp_dpcd_write(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2); + err = drm_dp_dpcd_write_data(aux, DP_CEC_LOGICAL_ADDRESS_MASK, mask, 2); return (addr != CEC_LOG_ADDR_INVALID && err < 0) ? err : 0; } @@ -123,15 +123,14 @@ static int drm_dp_cec_adap_transmit(struct cec_adapter *adap, u8 attempts, unsigned int retries = min(5, attempts - 1); ssize_t err; - err = drm_dp_dpcd_write(aux, DP_CEC_TX_MESSAGE_BUFFER, - msg->msg, msg->len); + err = drm_dp_dpcd_write_data(aux, DP_CEC_TX_MESSAGE_BUFFER, + msg->msg, msg->len); if (err < 0) return err; - err = drm_dp_dpcd_writeb(aux, DP_CEC_TX_MESSAGE_INFO, - (msg->len - 1) | (retries << 4) | - DP_CEC_TX_MESSAGE_SEND); - return err < 0 ? err : 0; + return drm_dp_dpcd_write_byte(aux, DP_CEC_TX_MESSAGE_INFO, + (msg->len - 1) | (retries << 4) | + DP_CEC_TX_MESSAGE_SEND); } static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap, @@ -144,13 +143,13 @@ static int drm_dp_cec_adap_monitor_all_enable(struct cec_adapter *adap, if (!(adap->capabilities & CEC_CAP_MONITOR_ALL)) return 0; - err = drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CONTROL, &val); - if (err >= 0) { + err = drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CONTROL, &val); + if (!err) { if (enable) val |= DP_CEC_SNOOPING_ENABLE; else val &= ~DP_CEC_SNOOPING_ENABLE; - err = drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_CONTROL, val); + err = drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_CONTROL, val); } return (enable && err < 0) ? err : 0; } @@ -194,7 +193,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux) u8 rx_msg_info; ssize_t err; - err = drm_dp_dpcd_readb(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info); + err = drm_dp_dpcd_read_byte(aux, DP_CEC_RX_MESSAGE_INFO, &rx_msg_info); if (err < 0) return err; @@ -202,7 +201,7 @@ static int drm_dp_cec_received(struct drm_dp_aux *aux) return 0; msg.len = (rx_msg_info & DP_CEC_RX_MESSAGE_LEN_MASK) + 1; - err = drm_dp_dpcd_read(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len); + err = drm_dp_dpcd_read_data(aux, DP_CEC_RX_MESSAGE_BUFFER, msg.msg, msg.len); if (err < 0) return err; @@ -215,7 +214,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux) struct cec_adapter *adap = aux->cec.adap; u8 flags; - if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0) + if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, &flags) < 0) return; if (flags & DP_CEC_RX_MESSAGE_INFO_VALID) @@ -230,7 +229,7 @@ static void drm_dp_cec_handle_irq(struct drm_dp_aux *aux) (DP_CEC_TX_ADDRESS_NACK_ERROR | DP_CEC_TX_DATA_NACK_ERROR)) cec_transmit_attempt_done(adap, CEC_TX_STATUS_NACK | CEC_TX_STATUS_MAX_RETRIES); - drm_dp_dpcd_writeb(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags); + drm_dp_dpcd_write_byte(aux, DP_CEC_TUNNELING_IRQ_FLAGS, flags); } /** @@ -253,13 +252,13 @@ void drm_dp_cec_irq(struct drm_dp_aux *aux) if (!aux->cec.adap) goto unlock; - ret = drm_dp_dpcd_readb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, - &cec_irq); + ret = drm_dp_dpcd_read_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, + &cec_irq); if (ret < 0 || !(cec_irq & DP_CEC_IRQ)) goto unlock; drm_dp_cec_handle_irq(aux); - drm_dp_dpcd_writeb(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ); + drm_dp_dpcd_write_byte(aux, DP_DEVICE_SERVICE_IRQ_VECTOR_ESI1, DP_CEC_IRQ); unlock: mutex_unlock(&aux->cec.lock); } @@ -269,7 +268,7 @@ static bool drm_dp_cec_cap(struct drm_dp_aux *aux, 
u8 *cec_cap) { u8 cap = 0; - if (drm_dp_dpcd_readb(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) != 1 || + if (drm_dp_dpcd_read_byte(aux, DP_CEC_TUNNELING_CAPABILITY, &cap) < 0 || !(cap & DP_CEC_TUNNELING_CAPABLE)) return false; if (cec_cap) -- 2.50.1 From 2554da0de3e8312c7149d03d702ddc6c1ff5e3de Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 24 Mar 2025 13:51:23 +0200 Subject: [PATCH 04/16] drm/display: dp-mst-topology: use new DCPD access helpers Switch drm_dp_mst_topology.c to use new set of DPCD read / write helpers. Reviewed-by: Lyude Paul Acked-by: Jani Nikula Signed-off-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20250324-drm-rework-dpcd-access-v4-5-e80ff89593df@oss.qualcomm.com Signed-off-by: Dmitry Baryshkov --- drivers/gpu/drm/display/drm_dp_mst_topology.c | 105 +++++++++--------- 1 file changed, 51 insertions(+), 54 deletions(-) diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c index 8b68bb3fbffb..e8716e73480b 100644 --- a/drivers/gpu/drm/display/drm_dp_mst_topology.c +++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c @@ -2201,15 +2201,12 @@ static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, guid_t *guid) mstb->port_parent, DP_GUID, sizeof(buf), buf); } else { - ret = drm_dp_dpcd_write(mstb->mgr->aux, - DP_GUID, buf, sizeof(buf)); + ret = drm_dp_dpcd_write_data(mstb->mgr->aux, + DP_GUID, buf, sizeof(buf)); } } - if (ret < 16 && ret > 0) - return -EPROTO; - - return ret == 16 ? 0 : ret; + return ret; } static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb, @@ -2744,14 +2741,13 @@ retry: do { tosend = min3(mgr->max_dpcd_transaction_bytes, 16, total); - ret = drm_dp_dpcd_write(mgr->aux, regbase + offset, - &msg[offset], - tosend); - if (ret != tosend) { - if (ret == -EIO && retries < 5) { - retries++; - goto retry; - } + ret = drm_dp_dpcd_write_data(mgr->aux, regbase + offset, + &msg[offset], + tosend); + if (ret == -EIO && retries < 5) { + retries++; + goto retry; + } else if (ret < 0) { drm_dbg_kms(mgr->dev, "failed to dpcd write %d %d\n", tosend, ret); return -EIO; @@ -3624,7 +3620,7 @@ enum drm_dp_mst_mode drm_dp_read_mst_cap(struct drm_dp_aux *aux, if (dpcd[DP_DPCD_REV] < DP_DPCD_REV_12) return DRM_DP_SST; - if (drm_dp_dpcd_readb(aux, DP_MSTM_CAP, &mstm_cap) != 1) + if (drm_dp_dpcd_read_byte(aux, DP_MSTM_CAP, &mstm_cap) < 0) return DRM_DP_SST; if (mstm_cap & DP_MST_CAP) @@ -3679,10 +3675,10 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms mgr->mst_primary = mstb; drm_dp_mst_topology_get_mstb(mgr->mst_primary); - ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | - DP_UP_REQ_EN | - DP_UPSTREAM_IS_SRC); + ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | + DP_UP_REQ_EN | + DP_UPSTREAM_IS_SRC); if (ret < 0) goto out_unlock; @@ -3697,7 +3693,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms mstb = mgr->mst_primary; mgr->mst_primary = NULL; /* this can fail if the device is gone */ - drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0); + drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, 0); ret = 0; mgr->payload_id_table_cleared = false; @@ -3763,8 +3759,8 @@ EXPORT_SYMBOL(drm_dp_mst_topology_queue_probe); void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr) { mutex_lock(&mgr->lock); - drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | DP_UPSTREAM_IS_SRC); + drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | DP_UPSTREAM_IS_SRC); mutex_unlock(&mgr->lock); 
flush_work(&mgr->up_req_work); flush_work(&mgr->work); @@ -3813,18 +3809,18 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr, goto out_fail; } - ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, - DP_MST_EN | - DP_UP_REQ_EN | - DP_UPSTREAM_IS_SRC); + ret = drm_dp_dpcd_write_byte(mgr->aux, DP_MSTM_CTRL, + DP_MST_EN | + DP_UP_REQ_EN | + DP_UPSTREAM_IS_SRC); if (ret < 0) { drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n"); goto out_fail; } /* Some hubs forget their guids after they resume */ - ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, buf, sizeof(buf)); - if (ret != sizeof(buf)) { + ret = drm_dp_dpcd_read_data(mgr->aux, DP_GUID, buf, sizeof(buf)); + if (ret < 0) { drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n"); goto out_fail; } @@ -3883,8 +3879,8 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, *mstb = NULL; len = min(mgr->max_dpcd_transaction_bytes, 16); - ret = drm_dp_dpcd_read(mgr->aux, basereg, replyblock, len); - if (ret != len) { + ret = drm_dp_dpcd_read_data(mgr->aux, basereg, replyblock, len); + if (ret < 0) { drm_dbg_kms(mgr->dev, "failed to read DPCD down rep %d %d\n", len, ret); return false; } @@ -3922,9 +3918,9 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up, curreply = len; while (replylen > 0) { len = min3(replylen, mgr->max_dpcd_transaction_bytes, 16); - ret = drm_dp_dpcd_read(mgr->aux, basereg + curreply, - replyblock, len); - if (ret != len) { + ret = drm_dp_dpcd_read_data(mgr->aux, basereg + curreply, + replyblock, len); + if (ret < 0) { drm_dbg_kms(mgr->dev, "failed to read a chunk (len %d, ret %d)\n", len, ret); return false; @@ -4873,9 +4869,9 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr, int i; for (i = 0; i < DP_PAYLOAD_TABLE_SIZE; i += 16) { - if (drm_dp_dpcd_read(mgr->aux, - DP_PAYLOAD_TABLE_UPDATE_STATUS + i, - &buf[i], 16) != 16) + if (drm_dp_dpcd_read_data(mgr->aux, + DP_PAYLOAD_TABLE_UPDATE_STATUS + i, + &buf[i], 16) < 0) return false; } return true; @@ -4964,23 +4960,24 @@ void drm_dp_mst_dump_topology(struct seq_file *m, } seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf); - ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2); - if (ret != 2) { + ret = drm_dp_dpcd_read_data(mgr->aux, DP_FAUX_CAP, buf, 2); + if (ret < 0) { seq_printf(m, "faux/mst read failed\n"); goto out; } seq_printf(m, "faux/mst: %*ph\n", 2, buf); - ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1); - if (ret != 1) { + ret = drm_dp_dpcd_read_data(mgr->aux, DP_MSTM_CTRL, buf, 1); + if (ret < 0) { seq_printf(m, "mst ctrl read failed\n"); goto out; } seq_printf(m, "mst ctrl: %*ph\n", 1, buf); /* dump the standard OUI branch header */ - ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE); - if (ret != DP_BRANCH_OUI_HEADER_SIZE) { + ret = drm_dp_dpcd_read_data(mgr->aux, DP_BRANCH_OUI, buf, + DP_BRANCH_OUI_HEADER_SIZE); + if (ret < 0) { seq_printf(m, "branch oui read failed\n"); goto out; } @@ -6104,14 +6101,14 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) /* DP-to-DP peer device */ if (drm_dp_mst_is_virtual_dpcd(immediate_upstream_port)) { - if (drm_dp_dpcd_read(&port->aux, - DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1) + if (drm_dp_dpcd_read_data(&port->aux, + DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0) return NULL; - if (drm_dp_dpcd_read(&port->aux, - DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1) + if (drm_dp_dpcd_read_data(&port->aux, + DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0) return NULL; - 
if (drm_dp_dpcd_read(&immediate_upstream_port->aux, - DP_DSC_SUPPORT, &upstream_dsc, 1) != 1) + if (drm_dp_dpcd_read_data(&immediate_upstream_port->aux, + DP_DSC_SUPPORT, &upstream_dsc, 1) < 0) return NULL; /* Enpoint decompression with DP-to-DP peer device */ @@ -6149,8 +6146,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD)) { u8 dpcd_ext[DP_RECEIVER_CAP_SIZE]; - if (drm_dp_dpcd_read(immediate_upstream_aux, - DP_DSC_SUPPORT, &upstream_dsc, 1) != 1) + if (drm_dp_dpcd_read_data(immediate_upstream_aux, + DP_DSC_SUPPORT, &upstream_dsc, 1) < 0) return NULL; if (!(upstream_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED)) @@ -6172,11 +6169,11 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port) * therefore the endpoint needs to be * both DSC and FEC capable. */ - if (drm_dp_dpcd_read(&port->aux, - DP_DSC_SUPPORT, &endpoint_dsc, 1) != 1) + if (drm_dp_dpcd_read_data(&port->aux, + DP_DSC_SUPPORT, &endpoint_dsc, 1) < 0) return NULL; - if (drm_dp_dpcd_read(&port->aux, - DP_FEC_CAPABILITY, &endpoint_fec, 1) != 1) + if (drm_dp_dpcd_read_data(&port->aux, + DP_FEC_CAPABILITY, &endpoint_fec, 1) < 0) return NULL; if ((endpoint_dsc & DP_DSC_DECOMPRESSION_IS_SUPPORTED) && (endpoint_fec & DP_FEC_CAPABLE)) -- 2.50.1 From 95c4ea2e0329b370a53a041a19227f8da3f47481 Mon Sep 17 00:00:00 2001 From: Dmitry Baryshkov Date: Mon, 24 Mar 2025 13:51:24 +0200 Subject: [PATCH 05/16] drm/display: dp-tunnel: use new DCPD access helpers Switch drm_dp_tunnel.c to use new set of DPCD read / write helpers. Reviewed-by: Lyude Paul Acked-by: Jani Nikula Signed-off-by: Dmitry Baryshkov Link: https://lore.kernel.org/r/20250324-drm-rework-dpcd-access-v4-6-e80ff89593df@oss.qualcomm.com Signed-off-by: Dmitry Baryshkov --- drivers/gpu/drm/display/drm_dp_tunnel.c | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/display/drm_dp_tunnel.c b/drivers/gpu/drm/display/drm_dp_tunnel.c index 90fe07a89260..076edf161048 100644 --- a/drivers/gpu/drm/display/drm_dp_tunnel.c +++ b/drivers/gpu/drm/display/drm_dp_tunnel.c @@ -222,7 +222,7 @@ static int read_tunnel_regs(struct drm_dp_aux *aux, struct drm_dp_tunnel_regs *r while ((len = next_reg_area(&offset))) { int address = DP_TUNNELING_BASE + offset; - if (drm_dp_dpcd_read(aux, address, tunnel_reg_ptr(regs, address), len) < 0) + if (drm_dp_dpcd_read_data(aux, address, tunnel_reg_ptr(regs, address), len) < 0) return -EIO; offset += len; @@ -913,7 +913,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable) u8 mask = DP_DISPLAY_DRIVER_BW_ALLOCATION_MODE_ENABLE | DP_UNMASK_BW_ALLOCATION_IRQ; u8 val; - if (drm_dp_dpcd_readb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0) + if (drm_dp_dpcd_read_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, &val) < 0) goto out_err; if (enable) @@ -921,7 +921,7 @@ static int set_bw_alloc_mode(struct drm_dp_tunnel *tunnel, bool enable) else val &= ~mask; - if (drm_dp_dpcd_writeb(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0) + if (drm_dp_dpcd_write_byte(tunnel->aux, DP_DPTX_BW_ALLOCATION_MODE_CONTROL, val) < 0) goto out_err; tunnel->bw_alloc_enabled = enable; @@ -1039,7 +1039,7 @@ static int clear_bw_req_state(struct drm_dp_aux *aux) { u8 bw_req_mask = DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED; - if (drm_dp_dpcd_writeb(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0) + if (drm_dp_dpcd_write_byte(aux, DP_TUNNELING_STATUS, bw_req_mask) < 0) return -EIO; 
return 0; @@ -1052,7 +1052,7 @@ static int bw_req_complete(struct drm_dp_aux *aux, bool *status_changed) u8 val; int err; - if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0) + if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0) return -EIO; *status_changed = val & status_change_mask; @@ -1095,7 +1095,7 @@ static int allocate_tunnel_bw(struct drm_dp_tunnel *tunnel, int bw) if (err) goto out; - if (drm_dp_dpcd_writeb(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) { + if (drm_dp_dpcd_write_byte(tunnel->aux, DP_REQUEST_BW, request_bw) < 0) { err = -EIO; goto out; } @@ -1196,13 +1196,13 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel) u8 mask = DP_BW_ALLOCATION_CAPABILITY_CHANGED | DP_ESTIMATED_BW_CHANGED; u8 val; - if (drm_dp_dpcd_readb(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0) + if (drm_dp_dpcd_read_byte(tunnel->aux, DP_TUNNELING_STATUS, &val) < 0) goto out_err; val &= mask; if (val) { - if (drm_dp_dpcd_writeb(tunnel->aux, DP_TUNNELING_STATUS, val) < 0) + if (drm_dp_dpcd_write_byte(tunnel->aux, DP_TUNNELING_STATUS, val) < 0) goto out_err; return 1; @@ -1215,7 +1215,7 @@ static int check_and_clear_status_change(struct drm_dp_tunnel *tunnel) * Check for estimated BW changes explicitly to account for lost * BW change notifications. */ - if (drm_dp_dpcd_readb(tunnel->aux, DP_ESTIMATED_BW, &val) < 0) + if (drm_dp_dpcd_read_byte(tunnel->aux, DP_ESTIMATED_BW, &val) < 0) goto out_err; if (val * tunnel->bw_granularity != tunnel->estimated_bw) @@ -1300,7 +1300,7 @@ int drm_dp_tunnel_handle_irq(struct drm_dp_tunnel_mgr *mgr, struct drm_dp_aux *a { u8 val; - if (drm_dp_dpcd_readb(aux, DP_TUNNELING_STATUS, &val) < 0) + if (drm_dp_dpcd_read_byte(aux, DP_TUNNELING_STATUS, &val) < 0) return -EIO; if (val & (DP_BW_REQUEST_SUCCEEDED | DP_BW_REQUEST_FAILED)) -- 2.50.1 From 0b87bbbeaf023b70fa1e66b674e74ec5cb59d0c3 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 17 Mar 2025 17:12:22 +0000 Subject: [PATCH 06/16] drm/display: Update comment on hdmi hotplug helper Update the comment on drm_atomic_helper_connector_hdmi_hotplug() to clarify that it must be called for all status updates. Signed-off-by: David Turner Reviewed-by: Maxime Ripard Link: https://patchwork.freedesktop.org/patch/msgid/20250317-vc4_hotplug-v4-1-2af625629186@raspberrypi.com Signed-off-by: Dave Stevenson --- drivers/gpu/drm/display/drm_hdmi_state_helper.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/display/drm_hdmi_state_helper.c b/drivers/gpu/drm/display/drm_hdmi_state_helper.c index 8a2472f277f1..d9d9948b29e9 100644 --- a/drivers/gpu/drm/display/drm_hdmi_state_helper.c +++ b/drivers/gpu/drm/display/drm_hdmi_state_helper.c @@ -1108,7 +1108,7 @@ drm_atomic_helper_connector_hdmi_update(struct drm_connector *connector, * @status: Connection status * * This function should be called as a part of the .detect() / .detect_ctx() - * callbacks, updating the HDMI-specific connector's data. + * callbacks for all status changes. */ void drm_atomic_helper_connector_hdmi_hotplug(struct drm_connector *connector, enum drm_connector_status status) -- 2.50.1 From 34f051accedb642087fdcf19b3501fe150fbee49 Mon Sep 17 00:00:00 2001 From: Stefan Wahren Date: Mon, 17 Mar 2025 17:12:23 +0000 Subject: [PATCH 07/16] drm/vc4: hdmi: Call HDMI hotplug helper on disconnect drm_atomic_helper_connector_hdmi_hotplug() must be called regardless of the connection status, otherwise the HDMI audio disconnect event won't be notified. 
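With this change vc4_hdmi_handle_hotplug() reports every status change before taking the disconnect shortcut, i.e. the resulting flow is roughly (the exact hunk is below):

	drm_atomic_helper_connector_hdmi_hotplug(connector, status);

	if (status == connector_status_disconnected) {
		cec_phys_addr_invalidate(vc4_hdmi->cec_adap);
		return;
	}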
Fixes: 2ea9ec5d2c20 ("drm/vc4: hdmi: use drm_atomic_helper_connector_hdmi_hotplug()") Suggested-by: Dmitry Baryshkov Signed-off-by: Stefan Wahren Reviewed-by: Maxime Ripard Signed-off-by: David Turner Link: https://patchwork.freedesktop.org/patch/msgid/20250317-vc4_hotplug-v4-2-2af625629186@raspberrypi.com Signed-off-by: Dave Stevenson --- drivers/gpu/drm/vc4/vc4_hdmi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 37238a12baa5..37a7d45695f2 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -372,13 +372,13 @@ static void vc4_hdmi_handle_hotplug(struct vc4_hdmi *vc4_hdmi, * the lock for now. */ + drm_atomic_helper_connector_hdmi_hotplug(connector, status); + if (status == connector_status_disconnected) { cec_phys_addr_invalidate(vc4_hdmi->cec_adap); return; } - drm_atomic_helper_connector_hdmi_hotplug(connector, status); - cec_s_phys_addr(vc4_hdmi->cec_adap, connector->display_info.source_physical_address, false); -- 2.50.1 From 2f9d51740cc30e0d2c8a23a55b1e20cf2513c250 Mon Sep 17 00:00:00 2001 From: David Turner Date: Mon, 17 Mar 2025 17:12:24 +0000 Subject: [PATCH 08/16] drm/vc4: hdmi: Add jack detection to HDMI audio driver Add ALSA jack detection to the vc4-hdmi audio driver so userspace knows when to add/remove HDMI audio devices. Signed-off-by: Stefan Wahren Reviewed-by: Maxime Ripard Signed-off-by: David Turner Link: https://patchwork.freedesktop.org/patch/msgid/20250317-vc4_hotplug-v4-3-2af625629186@raspberrypi.com Signed-off-by: Dave Stevenson --- drivers/gpu/drm/vc4/vc4_hdmi.c | 18 ++++++++++++++++++ drivers/gpu/drm/vc4/vc4_hdmi.h | 7 +++++++ 2 files changed, 25 insertions(+) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 37a7d45695f2..a29a6ef266f9 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -51,6 +51,7 @@ #include #include #include +#include #include #include #include @@ -2175,6 +2176,22 @@ static const struct drm_connector_hdmi_audio_funcs vc4_hdmi_audio_funcs = { .shutdown = vc4_hdmi_audio_shutdown, }; +static int vc4_hdmi_codec_init(struct snd_soc_pcm_runtime *rtd) +{ + struct vc4_hdmi *vc4_hdmi = snd_soc_card_get_drvdata(rtd->card); + struct snd_soc_component *component = snd_soc_rtd_to_codec(rtd, 0)->component; + int ret; + + ret = snd_soc_card_jack_new(rtd->card, "HDMI Jack", SND_JACK_LINEOUT, + &vc4_hdmi->hdmi_jack); + if (ret) { + dev_err(rtd->dev, "HDMI Jack creation failed: %d\n", ret); + return ret; + } + + return snd_soc_component_set_jack(component, &vc4_hdmi->hdmi_jack, NULL); +} + static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) { const struct vc4_hdmi_register *mai_data = @@ -2288,6 +2305,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *vc4_hdmi) dai_link->cpus->dai_name = dev_name(dev); dai_link->codecs->name = dev_name(&vc4_hdmi->connector.hdmi_audio.codec_pdev->dev); dai_link->platforms->name = dev_name(dev); + dai_link->init = vc4_hdmi_codec_init; card->dai_link = dai_link; card->num_links = 1; diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.h b/drivers/gpu/drm/vc4/vc4_hdmi.h index e3d989ca302b..a31157c99bee 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.h +++ b/drivers/gpu/drm/vc4/vc4_hdmi.h @@ -4,6 +4,7 @@ #include #include #include +#include #include #include "vc4_drv.h" @@ -211,6 +212,12 @@ struct vc4_hdmi { * KMS hooks. Protected by @mutex. 
*/ enum hdmi_colorspace output_format; + + /** + * @hdmi_jack: Represents the connection state of the HDMI plug, for + * ALSA jack detection. + */ + struct snd_soc_jack hdmi_jack; }; #define connector_to_vc4_hdmi(_connector) \ -- 2.50.1 From 8f5c4871a014cf31133476ffd2f117bffeaf6b60 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Sun, 23 Mar 2025 00:25:59 +0300 Subject: [PATCH 09/16] drm/gem: Change locked/unlocked postfix of drm_gem_v/unmap() function names MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit Make drm/gem API function names consistent by having locked function use the _locked postfix in the name, while the unlocked variants don't use the _unlocked postfix. Rename drm_gem_v/unmap() function names to make them consistent with the rest of the API functions. Acked-by: Maxime Ripard Reviewed-by: Boris Brezillon Suggested-by: Boris Brezillon Reviewed-by: Christian König Acked-by: Thomas Zimmermann Signed-off-by: Dmitry Osipenko Link: https://patchwork.freedesktop.org/patch/msgid/20250322212608.40511-2-dmitry.osipenko@collabora.com --- drivers/gpu/drm/drm_client.c | 10 +++++----- drivers/gpu/drm/drm_gem.c | 20 ++++++++++---------- drivers/gpu/drm/drm_gem_framebuffer_helper.c | 6 +++--- drivers/gpu/drm/drm_internal.h | 4 ++-- drivers/gpu/drm/drm_prime.c | 4 ++-- drivers/gpu/drm/lima/lima_sched.c | 4 ++-- drivers/gpu/drm/panfrost/panfrost_dump.c | 4 ++-- drivers/gpu/drm/panfrost/panfrost_perfcnt.c | 6 +++--- drivers/gpu/drm/panthor/panthor_gem.h | 4 ++-- drivers/gpu/drm/panthor/panthor_sched.c | 4 ++-- include/drm/drm_gem.h | 4 ++-- 11 files changed, 35 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c index 549b28a5918c..f1de7faf9fb4 100644 --- a/drivers/gpu/drm/drm_client.c +++ b/drivers/gpu/drm/drm_client.c @@ -174,7 +174,7 @@ EXPORT_SYMBOL(drm_client_release); static void drm_client_buffer_delete(struct drm_client_buffer *buffer) { if (buffer->gem) { - drm_gem_vunmap_unlocked(buffer->gem, &buffer->map); + drm_gem_vunmap(buffer->gem, &buffer->map); drm_gem_object_put(buffer->gem); } @@ -252,7 +252,7 @@ int drm_client_buffer_vmap_local(struct drm_client_buffer *buffer, drm_gem_lock(gem); - ret = drm_gem_vmap(gem, map); + ret = drm_gem_vmap_locked(gem, map); if (ret) goto err_drm_gem_vmap_unlocked; *map_copy = *map; @@ -278,7 +278,7 @@ void drm_client_buffer_vunmap_local(struct drm_client_buffer *buffer) struct drm_gem_object *gem = buffer->gem; struct iosys_map *map = &buffer->map; - drm_gem_vunmap(gem, map); + drm_gem_vunmap_locked(gem, map); drm_gem_unlock(gem); } EXPORT_SYMBOL(drm_client_buffer_vunmap_local); @@ -316,7 +316,7 @@ drm_client_buffer_vmap(struct drm_client_buffer *buffer, ret = drm_gem_pin_locked(gem); if (ret) goto err_drm_gem_pin_locked; - ret = drm_gem_vmap(gem, map); + ret = drm_gem_vmap_locked(gem, map); if (ret) goto err_drm_gem_vmap; @@ -348,7 +348,7 @@ void drm_client_buffer_vunmap(struct drm_client_buffer *buffer) struct iosys_map *map = &buffer->map; drm_gem_lock(gem); - drm_gem_vunmap(gem, map); + drm_gem_vunmap_locked(gem, map); drm_gem_unpin_locked(gem); drm_gem_unlock(gem); } diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index c6240bab3fa5..27778e5ce0c0 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1216,7 +1216,7 @@ void drm_gem_unpin(struct drm_gem_object *obj) dma_resv_unlock(obj->resv); } -int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) +int drm_gem_vmap_locked(struct drm_gem_object 
*obj, struct iosys_map *map) { int ret; @@ -1233,9 +1233,9 @@ int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) return 0; } -EXPORT_SYMBOL(drm_gem_vmap); +EXPORT_SYMBOL(drm_gem_vmap_locked); -void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) +void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map) { dma_resv_assert_held(obj->resv); @@ -1248,7 +1248,7 @@ void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) /* Always set the mapping to NULL. Callers may rely on this. */ iosys_map_clear(map); } -EXPORT_SYMBOL(drm_gem_vunmap); +EXPORT_SYMBOL(drm_gem_vunmap_locked); void drm_gem_lock(struct drm_gem_object *obj) { @@ -1262,25 +1262,25 @@ void drm_gem_unlock(struct drm_gem_object *obj) } EXPORT_SYMBOL(drm_gem_unlock); -int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map) +int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) { int ret; dma_resv_lock(obj->resv, NULL); - ret = drm_gem_vmap(obj, map); + ret = drm_gem_vmap_locked(obj, map); dma_resv_unlock(obj->resv); return ret; } -EXPORT_SYMBOL(drm_gem_vmap_unlocked); +EXPORT_SYMBOL(drm_gem_vmap); -void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map) +void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map) { dma_resv_lock(obj->resv, NULL); - drm_gem_vunmap(obj, map); + drm_gem_vunmap_locked(obj, map); dma_resv_unlock(obj->resv); } -EXPORT_SYMBOL(drm_gem_vunmap_unlocked); +EXPORT_SYMBOL(drm_gem_vunmap); /** * drm_gem_lock_reservations - Sets up the ww context and acquires diff --git a/drivers/gpu/drm/drm_gem_framebuffer_helper.c b/drivers/gpu/drm/drm_gem_framebuffer_helper.c index 0fbeb686e561..6f72e7a0f427 100644 --- a/drivers/gpu/drm/drm_gem_framebuffer_helper.c +++ b/drivers/gpu/drm/drm_gem_framebuffer_helper.c @@ -362,7 +362,7 @@ int drm_gem_fb_vmap(struct drm_framebuffer *fb, struct iosys_map *map, ret = -EINVAL; goto err_drm_gem_vunmap; } - ret = drm_gem_vmap_unlocked(obj, &map[i]); + ret = drm_gem_vmap(obj, &map[i]); if (ret) goto err_drm_gem_vunmap; } @@ -384,7 +384,7 @@ err_drm_gem_vunmap: obj = drm_gem_fb_get_obj(fb, i); if (!obj) continue; - drm_gem_vunmap_unlocked(obj, &map[i]); + drm_gem_vunmap(obj, &map[i]); } return ret; } @@ -411,7 +411,7 @@ void drm_gem_fb_vunmap(struct drm_framebuffer *fb, struct iosys_map *map) continue; if (iosys_map_is_null(&map[i])) continue; - drm_gem_vunmap_unlocked(obj, &map[i]); + drm_gem_vunmap(obj, &map[i]); } } EXPORT_SYMBOL(drm_gem_fb_vunmap); diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h index b2b6a8e49dda..e44f28fd81d3 100644 --- a/drivers/gpu/drm/drm_internal.h +++ b/drivers/gpu/drm/drm_internal.h @@ -179,8 +179,8 @@ int drm_gem_pin_locked(struct drm_gem_object *obj); void drm_gem_unpin_locked(struct drm_gem_object *obj); int drm_gem_pin(struct drm_gem_object *obj); void drm_gem_unpin(struct drm_gem_object *obj); -int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map); -void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map); +int drm_gem_vmap_locked(struct drm_gem_object *obj, struct iosys_map *map); +void drm_gem_vunmap_locked(struct drm_gem_object *obj, struct iosys_map *map); /* drm_debugfs.c drm_debugfs_crc.c */ #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 4b8c6075e46a..d828502268b8 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -707,7 +707,7 @@ int drm_gem_dmabuf_vmap(struct dma_buf 
*dma_buf, struct iosys_map *map) { struct drm_gem_object *obj = dma_buf->priv; - return drm_gem_vmap(obj, map); + return drm_gem_vmap_locked(obj, map); } EXPORT_SYMBOL(drm_gem_dmabuf_vmap); @@ -723,7 +723,7 @@ void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, struct iosys_map *map) { struct drm_gem_object *obj = dma_buf->priv; - drm_gem_vunmap(obj, map); + drm_gem_vunmap_locked(obj, map); } EXPORT_SYMBOL(drm_gem_dmabuf_vunmap); diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c index 825135a26aa4..7934098e651b 100644 --- a/drivers/gpu/drm/lima/lima_sched.c +++ b/drivers/gpu/drm/lima/lima_sched.c @@ -371,7 +371,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task) } else { buffer_chunk->size = lima_bo_size(bo); - ret = drm_gem_vmap_unlocked(&bo->base.base, &map); + ret = drm_gem_vmap(&bo->base.base, &map); if (ret) { kvfree(et); goto out; @@ -379,7 +379,7 @@ static void lima_sched_build_error_task_list(struct lima_sched_task *task) memcpy(buffer_chunk + 1, map.vaddr, buffer_chunk->size); - drm_gem_vunmap_unlocked(&bo->base.base, &map); + drm_gem_vunmap(&bo->base.base, &map); } buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size; diff --git a/drivers/gpu/drm/panfrost/panfrost_dump.c b/drivers/gpu/drm/panfrost/panfrost_dump.c index 47751302f1bc..4042afe2fbf4 100644 --- a/drivers/gpu/drm/panfrost/panfrost_dump.c +++ b/drivers/gpu/drm/panfrost/panfrost_dump.c @@ -209,7 +209,7 @@ void panfrost_core_dump(struct panfrost_job *job) goto dump_header; } - ret = drm_gem_vmap_unlocked(&bo->base.base, &map); + ret = drm_gem_vmap(&bo->base.base, &map); if (ret) { dev_err(pfdev->dev, "Panfrost Dump: couldn't map Buffer Object\n"); iter.hdr->bomap.valid = 0; @@ -228,7 +228,7 @@ void panfrost_core_dump(struct panfrost_job *job) vaddr = map.vaddr; memcpy(iter.data, vaddr, bo->base.base.size); - drm_gem_vunmap_unlocked(&bo->base.base, &map); + drm_gem_vunmap(&bo->base.base, &map); iter.hdr->bomap.valid = 1; diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c index ba9b6e2b2636..52befead08c6 100644 --- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c +++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c @@ -106,7 +106,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, goto err_close_bo; } - ret = drm_gem_vmap_unlocked(&bo->base, &map); + ret = drm_gem_vmap(&bo->base, &map); if (ret) goto err_put_mapping; perfcnt->buf = map.vaddr; @@ -165,7 +165,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev, return 0; err_vunmap: - drm_gem_vunmap_unlocked(&bo->base, &map); + drm_gem_vunmap(&bo->base, &map); err_put_mapping: panfrost_gem_mapping_put(perfcnt->mapping); err_close_bo: @@ -195,7 +195,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev, GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_OFF)); perfcnt->user = NULL; - drm_gem_vunmap_unlocked(&perfcnt->mapping->obj->base.base, &map); + drm_gem_vunmap(&perfcnt->mapping->obj->base.base, &map); perfcnt->buf = NULL; panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv); panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu); diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h index 5749ef2ebe03..1a363bb814f4 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.h +++ b/drivers/gpu/drm/panthor/panthor_gem.h @@ -112,7 +112,7 @@ panthor_kernel_bo_vmap(struct panthor_kernel_bo *bo) if (bo->kmap) return 0; - ret = drm_gem_vmap_unlocked(bo->obj, &map); 
+ ret = drm_gem_vmap(bo->obj, &map); if (ret) return ret; @@ -126,7 +126,7 @@ panthor_kernel_bo_vunmap(struct panthor_kernel_bo *bo) if (bo->kmap) { struct iosys_map map = IOSYS_MAP_INIT_VADDR(bo->kmap); - drm_gem_vunmap_unlocked(bo->obj, &map); + drm_gem_vunmap(bo->obj, &map); bo->kmap = NULL; } } diff --git a/drivers/gpu/drm/panthor/panthor_sched.c b/drivers/gpu/drm/panthor/panthor_sched.c index 4d31d1967716..446ec780eb4a 100644 --- a/drivers/gpu/drm/panthor/panthor_sched.c +++ b/drivers/gpu/drm/panthor/panthor_sched.c @@ -840,7 +840,7 @@ panthor_queue_put_syncwait_obj(struct panthor_queue *queue) if (queue->syncwait.kmap) { struct iosys_map map = IOSYS_MAP_INIT_VADDR(queue->syncwait.kmap); - drm_gem_vunmap_unlocked(queue->syncwait.obj, &map); + drm_gem_vunmap(queue->syncwait.obj, &map); queue->syncwait.kmap = NULL; } @@ -866,7 +866,7 @@ panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue goto err_put_syncwait_obj; queue->syncwait.obj = &bo->base.base; - ret = drm_gem_vmap_unlocked(queue->syncwait.obj, &map); + ret = drm_gem_vmap(queue->syncwait.obj, &map); if (drm_WARN_ON(&ptdev->base, ret)) goto err_put_syncwait_obj; diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 2bf893eabb4b..13c312ca07ae 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -537,8 +537,8 @@ void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages, void drm_gem_lock(struct drm_gem_object *obj); void drm_gem_unlock(struct drm_gem_object *obj); -int drm_gem_vmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map); -void drm_gem_vunmap_unlocked(struct drm_gem_object *obj, struct iosys_map *map); +int drm_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map); +void drm_gem_vunmap(struct drm_gem_object *obj, struct iosys_map *map); int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles, int count, struct drm_gem_object ***objs_out); -- 2.50.1 From 9a0fd089f08d059c1d9cc3f8ac6fc268e93ff6d7 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Sun, 23 Mar 2025 00:26:00 +0300 Subject: [PATCH 10/16] drm/gem: Add _locked postfix to functions that have unlocked counterpart MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit Add _locked postfix to drm_gem functions that have unlocked counterpart functions to make GEM functions naming more consistent and intuitive in regards to the locking requirements. 
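A minimal sketch of the convention this and the previous patch settle on, assuming a hypothetical core-internal caller (the _locked variants remain private to the DRM core via drm_internal.h): the plain names take the reservation lock themselves, while the _locked names expect the caller to hold it.

  static int example_vmap_both_ways(struct drm_gem_object *obj,
                                    struct iosys_map *map)
  {
      int ret;

      /* drm_gem_vmap() takes and drops obj->resv internally. */
      ret = drm_gem_vmap(obj, map);
      if (ret)
          return ret;
      drm_gem_vunmap(obj, map);

      /* The _locked variants require the caller to hold obj->resv. */
      dma_resv_lock(obj->resv, NULL);
      ret = drm_gem_vmap_locked(obj, map);
      if (!ret)
          drm_gem_vunmap_locked(obj, map);
      dma_resv_unlock(obj->resv);

      return ret;
  }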
Acked-by: Maxime Ripard Reviewed-by: Boris Brezillon Suggested-by: Boris Brezillon Reviewed-by: Christian König Acked-by: Thomas Zimmermann Signed-off-by: Dmitry Osipenko Link: https://patchwork.freedesktop.org/patch/msgid/20250322212608.40511-3-dmitry.osipenko@collabora.com --- drivers/gpu/drm/drm_gem.c | 6 +++--- include/drm/drm_gem.h | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 27778e5ce0c0..1e659d2660f7 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1543,10 +1543,10 @@ tail: EXPORT_SYMBOL(drm_gem_lru_scan); /** - * drm_gem_evict - helper to evict backing pages for a GEM object + * drm_gem_evict_locked - helper to evict backing pages for a GEM object * @obj: obj in question */ -int drm_gem_evict(struct drm_gem_object *obj) +int drm_gem_evict_locked(struct drm_gem_object *obj) { dma_resv_assert_held(obj->resv); @@ -1558,4 +1558,4 @@ int drm_gem_evict(struct drm_gem_object *obj) return 0; } -EXPORT_SYMBOL(drm_gem_evict); +EXPORT_SYMBOL(drm_gem_evict_locked); diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 13c312ca07ae..43cf3c2c7ca0 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -561,7 +561,7 @@ unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned long *remaining, bool (*shrink)(struct drm_gem_object *obj)); -int drm_gem_evict(struct drm_gem_object *obj); +int drm_gem_evict_locked(struct drm_gem_object *obj); /** * drm_gem_object_is_shared_for_memory_stats - helper for shared memory stats -- 2.50.1 From 5462dc8371486fe72129c7b1ad9386cf00679737 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Sun, 23 Mar 2025 00:26:01 +0300 Subject: [PATCH 11/16] drm/gem: Document locking rule of vmap and evict callbacks MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit The vmap/vunmap/evict GEM callbacks are always invoked with a held GEM's reservation lock. Document this locking rule for clarity. Reviewed-by: Boris Brezillon Reviewed-by: Christian König Acked-by: Thomas Zimmermann Signed-off-by: Dmitry Osipenko Link: https://patchwork.freedesktop.org/patch/msgid/20250322212608.40511-4-dmitry.osipenko@collabora.com --- include/drm/drm_gem.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 43cf3c2c7ca0..9b71f7a9f3f8 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -159,7 +159,8 @@ struct drm_gem_object_funcs { * @vmap: * * Returns a virtual address for the buffer. Used by the - * drm_gem_dmabuf_vmap() helper. + * drm_gem_dmabuf_vmap() helper. Called with a held GEM reservation + * lock. * * This callback is optional. */ @@ -169,7 +170,8 @@ struct drm_gem_object_funcs { * @vunmap: * * Releases the address previously returned by @vmap. Used by the - * drm_gem_dmabuf_vunmap() helper. + * drm_gem_dmabuf_vunmap() helper. Called with a held GEM reservation + * lock. * * This callback is optional. */ @@ -192,7 +194,8 @@ struct drm_gem_object_funcs { * @evict: * * Evicts gem object out from memory. Used by the drm_gem_object_evict() - * helper. Returns 0 on success, -errno otherwise. + * helper. Returns 0 on success, -errno otherwise. Called with a held + * GEM reservation lock. * * This callback is optional. 
*/ -- 2.50.1 From 1d23391d7e0975c36c312321917f144846e239a5 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Sun, 23 Mar 2025 00:26:02 +0300 Subject: [PATCH 12/16] drm/shmem-helper: Make all exported symbols GPL Make all drm-shmem exported symbols GPL to make them consistent with the rest of drm-shmem symbols. Acked-by: Maxime Ripard Reviewed-by: Boris Brezillon Acked-by: Thomas Zimmermann Signed-off-by: Dmitry Osipenko Link: https://patchwork.freedesktop.org/patch/msgid/20250322212608.40511-5-dmitry.osipenko@collabora.com --- drivers/gpu/drm/drm_gem_shmem_helper.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index d99dee67353a..98c68999d61a 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -247,7 +247,7 @@ void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem) shmem->pages_mark_accessed_on_put); shmem->pages = NULL; } -EXPORT_SYMBOL(drm_gem_shmem_put_pages); +EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages); int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem) { @@ -296,7 +296,7 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem) return ret; } -EXPORT_SYMBOL(drm_gem_shmem_pin); +EXPORT_SYMBOL_GPL(drm_gem_shmem_pin); /** * drm_gem_shmem_unpin - Unpin backing pages for a shmem GEM object @@ -315,7 +315,7 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem) drm_gem_shmem_unpin_locked(shmem); dma_resv_unlock(shmem->base.resv); } -EXPORT_SYMBOL(drm_gem_shmem_unpin); +EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin); /* * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object @@ -385,7 +385,7 @@ err_zero_use: return ret; } -EXPORT_SYMBOL(drm_gem_shmem_vmap); +EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap); /* * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object @@ -421,7 +421,7 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, shmem->vaddr = NULL; } -EXPORT_SYMBOL(drm_gem_shmem_vunmap); +EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap); static int drm_gem_shmem_create_with_handle(struct drm_file *file_priv, @@ -460,7 +460,7 @@ int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv) return (madv >= 0); } -EXPORT_SYMBOL(drm_gem_shmem_madvise); +EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise); void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) { @@ -492,7 +492,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1); } -EXPORT_SYMBOL(drm_gem_shmem_purge); +EXPORT_SYMBOL_GPL(drm_gem_shmem_purge); /** * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object @@ -670,7 +670,7 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem, drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count); drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr); } -EXPORT_SYMBOL(drm_gem_shmem_print_info); +EXPORT_SYMBOL_GPL(drm_gem_shmem_print_info); /** * drm_gem_shmem_get_sg_table - Provide a scatter/gather table of pinned -- 2.50.1 From 954907f7147dc43e0d1cd4d430c21d143d5fdf55 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Sun, 23 Mar 2025 00:26:03 +0300 Subject: [PATCH 13/16] drm/shmem-helper: Refactor locked/unlocked functions Add locked and remove unlocked postfixes from drm-shmem function names, making names consistent with the drm/gem core code. 
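A minimal sketch of a caller under the new drm-shmem names, following the same pattern as the imagination/pvr hunk below; the wrapper itself is hypothetical, and unmapping would go through drm_gem_shmem_vunmap_locked() under the same lock:

  static int example_shmem_vmap(struct drm_gem_shmem_object *shmem,
                                struct iosys_map *map)
  {
      int ret;

      /* The _locked variant expects the GEM reservation lock to be held. */
      dma_resv_lock(shmem->base.resv, NULL);
      ret = drm_gem_shmem_vmap_locked(shmem, map);
      dma_resv_unlock(shmem->base.resv);

      return ret;
  }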
Reviewed-by: Boris Brezillon Suggested-by: Boris Brezillon Acked-by: Thomas Zimmermann Signed-off-by: Dmitry Osipenko Link: https://patchwork.freedesktop.org/patch/msgid/20250322212608.40511-6-dmitry.osipenko@collabora.com --- drivers/gpu/drm/drm_gem_shmem_helper.c | 60 +++++++++---------- drivers/gpu/drm/imagination/pvr_gem.c | 4 +- drivers/gpu/drm/lima/lima_gem.c | 2 +- drivers/gpu/drm/panfrost/panfrost_drv.c | 2 +- .../gpu/drm/panfrost/panfrost_gem_shrinker.c | 2 +- drivers/gpu/drm/tests/drm_gem_shmem_test.c | 14 ++--- include/drm/drm_gem_shmem_helper.h | 28 ++++----- 7 files changed, 56 insertions(+), 56 deletions(-) diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 98c68999d61a..a9e35a46e72b 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -174,7 +174,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) kfree(shmem->sgt); } if (shmem->pages) - drm_gem_shmem_put_pages(shmem); + drm_gem_shmem_put_pages_locked(shmem); drm_WARN_ON(obj->dev, shmem->pages_use_count); @@ -186,7 +186,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) } EXPORT_SYMBOL_GPL(drm_gem_shmem_free); -static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) +static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; struct page **pages; @@ -220,12 +220,12 @@ static int drm_gem_shmem_get_pages(struct drm_gem_shmem_object *shmem) } /* - * drm_gem_shmem_put_pages - Decrease use count on the backing pages for a shmem GEM object + * drm_gem_shmem_put_pages_locked - Decrease use count on the backing pages for a shmem GEM object * @shmem: shmem GEM object * * This function decreases the use count and puts the backing pages when use drops to zero. */ -void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem) +void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; @@ -247,7 +247,7 @@ void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem) shmem->pages_mark_accessed_on_put); shmem->pages = NULL; } -EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages); +EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked); int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem) { @@ -257,7 +257,7 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem) drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base)); - ret = drm_gem_shmem_get_pages(shmem); + ret = drm_gem_shmem_get_pages_locked(shmem); return ret; } @@ -267,7 +267,7 @@ void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem) { dma_resv_assert_held(shmem->base.resv); - drm_gem_shmem_put_pages(shmem); + drm_gem_shmem_put_pages_locked(shmem); } EXPORT_SYMBOL(drm_gem_shmem_unpin_locked); @@ -318,7 +318,7 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem) EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin); /* - * drm_gem_shmem_vmap - Create a virtual mapping for a shmem GEM object + * drm_gem_shmem_vmap_locked - Create a virtual mapping for a shmem GEM object * @shmem: shmem GEM object * @map: Returns the kernel virtual address of the SHMEM GEM object's backing * store. @@ -327,13 +327,13 @@ EXPORT_SYMBOL_GPL(drm_gem_shmem_unpin); * exists for the buffer backing the shmem GEM object. It hides the differences * between dma-buf imported and natively allocated objects. * - * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap(). 
+ * Acquired mappings should be cleaned up by calling drm_gem_shmem_vunmap_locked(). * * Returns: * 0 on success or a negative error code on failure. */ -int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map) +int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { struct drm_gem_object *obj = &shmem->base; int ret = 0; @@ -356,7 +356,7 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, return 0; } - ret = drm_gem_shmem_get_pages(shmem); + ret = drm_gem_shmem_get_pages_locked(shmem); if (ret) goto err_zero_use; @@ -379,28 +379,28 @@ int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, err_put_pages: if (!drm_gem_is_imported(obj)) - drm_gem_shmem_put_pages(shmem); + drm_gem_shmem_put_pages_locked(shmem); err_zero_use: shmem->vmap_use_count = 0; return ret; } -EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap); +EXPORT_SYMBOL_GPL(drm_gem_shmem_vmap_locked); /* - * drm_gem_shmem_vunmap - Unmap a virtual mapping for a shmem GEM object + * drm_gem_shmem_vunmap_locked - Unmap a virtual mapping for a shmem GEM object * @shmem: shmem GEM object * @map: Kernel virtual address where the SHMEM GEM object was mapped * * This function cleans up a kernel virtual address mapping acquired by - * drm_gem_shmem_vmap(). The mapping is only removed when the use count drops to - * zero. + * drm_gem_shmem_vmap_locked(). The mapping is only removed when the use count + * drops to zero. * * This function hides the differences between dma-buf imported and natively * allocated objects. */ -void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map) +void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, + struct iosys_map *map) { struct drm_gem_object *obj = &shmem->base; @@ -416,12 +416,12 @@ void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, return; vunmap(shmem->vaddr); - drm_gem_shmem_put_pages(shmem); + drm_gem_shmem_put_pages_locked(shmem); } shmem->vaddr = NULL; } -EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap); +EXPORT_SYMBOL_GPL(drm_gem_shmem_vunmap_locked); static int drm_gem_shmem_create_with_handle(struct drm_file *file_priv, @@ -449,7 +449,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv, /* Update madvise status, returns true if not purged, else * false or -errno. 
*/ -int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv) +int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv) { dma_resv_assert_held(shmem->base.resv); @@ -460,9 +460,9 @@ int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv) return (madv >= 0); } -EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise); +EXPORT_SYMBOL_GPL(drm_gem_shmem_madvise_locked); -void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) +void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem) { struct drm_gem_object *obj = &shmem->base; struct drm_device *dev = obj->dev; @@ -476,7 +476,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) kfree(shmem->sgt); shmem->sgt = NULL; - drm_gem_shmem_put_pages(shmem); + drm_gem_shmem_put_pages_locked(shmem); shmem->madv = -1; @@ -492,7 +492,7 @@ void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem) invalidate_mapping_pages(file_inode(obj->filp)->i_mapping, 0, (loff_t)-1); } -EXPORT_SYMBOL_GPL(drm_gem_shmem_purge); +EXPORT_SYMBOL_GPL(drm_gem_shmem_purge_locked); /** * drm_gem_shmem_dumb_create - Create a dumb shmem buffer object @@ -589,7 +589,7 @@ static void drm_gem_shmem_vm_close(struct vm_area_struct *vma) struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); dma_resv_lock(shmem->base.resv, NULL); - drm_gem_shmem_put_pages(shmem); + drm_gem_shmem_put_pages_locked(shmem); dma_resv_unlock(shmem->base.resv); drm_gem_vm_close(vma); @@ -639,7 +639,7 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct return -EINVAL; dma_resv_lock(shmem->base.resv, NULL); - ret = drm_gem_shmem_get_pages(shmem); + ret = drm_gem_shmem_get_pages_locked(shmem); dma_resv_unlock(shmem->base.resv); if (ret) @@ -707,7 +707,7 @@ static struct sg_table *drm_gem_shmem_get_pages_sgt_locked(struct drm_gem_shmem_ drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); - ret = drm_gem_shmem_get_pages(shmem); + ret = drm_gem_shmem_get_pages_locked(shmem); if (ret) return ERR_PTR(ret); @@ -729,7 +729,7 @@ err_free_sgt: sg_free_table(sgt); kfree(sgt); err_put_pages: - drm_gem_shmem_put_pages(shmem); + drm_gem_shmem_put_pages_locked(shmem); return ERR_PTR(ret); } diff --git a/drivers/gpu/drm/imagination/pvr_gem.c b/drivers/gpu/drm/imagination/pvr_gem.c index 6a8c81fe8c1e..d9d7c6d1a138 100644 --- a/drivers/gpu/drm/imagination/pvr_gem.c +++ b/drivers/gpu/drm/imagination/pvr_gem.c @@ -203,7 +203,7 @@ pvr_gem_object_vmap(struct pvr_gem_object *pvr_obj) dma_resv_lock(obj->resv, NULL); - err = drm_gem_shmem_vmap(shmem_obj, &map); + err = drm_gem_shmem_vmap_locked(shmem_obj, &map); if (err) goto err_unlock; @@ -257,7 +257,7 @@ pvr_gem_object_vunmap(struct pvr_gem_object *pvr_obj) dma_sync_sgtable_for_device(dev, shmem_obj->sgt, DMA_BIDIRECTIONAL); } - drm_gem_shmem_vunmap(shmem_obj, &map); + drm_gem_shmem_vunmap_locked(shmem_obj, &map); dma_resv_unlock(obj->resv); } diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 9bb997dbb4b9..609221351cde 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -195,7 +195,7 @@ static int lima_gem_vmap(struct drm_gem_object *obj, struct iosys_map *map) if (bo->heap_size) return -EINVAL; - return drm_gem_shmem_vmap(&bo->base, map); + return drm_gem_shmem_vmap_locked(&bo->base, map); } static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma) diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c index 0f3935556ac7..a731f6b59a42 100644 --- 
a/drivers/gpu/drm/panfrost/panfrost_drv.c +++ b/drivers/gpu/drm/panfrost/panfrost_drv.c @@ -476,7 +476,7 @@ static int panfrost_ioctl_madvise(struct drm_device *dev, void *data, } } - args->retained = drm_gem_shmem_madvise(&bo->base, args->madv); + args->retained = drm_gem_shmem_madvise_locked(&bo->base, args->madv); if (args->retained) { if (args->madv == PANFROST_MADV_DONTNEED) diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c index 3d9f51bd48b6..02b60ea1433a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c +++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c @@ -51,7 +51,7 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj) goto unlock_mappings; panfrost_gem_teardown_mappings_locked(bo); - drm_gem_shmem_purge(&bo->base); + drm_gem_shmem_purge_locked(&bo->base); ret = true; dma_resv_unlock(shmem->base.resv); diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c index fd4215e2f982..98884966bb92 100644 --- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c +++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c @@ -173,7 +173,7 @@ static void drm_gem_shmem_test_vmap(struct kunit *test) ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); KUNIT_ASSERT_EQ(test, ret, 0); - ret = drm_gem_shmem_vmap(shmem, &map); + ret = drm_gem_shmem_vmap_locked(shmem, &map); KUNIT_ASSERT_EQ(test, ret, 0); KUNIT_ASSERT_NOT_NULL(test, shmem->vaddr); KUNIT_ASSERT_FALSE(test, iosys_map_is_null(&map)); @@ -183,7 +183,7 @@ static void drm_gem_shmem_test_vmap(struct kunit *test) for (i = 0; i < TEST_SIZE; i++) KUNIT_EXPECT_EQ(test, iosys_map_rd(&map, i, u8), TEST_BYTE); - drm_gem_shmem_vunmap(shmem, &map); + drm_gem_shmem_vunmap_locked(shmem, &map); KUNIT_EXPECT_NULL(test, shmem->vaddr); KUNIT_EXPECT_EQ(test, shmem->vmap_use_count, 0); } @@ -281,17 +281,17 @@ static void drm_gem_shmem_test_madvise(struct kunit *test) ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); KUNIT_ASSERT_EQ(test, ret, 0); - ret = drm_gem_shmem_madvise(shmem, 1); + ret = drm_gem_shmem_madvise_locked(shmem, 1); KUNIT_EXPECT_TRUE(test, ret); KUNIT_ASSERT_EQ(test, shmem->madv, 1); /* Set madv to a negative value */ - ret = drm_gem_shmem_madvise(shmem, -1); + ret = drm_gem_shmem_madvise_locked(shmem, -1); KUNIT_EXPECT_FALSE(test, ret); KUNIT_ASSERT_EQ(test, shmem->madv, -1); /* Check that madv cannot be set back to a positive value */ - ret = drm_gem_shmem_madvise(shmem, 0); + ret = drm_gem_shmem_madvise_locked(shmem, 0); KUNIT_EXPECT_FALSE(test, ret); KUNIT_ASSERT_EQ(test, shmem->madv, -1); } @@ -319,7 +319,7 @@ static void drm_gem_shmem_test_purge(struct kunit *test) ret = drm_gem_shmem_is_purgeable(shmem); KUNIT_EXPECT_FALSE(test, ret); - ret = drm_gem_shmem_madvise(shmem, 1); + ret = drm_gem_shmem_madvise_locked(shmem, 1); KUNIT_EXPECT_TRUE(test, ret); /* The scatter/gather table will be freed by drm_gem_shmem_free */ @@ -329,7 +329,7 @@ static void drm_gem_shmem_test_purge(struct kunit *test) ret = drm_gem_shmem_is_purgeable(shmem); KUNIT_EXPECT_TRUE(test, ret); - drm_gem_shmem_purge(shmem); + drm_gem_shmem_purge_locked(shmem); KUNIT_EXPECT_NULL(test, shmem->pages); KUNIT_EXPECT_NULL(test, shmem->sgt); KUNIT_EXPECT_EQ(test, shmem->madv, -1); diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index cef5a6b5a4d6..0609e336479d 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -102,19 +102,19 @@ struct 
drm_gem_shmem_object *drm_gem_shmem_create_with_mnt(struct drm_device *de struct vfsmount *gemfs); void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem); -void drm_gem_shmem_put_pages(struct drm_gem_shmem_object *shmem); +void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem); int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem); -int drm_gem_shmem_vmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map); -void drm_gem_shmem_vunmap(struct drm_gem_shmem_object *shmem, - struct iosys_map *map); +int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, + struct iosys_map *map); +void drm_gem_shmem_vunmap_locked(struct drm_gem_shmem_object *shmem, + struct iosys_map *map); int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct *vma); int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem); void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem); -int drm_gem_shmem_madvise(struct drm_gem_shmem_object *shmem, int madv); +int drm_gem_shmem_madvise_locked(struct drm_gem_shmem_object *shmem, int madv); static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem) { @@ -123,7 +123,7 @@ static inline bool drm_gem_shmem_is_purgeable(struct drm_gem_shmem_object *shmem !shmem->base.dma_buf && !drm_gem_is_imported(&shmem->base); } -void drm_gem_shmem_purge(struct drm_gem_shmem_object *shmem); +void drm_gem_shmem_purge_locked(struct drm_gem_shmem_object *shmem); struct sg_table *drm_gem_shmem_get_sg_table(struct drm_gem_shmem_object *shmem); struct sg_table *drm_gem_shmem_get_pages_sgt(struct drm_gem_shmem_object *shmem); @@ -214,12 +214,12 @@ static inline struct sg_table *drm_gem_shmem_object_get_sg_table(struct drm_gem_ } /* - * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap() + * drm_gem_shmem_object_vmap - GEM object function for drm_gem_shmem_vmap_locked() * @obj: GEM object * @map: Returns the kernel virtual address of the SHMEM GEM object's backing store. * - * This function wraps drm_gem_shmem_vmap(). Drivers that employ the shmem helpers should - * use it as their &drm_gem_object_funcs.vmap handler. + * This function wraps drm_gem_shmem_vmap_locked(). Drivers that employ the shmem + * helpers should use it as their &drm_gem_object_funcs.vmap handler. * * Returns: * 0 on success or a negative error code on failure. @@ -229,7 +229,7 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - return drm_gem_shmem_vmap(shmem, map); + return drm_gem_shmem_vmap_locked(shmem, map); } /* @@ -237,15 +237,15 @@ static inline int drm_gem_shmem_object_vmap(struct drm_gem_object *obj, * @obj: GEM object * @map: Kernel virtual address where the SHMEM GEM object was mapped * - * This function wraps drm_gem_shmem_vunmap(). Drivers that employ the shmem helpers should - * use it as their &drm_gem_object_funcs.vunmap handler. + * This function wraps drm_gem_shmem_vunmap_locked(). Drivers that employ the shmem + * helpers should use it as their &drm_gem_object_funcs.vunmap handler. 
*/ static inline void drm_gem_shmem_object_vunmap(struct drm_gem_object *obj, struct iosys_map *map) { struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj); - drm_gem_shmem_vunmap(shmem, map); + drm_gem_shmem_vunmap_locked(shmem, map); } /** -- 2.50.1 From eab10538073c3ff9e21c857bd462f79f2f6f7e00 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Sun, 23 Mar 2025 00:26:04 +0300 Subject: [PATCH 14/16] drm/shmem-helper: Remove obsoleted is_iomem test Everything that uses the mapped buffer should be agnostic to is_iomem. The only reason for the is_iomem test is that we're setting shmem->vaddr to the returned map->vaddr. Now that the shmem->vaddr code is gone, remove the obsoleted is_iomem test to clean up the code. Acked-by: Maxime Ripard Suggested-by: Thomas Zimmermann Reviewed-by: Boris Brezillon Acked-by: Thomas Zimmermann Signed-off-by: Dmitry Osipenko Link: https://patchwork.freedesktop.org/patch/msgid/20250322212608.40511-7-dmitry.osipenko@collabora.com --- drivers/gpu/drm/drm_gem_shmem_helper.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index a9e35a46e72b..277e792a0c5c 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -340,12 +340,6 @@ int drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem, if (drm_gem_is_imported(obj)) { ret = dma_buf_vmap(obj->dma_buf, map); - if (!ret) { - if (drm_WARN_ON(obj->dev, map->is_iomem)) { - dma_buf_vunmap(obj->dma_buf, map); - return -EIO; - } - } } else { pgprot_t prot = PAGE_KERNEL; -- 2.50.1 From d586b535f1448a6969e2b664e4e061c40bec4679 Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Sun, 23 Mar 2025 00:26:05 +0300 Subject: [PATCH 15/16] drm/shmem-helper: Add and use pages_pin_count Add a separate pages_pin_count for tracking whether drm-shmem pages are movable or not. With the addition of memory shrinker support to drm-shmem, the pages_use_count will no longer determine whether pages are hard-pinned in memory, but whether pages exist and are soft-pinned (and could be swapped out). A pages_pin_count > 0 will hard-pin pages in memory.
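A condensed sketch of the pin fast path this enables, based on the drm_gem_shmem_pin() hunk below (the wrapper name is illustrative): a repeated pin only bumps the refcount and never takes the reservation lock.

  static int example_pin(struct drm_gem_shmem_object *shmem)
  {
      int ret;

      /* Fast path: pages are already hard-pinned, skip the resv lock. */
      if (refcount_inc_not_zero(&shmem->pages_pin_count))
          return 0;

      /* Slow path: the first pin populates the pages under the resv lock. */
      ret = dma_resv_lock_interruptible(shmem->base.resv, NULL);
      if (ret)
          return ret;
      ret = drm_gem_shmem_pin_locked(shmem);
      dma_resv_unlock(shmem->base.resv);

      return ret;
  }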
Acked-by: Maxime Ripard Reviewed-by: Boris Brezillon Suggested-by: Boris Brezillon Acked-by: Thomas Zimmermann Signed-off-by: Dmitry Osipenko Link: https://patchwork.freedesktop.org/patch/msgid/20250322212608.40511-8-dmitry.osipenko@collabora.com --- drivers/gpu/drm/drm_gem_shmem_helper.c | 16 +++++++++++++++- include/drm/drm_gem_shmem_helper.h | 11 +++++++++++ 2 files changed, 26 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 277e792a0c5c..d338b36f4eaa 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -177,6 +177,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) drm_gem_shmem_put_pages_locked(shmem); drm_WARN_ON(obj->dev, shmem->pages_use_count); + drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count)); dma_resv_unlock(shmem->base.resv); } @@ -257,7 +258,12 @@ int drm_gem_shmem_pin_locked(struct drm_gem_shmem_object *shmem) drm_WARN_ON(shmem->base.dev, drm_gem_is_imported(&shmem->base)); + if (refcount_inc_not_zero(&shmem->pages_pin_count)) + return 0; + ret = drm_gem_shmem_get_pages_locked(shmem); + if (!ret) + refcount_set(&shmem->pages_pin_count, 1); return ret; } @@ -267,7 +273,8 @@ void drm_gem_shmem_unpin_locked(struct drm_gem_shmem_object *shmem) { dma_resv_assert_held(shmem->base.resv); - drm_gem_shmem_put_pages_locked(shmem); + if (refcount_dec_and_test(&shmem->pages_pin_count)) + drm_gem_shmem_put_pages_locked(shmem); } EXPORT_SYMBOL(drm_gem_shmem_unpin_locked); @@ -288,6 +295,9 @@ int drm_gem_shmem_pin(struct drm_gem_shmem_object *shmem) drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); + if (refcount_inc_not_zero(&shmem->pages_pin_count)) + return 0; + ret = dma_resv_lock_interruptible(shmem->base.resv, NULL); if (ret) return ret; @@ -311,6 +321,9 @@ void drm_gem_shmem_unpin(struct drm_gem_shmem_object *shmem) drm_WARN_ON(obj->dev, drm_gem_is_imported(obj)); + if (refcount_dec_not_one(&shmem->pages_pin_count)) + return; + dma_resv_lock(shmem->base.resv, NULL); drm_gem_shmem_unpin_locked(shmem); dma_resv_unlock(shmem->base.resv); @@ -660,6 +673,7 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem, if (drm_gem_is_imported(&shmem->base)) return; + drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count)); drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count); drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count); drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr); diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index 0609e336479d..d411215fe494 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -39,6 +39,17 @@ struct drm_gem_shmem_object { */ unsigned int pages_use_count; + /** + * @pages_pin_count: + * + * Reference count on the pinned pages table. + * + * Pages are hard-pinned and reside in memory if count + * greater than zero. Otherwise, when count is zero, the pages are + * allowed to be evicted and purged by memory shrinker. 
+ */ + refcount_t pages_pin_count; + /** * @madv: State for madvise * -- 2.50.1 From 051b6646d36dc974c3884d75021bf282925b171c Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Sun, 23 Mar 2025 00:26:06 +0300 Subject: [PATCH 16/16] drm/shmem-helper: Use refcount_t for pages_use_count Use atomic refcount_t helper for pages_use_count to optimize pin/unpin functions by skipping reservation locking while GEM's pin refcount > 1. Acked-by: Maxime Ripard Reviewed-by: Boris Brezillon Suggested-by: Boris Brezillon Acked-by: Thomas Zimmermann Signed-off-by: Dmitry Osipenko Link: https://patchwork.freedesktop.org/patch/msgid/20250322212608.40511-9-dmitry.osipenko@collabora.com --- drivers/gpu/drm/drm_gem_shmem_helper.c | 33 ++++++++++------------ drivers/gpu/drm/lima/lima_gem.c | 2 +- drivers/gpu/drm/panfrost/panfrost_mmu.c | 2 +- drivers/gpu/drm/tests/drm_gem_shmem_test.c | 8 +++--- include/drm/drm_gem_shmem_helper.h | 2 +- 5 files changed, 22 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index d338b36f4eaa..6fb96e790abd 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -176,7 +176,7 @@ void drm_gem_shmem_free(struct drm_gem_shmem_object *shmem) if (shmem->pages) drm_gem_shmem_put_pages_locked(shmem); - drm_WARN_ON(obj->dev, shmem->pages_use_count); + drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_use_count)); drm_WARN_ON(obj->dev, refcount_read(&shmem->pages_pin_count)); dma_resv_unlock(shmem->base.resv); @@ -194,14 +194,13 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) dma_resv_assert_held(shmem->base.resv); - if (shmem->pages_use_count++ > 0) + if (refcount_inc_not_zero(&shmem->pages_use_count)) return 0; pages = drm_gem_get_pages(obj); if (IS_ERR(pages)) { drm_dbg_kms(obj->dev, "Failed to get pages (%ld)\n", PTR_ERR(pages)); - shmem->pages_use_count = 0; return PTR_ERR(pages); } @@ -217,6 +216,8 @@ static int drm_gem_shmem_get_pages_locked(struct drm_gem_shmem_object *shmem) shmem->pages = pages; + refcount_set(&shmem->pages_use_count, 1); + return 0; } @@ -232,21 +233,17 @@ void drm_gem_shmem_put_pages_locked(struct drm_gem_shmem_object *shmem) dma_resv_assert_held(shmem->base.resv); - if (drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) - return; - - if (--shmem->pages_use_count > 0) - return; - + if (refcount_dec_and_test(&shmem->pages_use_count)) { #ifdef CONFIG_X86 - if (shmem->map_wc) - set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT); + if (shmem->map_wc) + set_pages_array_wb(shmem->pages, obj->size >> PAGE_SHIFT); #endif - drm_gem_put_pages(obj, shmem->pages, - shmem->pages_mark_dirty_on_put, - shmem->pages_mark_accessed_on_put); - shmem->pages = NULL; + drm_gem_put_pages(obj, shmem->pages, + shmem->pages_mark_dirty_on_put, + shmem->pages_mark_accessed_on_put); + shmem->pages = NULL; + } } EXPORT_SYMBOL_GPL(drm_gem_shmem_put_pages_locked); @@ -582,8 +579,8 @@ static void drm_gem_shmem_vm_open(struct vm_area_struct *vma) * mmap'd, vm_open() just grabs an additional reference for the new * mm the vma is getting copied into (ie. on fork()). 
*/ - if (!drm_WARN_ON_ONCE(obj->dev, !shmem->pages_use_count)) - shmem->pages_use_count++; + drm_WARN_ON_ONCE(obj->dev, + !refcount_inc_not_zero(&shmem->pages_use_count)); dma_resv_unlock(shmem->base.resv); @@ -674,7 +671,7 @@ void drm_gem_shmem_print_info(const struct drm_gem_shmem_object *shmem, return; drm_printf_indent(p, indent, "pages_pin_count=%u\n", refcount_read(&shmem->pages_pin_count)); - drm_printf_indent(p, indent, "pages_use_count=%u\n", shmem->pages_use_count); + drm_printf_indent(p, indent, "pages_use_count=%u\n", refcount_read(&shmem->pages_use_count)); drm_printf_indent(p, indent, "vmap_use_count=%u\n", shmem->vmap_use_count); drm_printf_indent(p, indent, "vaddr=%p\n", shmem->vaddr); } diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c index 609221351cde..5deec673c11e 100644 --- a/drivers/gpu/drm/lima/lima_gem.c +++ b/drivers/gpu/drm/lima/lima_gem.c @@ -47,7 +47,7 @@ int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm) } bo->base.pages = pages; - bo->base.pages_use_count = 1; + refcount_set(&bo->base.pages_use_count, 1); mapping_set_unevictable(mapping); } diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index b91019cd5acb..4a0b4bf03f1a 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -489,7 +489,7 @@ static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as, goto err_unlock; } bo->base.pages = pages; - bo->base.pages_use_count = 1; + refcount_set(&bo->base.pages_use_count, 1); } else { pages = bo->base.pages; if (pages[page_offset]) { diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c index 98884966bb92..1459cdb0c413 100644 --- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c +++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c @@ -134,7 +134,7 @@ static void drm_gem_shmem_test_pin_pages(struct kunit *test) shmem = drm_gem_shmem_create(drm_dev, TEST_SIZE); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, shmem); KUNIT_EXPECT_NULL(test, shmem->pages); - KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0); + KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0); ret = kunit_add_action_or_reset(test, drm_gem_shmem_free_wrapper, shmem); KUNIT_ASSERT_EQ(test, ret, 0); @@ -142,14 +142,14 @@ static void drm_gem_shmem_test_pin_pages(struct kunit *test) ret = drm_gem_shmem_pin(shmem); KUNIT_ASSERT_EQ(test, ret, 0); KUNIT_ASSERT_NOT_NULL(test, shmem->pages); - KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1); + KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1); for (i = 0; i < (shmem->base.size >> PAGE_SHIFT); i++) KUNIT_ASSERT_NOT_NULL(test, shmem->pages[i]); drm_gem_shmem_unpin(shmem); KUNIT_EXPECT_NULL(test, shmem->pages); - KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 0); + KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 0); } /* @@ -251,7 +251,7 @@ static void drm_gem_shmem_test_get_sg_table(struct kunit *test) sgt = drm_gem_shmem_get_pages_sgt(shmem); KUNIT_ASSERT_NOT_ERR_OR_NULL(test, sgt); KUNIT_ASSERT_NOT_NULL(test, shmem->pages); - KUNIT_EXPECT_EQ(test, shmem->pages_use_count, 1); + KUNIT_EXPECT_EQ(test, refcount_read(&shmem->pages_use_count), 1); KUNIT_EXPECT_PTR_EQ(test, sgt, shmem->sgt); for_each_sgtable_sg(sgt, sg, si) { diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h index d411215fe494..3a4be433d5f0 100644 --- a/include/drm/drm_gem_shmem_helper.h +++ b/include/drm/drm_gem_shmem_helper.h @@ -37,7 +37,7 @@ struct 
drm_gem_shmem_object { * Reference count on the pages table. * The pages are put when the count reaches zero. */ - unsigned int pages_use_count; + refcount_t pages_use_count; /** * @pages_pin_count: -- 2.50.1
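Taken together, the last two patches converge on a single refcount_t idiom for the shmem page counters. A compressed sketch, with hypothetical function names and the actual page allocation/freeing elided:

  static int example_get_pages(struct drm_gem_shmem_object *shmem)
  {
      dma_resv_assert_held(shmem->base.resv);

      /* Pages already exist: just take another reference. */
      if (refcount_inc_not_zero(&shmem->pages_use_count))
          return 0;

      /* First user: allocate the pages, then publish the initial reference. */
      refcount_set(&shmem->pages_use_count, 1);
      return 0;
  }

  static void example_put_pages(struct drm_gem_shmem_object *shmem)
  {
      if (refcount_dec_and_test(&shmem->pages_use_count)) {
          /* Last reference dropped: the pages would be released here. */
      }
  }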