www.infradead.org Git - users/dwmw2/linux.git/commitdiff
drm/amdgpu: Initialize VF partition mode
authorLijo Lazar <lijo.lazar@amd.com>
Wed, 3 Jul 2024 06:22:47 +0000 (11:52 +0530)
committerAlex Deucher <alexander.deucher@amd.com>
Wed, 10 Jul 2024 14:12:28 +0000 (10:12 -0400)
For SOCs with GFX v9.4.3, a VF may have multiple compute partitions.
Fetch the partition information during init and initialize the partition
nodes. Since there is no support for switching the partition mode while
in VF mode, partition-mode switching is disabled for VFs.

Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.c
drivers/gpu/drm/amd/amdgpu/aqua_vanjaram.c
drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c

index 6b0416777c5bfcb9cfe9ceff5e365d67dc872a98..ddda94e49db44a4bb9476a81c505fc63e9749210 100644 (file)
@@ -297,6 +297,7 @@ struct amdgpu_gfx_funcs {
        int (*switch_partition_mode)(struct amdgpu_device *adev,
                                     int num_xccs_per_xcp);
        int (*ih_node_to_logical_xcc)(struct amdgpu_device *adev, int ih_node);
+       int (*get_xccs_per_xcp)(struct amdgpu_device *adev);
 };
 
 struct sq_work {
index 2b99eed5ba19201fb174064cdd45c2a892e568fa..a6d456ec6aeb1a4fe22e71e2125b0f154cb1f15e 100644 (file)
@@ -219,7 +219,8 @@ int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
 {
        int mode;
 
-       if (xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+       if (!amdgpu_sriov_vf(xcp_mgr->adev) &&
+           xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
                return xcp_mgr->mode;
 
        if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
@@ -228,6 +229,12 @@ int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
        if (!(flags & AMDGPU_XCP_FL_LOCKED))
                mutex_lock(&xcp_mgr->xcp_lock);
        mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);
+
+       /* First time query for VF, set the mode here */
+       if (amdgpu_sriov_vf(xcp_mgr->adev) &&
+           xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
+               xcp_mgr->mode = mode;
+
        if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
                dev_WARN(
                        xcp_mgr->adev->dev,
@@ -282,8 +289,7 @@ int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
 {
        struct amdgpu_xcp_mgr *xcp_mgr;
 
-       if (!xcp_funcs || !xcp_funcs->switch_partition_mode ||
-           !xcp_funcs->get_ip_details)
+       if (!xcp_funcs || !xcp_funcs->get_ip_details)
                return -EINVAL;
 
        xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);
index 2c9a0aa41e2d5e6c4db868014bbd30ef231fc4fe..228fd4dd32f13917f2c76cec6d43e2ec1c4bf17d 100644 (file)
@@ -304,13 +304,56 @@ u64 aqua_vanjaram_encode_ext_smn_addressing(int ext_id)
        return ext_offset;
 }
 
+static enum amdgpu_gfx_partition
+__aqua_vanjaram_calc_xcp_mode(struct amdgpu_xcp_mgr *xcp_mgr)
+{
+       struct amdgpu_device *adev = xcp_mgr->adev;
+       int num_xcc, num_xcc_per_xcp = 0, mode = 0;
+
+       num_xcc = NUM_XCC(xcp_mgr->adev->gfx.xcc_mask);
+       if (adev->gfx.funcs->get_xccs_per_xcp)
+               num_xcc_per_xcp = adev->gfx.funcs->get_xccs_per_xcp(adev);
+       if ((num_xcc_per_xcp) && (num_xcc % num_xcc_per_xcp == 0))
+               mode = num_xcc / num_xcc_per_xcp;
+
+       if (num_xcc_per_xcp == 1)
+               return AMDGPU_CPX_PARTITION_MODE;
+
+       switch (mode) {
+       case 1:
+               return AMDGPU_SPX_PARTITION_MODE;
+       case 2:
+               return AMDGPU_DPX_PARTITION_MODE;
+       case 3:
+               return AMDGPU_TPX_PARTITION_MODE;
+       case 4:
+               return AMDGPU_QPX_PARTITION_MODE;
+       default:
+               return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+       }
+
+       return AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+}
+
 static int aqua_vanjaram_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
 {
-       enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
+       enum amdgpu_gfx_partition derv_mode,
+               mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE;
        struct amdgpu_device *adev = xcp_mgr->adev;
 
-       if (adev->nbio.funcs->get_compute_partition_mode)
+       derv_mode = __aqua_vanjaram_calc_xcp_mode(xcp_mgr);
+
+       if (amdgpu_sriov_vf(adev))
+               return derv_mode;
+
+       if (adev->nbio.funcs->get_compute_partition_mode) {
                mode = adev->nbio.funcs->get_compute_partition_mode(adev);
+               if (mode != derv_mode)
+                       dev_warn(
+                               adev->dev,
+                               "Mismatch in compute partition mode - reported : %d derived : %d",
+                               mode, derv_mode);
+       }
 
        return mode;
 }
@@ -624,6 +667,9 @@ static int aqua_vanjaram_xcp_mgr_init(struct amdgpu_device *adev)
 {
        int ret;
 
+       if (amdgpu_sriov_vf(adev))
+               aqua_vanjaram_xcp_funcs.switch_partition_mode = NULL;
+
        ret = amdgpu_xcp_mgr_init(adev, AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE, 1,
                                  &aqua_vanjaram_xcp_funcs);
        if (ret)
index c908e585b9ec0c782905e0829834990a25bf0662..20ea6cb01edfda9e716ff560378c2855d9381ed7 100644 (file)
@@ -652,6 +652,15 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev,
        soc15_grbm_select(adev, me, pipe, q, vm, GET_INST(GC, xcc_id));
 }
 
+static int gfx_v9_4_3_get_xccs_per_xcp(struct amdgpu_device *adev)
+{
+       u32 xcp_ctl;
+
+       /* Value is expected to be the same on all, fetch from first instance */
+       xcp_ctl = RREG32_SOC15(GC, GET_INST(GC, 0), regCP_HYP_XCP_CTL);
+
+       return REG_GET_FIELD(xcp_ctl, CP_HYP_XCP_CTL, NUM_XCC_IN_XCP);
+}
 
 static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev,
                                                int num_xccs_per_xcp)
@@ -706,6 +715,7 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_3_gfx_funcs = {
        .select_me_pipe_q = &gfx_v9_4_3_select_me_pipe_q,
        .switch_partition_mode = &gfx_v9_4_3_switch_compute_partition,
        .ih_node_to_logical_xcc = &gfx_v9_4_3_ih_to_xcc_inst,
+       .get_xccs_per_xcp = &gfx_v9_4_3_get_xccs_per_xcp,
 };
 
 static int gfx_v9_4_3_aca_bank_parser(struct aca_handle *handle,
@@ -2050,18 +2060,31 @@ static int gfx_v9_4_3_xcc_cp_resume(struct amdgpu_device *adev, int xcc_id)
 
 static int gfx_v9_4_3_cp_resume(struct amdgpu_device *adev)
 {
-       int r = 0, i, num_xcc;
+       int r = 0, i, num_xcc, num_xcp, num_xcc_per_xcp;
+
+       num_xcc = NUM_XCC(adev->gfx.xcc_mask);
+       if (amdgpu_sriov_vf(adev)) {
+               enum amdgpu_gfx_partition mode;
 
-       if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
-                                           AMDGPU_XCP_FL_NONE) ==
-           AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
-               r = amdgpu_xcp_switch_partition_mode(adev->xcp_mgr,
-                                                    amdgpu_user_partt_mode);
+               mode = amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+                                                      AMDGPU_XCP_FL_NONE);
+               if (mode == AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
+                       return -EINVAL;
+               num_xcc_per_xcp = gfx_v9_4_3_get_xccs_per_xcp(adev);
+               adev->gfx.num_xcc_per_xcp = num_xcc_per_xcp;
+               num_xcp = num_xcc / num_xcc_per_xcp;
+               r = amdgpu_xcp_init(adev->xcp_mgr, num_xcp, mode);
 
+       } else {
+               if (amdgpu_xcp_query_partition_mode(adev->xcp_mgr,
+                                                   AMDGPU_XCP_FL_NONE) ==
+                   AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE)
+                       r = amdgpu_xcp_switch_partition_mode(
+                               adev->xcp_mgr, amdgpu_user_partt_mode);
+       }
        if (r)
                return r;
 
-       num_xcc = NUM_XCC(adev->gfx.xcc_mask);
        for (i = 0; i < num_xcc; i++) {
                r = gfx_v9_4_3_xcc_cp_resume(adev, i);
                if (r)