www.infradead.org Git - users/jedix/linux-maple.git/commitdiff
drm/amd/pm: Add cache logic for temperature metric
authorLijo Lazar <lijo.lazar@amd.com>
Wed, 6 Aug 2025 07:22:47 +0000 (12:52 +0530)
committerAlex Deucher <alexander.deucher@amd.com>
Wed, 6 Aug 2025 18:30:47 +0000 (14:30 -0400)
Add caching logic for baseboard and gpuboard temperature metrics tables.

Signed-off-by: Lijo Lazar <lijo.lazar@amd.com>
Reviewed-by: Asad Kamal <asad.kamal@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h

index dc48a1dd8be4c881ce83ac26e586756f12eb79e3..0a40ab817634f135078c8285fc4602bec61dc739 100644 (file)
@@ -3835,6 +3835,9 @@ int smu_set_pm_policy(struct smu_context *smu, enum pp_pm_policy p_type,
 static ssize_t smu_sys_get_temp_metrics(void *handle, enum smu_temp_metric_type type, void *table)
 {
        struct smu_context *smu = handle;
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *tables = smu_table->tables;
+       enum smu_table_id table_id;
 
        if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
                return -EOPNOTSUPP;
@@ -3842,6 +3845,21 @@ static ssize_t smu_sys_get_temp_metrics(void *handle, enum smu_temp_metric_type
        if (!smu->smu_temp.temp_funcs || !smu->smu_temp.temp_funcs->get_temp_metrics)
                return -EOPNOTSUPP;
 
+       table_id = smu_metrics_get_temp_table_id(type);
+
+       if (table_id == SMU_TABLE_COUNT)
+               return -EINVAL;
+
+       /* If the request is to get size alone, return the cached table size */
+       if (!table && tables[table_id].cache.size)
+               return tables[table_id].cache.size;
+
+       if (smu_table_cache_is_valid(&tables[table_id])) {
+               memcpy(table, tables[table_id].cache.buffer,
+                      tables[table_id].cache.size);
+               return tables[table_id].cache.size;
+       }
+
        return smu->smu_temp.temp_funcs->get_temp_metrics(smu, type, table);
 }
 
index 611b381b91478e0c3d1423530ea0a6a3368df7de..2edd867f203e96b946f33aab63813e548af9f64e 100644 (file)
@@ -249,6 +249,14 @@ struct smu_user_dpm_profile {
                tables[table_id].domain = d;            \
        } while (0)
 
+struct smu_table_cache {
+       void *buffer;
+       size_t size;
+       /* refresh interval in ms */
+       uint32_t interval;
+       unsigned long last_cache_time;
+};
+
 struct smu_table {
        uint64_t size;
        uint32_t align;
@@ -257,7 +265,7 @@ struct smu_table {
        void *cpu_addr;
        struct amdgpu_bo *bo;
        uint32_t version;
-       unsigned long  metrics_time;
+       struct smu_table_cache cache;
 };
 
 enum smu_perf_level_designation {
@@ -323,7 +331,8 @@ enum smu_table_id {
        SMU_TABLE_ECCINFO,
        SMU_TABLE_COMBO_PPTABLE,
        SMU_TABLE_WIFIBAND,
-       SMU_TABLE_TEMP_METRICS,
+       SMU_TABLE_GPUBOARD_TEMP_METRICS,
+       SMU_TABLE_BASEBOARD_TEMP_METRICS,
        SMU_TABLE_COUNT,
 };
 
@@ -1651,6 +1660,71 @@ typedef struct {
 struct smu_dpm_policy *smu_get_pm_policy(struct smu_context *smu,
                                         enum pp_pm_policy p_type);
 
+static inline enum smu_table_id
+smu_metrics_get_temp_table_id(enum smu_temp_metric_type type)
+{
+       switch (type) {
+       case SMU_TEMP_METRIC_BASEBOARD:
+               return SMU_TABLE_BASEBOARD_TEMP_METRICS;
+       case SMU_TEMP_METRIC_GPUBOARD:
+               return SMU_TABLE_GPUBOARD_TEMP_METRICS;
+       default:
+               return SMU_TABLE_COUNT;
+       }
+
+       return SMU_TABLE_COUNT;
+}
+
+static inline void smu_table_cache_update_time(struct smu_table *table,
+                                              unsigned long time)
+{
+       table->cache.last_cache_time = time;
+}
+
+static inline bool smu_table_cache_is_valid(struct smu_table *table)
+{
+       if (!table->cache.buffer || !table->cache.last_cache_time ||
+           !table->cache.interval || !table->cache.size ||
+           time_after(jiffies,
+                      table->cache.last_cache_time +
+                              msecs_to_jiffies(table->cache.interval)))
+               return false;
+
+       return true;
+}
+
+static inline int smu_table_cache_init(struct smu_context *smu,
+                                      enum smu_table_id table_id, size_t size,
+                                      uint32_t cache_interval)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *tables = smu_table->tables;
+
+       tables[table_id].cache.buffer = kzalloc(size, GFP_KERNEL);
+       if (!tables[table_id].cache.buffer)
+               return -ENOMEM;
+
+       tables[table_id].cache.last_cache_time = 0;
+       tables[table_id].cache.interval = cache_interval;
+       tables[table_id].cache.size = size;
+
+       return 0;
+}
+
+static inline void smu_table_cache_fini(struct smu_context *smu,
+                                       enum smu_table_id table_id)
+{
+       struct smu_table_context *smu_table = &smu->smu_table;
+       struct smu_table *tables = smu_table->tables;
+
+       if (tables[table_id].cache.buffer) {
+               kfree(tables[table_id].cache.buffer);
+               tables[table_id].cache.buffer = NULL;
+               tables[table_id].cache.last_cache_time = 0;
+               tables[table_id].cache.interval = 0;
+       }
+}
+
 #if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4)
 int smu_get_power_limit(void *handle,
                        uint32_t *limit,