Merge tag 'amd-drm-fixes-6.14-2025-02-26' of https://gitlab.freedesktop.org/agd5f/linux into drm-fixes

amd-drm-fixes-6.14-2025-02-26:

amdgpu:
- Legacy dpm suspend/resume fix
- Runtime PM fix for DELL G5 SE
- MAINTAINERS updates
- Enforce Isolation fixes
- mailmap update
- EDID reading i2c fix
- PSR fix
- eDP fix
- HPD interrupt handling fix
- Clear memory fix

amdkfd:
- MQD handling fix

Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20250226200342.3685347-1-alexander.deucher@amd.com
commit b06a731cbc

.mailmap
@@ -613,6 +613,8 @@ Richard Leitner <richard.leitner@linux.dev> <me@g0hl1n.net>
 Richard Leitner <richard.leitner@linux.dev> <richard.leitner@skidata.com>
 Robert Foss <rfoss@kernel.org> <robert.foss@linaro.org>
 Rocky Liao <quic_rjliao@quicinc.com> <rjliao@codeaurora.org>
+Rodrigo Siqueira <siqueira@igalia.com> <rodrigosiqueiramelo@gmail.com>
+Rodrigo Siqueira <siqueira@igalia.com> <Rodrigo.Siqueira@amd.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guro@fb.com>
 Roman Gushchin <roman.gushchin@linux.dev> <guroan@gmail.com>
 Roman Gushchin <roman.gushchin@linux.dev> <klamm@yandex-team.ru>

MAINTAINERS
@@ -1046,14 +1046,14 @@ F: drivers/crypto/ccp/hsti.*
 AMD DISPLAY CORE
 M: Harry Wentland <harry.wentland@amd.com>
 M: Leo Li <sunpeng.li@amd.com>
-M: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+R: Rodrigo Siqueira <siqueira@igalia.com>
 L: amd-gfx@lists.freedesktop.org
 S: Supported
 T: git https://gitlab.freedesktop.org/agd5f/linux.git
 F: drivers/gpu/drm/amd/display/

 AMD DISPLAY CORE - DML
-M: Chaitanya Dhere <chaitanya.dhere@amd.com>
+M: Austin Zheng <austin.zheng@amd.com>
 M: Jun Lei <jun.lei@amd.com>
 S: Supported
 F: drivers/gpu/drm/amd/display/dc/dml/
@@ -19657,7 +19657,6 @@ F: drivers/net/wireless/quantenna
 RADEON and AMDGPU DRM DRIVERS
 M: Alex Deucher <alexander.deucher@amd.com>
 M: Christian König <christian.koenig@amd.com>
-M: Xinhui Pan <Xinhui.Pan@amd.com>
 L: amd-gfx@lists.freedesktop.org
 S: Supported
 B: https://gitlab.freedesktop.org/drm/amd/-/issues

drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1638,6 +1638,13 @@ int amdgpu_device_resize_fb_bar(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev))
 		return 0;

+	/* resizing on Dell G5 SE platforms causes problems with runtime pm */
+	if ((amdgpu_runtime_pm != 0) &&
+	    adev->pdev->vendor == PCI_VENDOR_ID_ATI &&
+	    adev->pdev->device == 0x731f &&
+	    adev->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
+		return 0;
+
 	/* PCI_EXT_CAP_ID_VNDR extended capability is located at 0x100 */
 	if (!pci_find_ext_capability(adev->pdev, PCI_EXT_CAP_ID_VNDR))
 		DRM_WARN("System can't access extended configuration space, please check!!\n");
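
Note: the quirk simply skips FB BAR resizing on the affected board when runtime PM is in use, since moving the BAR breaks runtime PM on the Dell G5 SE. As a reading aid only, the match could be factored into a helper like the hypothetical sketch below; amdgpu_is_dell_g5_se is not part of the patch:

    /* Hypothetical helper, not in the patch: same match as the quirk above. */
    static bool amdgpu_is_dell_g5_se(struct pci_dev *pdev)
    {
    	/* 0x731f is a Navi 10 device ID; the subsystem vendor narrows it to Dell */
    	return pdev->vendor == PCI_VENDOR_ID_ATI &&
    	       pdev->device == 0x731f &&
    	       pdev->subsystem_vendor == PCI_VENDOR_ID_DELL;
    }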

drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -1638,22 +1638,19 @@ static ssize_t amdgpu_gfx_set_enforce_isolation(struct device *dev,
 	}

 	mutex_lock(&adev->enforce_isolation_mutex);

 	for (i = 0; i < num_partitions; i++) {
-		if (adev->enforce_isolation[i] && !partition_values[i]) {
+		if (adev->enforce_isolation[i] && !partition_values[i])
 			/* Going from enabled to disabled */
 			amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(i));
-			amdgpu_mes_set_enforce_isolation(adev, i, false);
-		} else if (!adev->enforce_isolation[i] && partition_values[i]) {
+		else if (!adev->enforce_isolation[i] && partition_values[i])
 			/* Going from disabled to enabled */
 			amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(i));
-			amdgpu_mes_set_enforce_isolation(adev, i, true);
-		}
 		adev->enforce_isolation[i] = partition_values[i];
 	}

 	mutex_unlock(&adev->enforce_isolation_mutex);

+	amdgpu_mes_update_enforce_isolation(adev);
+
 	return count;
 }

drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c
@@ -1681,7 +1681,8 @@ bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev)
 }

 /* Fix me -- node_id is used to identify the correct MES instances in the future */
-int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable)
+static int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev,
+					    uint32_t node_id, bool enable)
 {
 	struct mes_misc_op_input op_input = {0};
 	int r;
@@ -1703,6 +1704,23 @@ error:
 	return r;
 }

+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev)
+{
+	int i, r = 0;
+
+	if (adev->enable_mes && adev->gfx.enable_cleaner_shader) {
+		mutex_lock(&adev->enforce_isolation_mutex);
+		for (i = 0; i < (adev->xcp_mgr ? adev->xcp_mgr->num_xcps : 1); i++) {
+			if (adev->enforce_isolation[i])
+				r |= amdgpu_mes_set_enforce_isolation(adev, i, true);
+			else
+				r |= amdgpu_mes_set_enforce_isolation(adev, i, false);
+		}
+		mutex_unlock(&adev->enforce_isolation_mutex);
+	}
+	return r;
+}
+
 #if defined(CONFIG_DEBUG_FS)

 static int amdgpu_debugfs_mes_event_log_show(struct seq_file *m, void *unused)
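
Note the `r |= ...` pattern in the new helper: every partition is attempted even if an earlier one failed, and the OR'd result is nonzero if anything failed. A minimal sketch of the trade-off, with hypothetical names:

    int apply_all(int n)
    {
    	int i, r = 0;

    	for (i = 0; i < n; i++)
    		r |= apply_one(i);	/* keep going on failure */

    	/* r is nonzero if any call failed, but ORing two different error
    	 * codes (say -EIO and -EINVAL) no longer yields a valid errno, so
    	 * callers may only test for nonzero, as the MES hw_init paths
    	 * below do. */
    	return r;
    }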

drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h
@@ -534,6 +534,6 @@ static inline void amdgpu_mes_unlock(struct amdgpu_mes *mes)

 bool amdgpu_mes_suspend_resume_all_supported(struct amdgpu_device *adev);

-int amdgpu_mes_set_enforce_isolation(struct amdgpu_device *adev, uint32_t node_id, bool enable);
+int amdgpu_mes_update_enforce_isolation(struct amdgpu_device *adev);

 #endif /* __AMDGPU_MES_H__ */

drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -2281,7 +2281,7 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo,
 	struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
 	struct amdgpu_res_cursor cursor;
 	u64 addr;
-	int r;
+	int r = 0;

 	if (!adev->mman.buffer_funcs_enabled)
 		return -EINVAL;
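
This one-liner is the "Clear memory fix" from the tag: presumably the resource-cursor walk in amdgpu_ttm_clear_buffer can complete without ever assigning `r`, in which case the old code returned an uninitialized value. A generic sketch of the bug class, with hypothetical names:

    int clear_buffer(struct cursor *c)
    {
    	int r = 0;	/* without the initializer, zero loop iterations
    			 * would return stack garbage */

    	while (cursor_valid(c)) {
    		r = clear_one_block(c);
    		if (r)
    			break;
    		cursor_next(c);
    	}
    	return r;
    }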

drivers/gpu/drm/amd/amdgpu/mes_v11_0.c
@@ -1633,6 +1633,10 @@ static int mes_v11_0_hw_init(struct amdgpu_ip_block *ip_block)
 		goto failure;
 	}

+	r = amdgpu_mes_update_enforce_isolation(adev);
+	if (r)
+		goto failure;
+
 out:
 	/*
 	 * Disable KIQ ring usage from the driver once MES is enabled.

drivers/gpu/drm/amd/amdgpu/mes_v12_0.c
@@ -1743,6 +1743,10 @@ static int mes_v12_0_hw_init(struct amdgpu_ip_block *ip_block)
 		goto failure;
 	}

+	r = amdgpu_mes_update_enforce_isolation(adev);
+	if (r)
+		goto failure;
+
 out:
 	/*
 	 * Disable KIQ ring usage from the driver once MES is enabled.
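
Both MES hw_init paths now replay the driver's cached enforce-isolation state, presumably because a MES firmware (re)start, e.g. across suspend/resume or a GPU reset, loses whatever was programmed before. The general shape of the pattern, sketched with hypothetical names:

    /* After (re)initializing firmware, push driver-cached state back down;
     * the firmware does not retain it across restarts. */
    static int fw_hw_init(struct dev *d)
    {
    	int r = start_firmware(d);

    	if (r)
    		return r;
    	return replay_cached_state(d);	/* e.g. enforce-isolation flags */
    }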

drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -107,6 +107,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
 	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
 			0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

+	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
 	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

 	m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -167,10 +169,10 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,

 	m = get_mqd(mqd);

-	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
 	m->cp_hqd_pq_control |=
 			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
-	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+
 	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

 	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);

drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -154,6 +154,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
 	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
 			0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

+	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
 	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

 	m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -221,10 +223,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,

 	m = get_mqd(mqd);

-	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
 	m->cp_hqd_pq_control |=
 			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
-	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
 	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

 	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);

drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v12.c
@@ -121,6 +121,8 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
 	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
 			0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

+	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
 	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

 	m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -184,10 +186,9 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,

 	m = get_mqd(mqd);

-	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
 	m->cp_hqd_pq_control |=
 			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
-	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
 	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

 	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);

drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -183,6 +183,9 @@ static void init_mqd(struct mqd_manager *mm, void **mqd,
 	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
 			0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

+	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
+
 	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

 	m->cp_mqd_base_addr_lo = lower_32_bits(addr);
@@ -245,7 +248,7 @@ static void update_mqd(struct mqd_manager *mm, void *mqd,

 	m = get_mqd(mqd);

-	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
+	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
 	m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
 	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);
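
The amdkfd MQD fix is the same read-modify-write change across all four generations: one-time fields (RPTR_BLOCK_SIZE, UNORD_DISPATCH) move into init_mqd, and update_mqd stops clobbering them by clearing and re-setting only the queue-size field. A generic sketch of the pattern; new_queue_size_bits stands in for the ffs()/order_base_2() computation above:

    /* Update one field of a packed register without disturbing the rest. */
    u32 reg = m->cp_hqd_pq_control;

    reg &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;	/* clear just this field */
    reg |= new_queue_size_bits;			/* set the new value */
    m->cp_hqd_pq_control = reg;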

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -1618,75 +1618,130 @@ static bool dm_should_disable_stutter(struct pci_dev *pdev)
 	return false;
 }

-static const struct dmi_system_id hpd_disconnect_quirk_table[] = {
+struct amdgpu_dm_quirks {
+	bool aux_hpd_discon;
+	bool support_edp0_on_dp1;
+};
+
+static struct amdgpu_dm_quirks quirk_entries = {
+	.aux_hpd_discon = false,
+	.support_edp0_on_dp1 = false
+};
+
+static int edp0_on_dp1_callback(const struct dmi_system_id *id)
+{
+	quirk_entries.support_edp0_on_dp1 = true;
+	return 0;
+}
+
+static int aux_hpd_discon_callback(const struct dmi_system_id *id)
+{
+	quirk_entries.aux_hpd_discon = true;
+	return 0;
+}
+
+static const struct dmi_system_id dmi_quirk_table[] = {
 	{
+		.callback = aux_hpd_discon_callback,
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"),
 		},
 	},
 	{
+		.callback = aux_hpd_discon_callback,
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"),
 		},
 	},
 	{
+		.callback = aux_hpd_discon_callback,
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"),
 		},
 	},
 	{
+		.callback = aux_hpd_discon_callback,
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"),
 		},
 	},
 	{
+		.callback = aux_hpd_discon_callback,
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"),
 		},
 	},
 	{
+		.callback = aux_hpd_discon_callback,
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"),
 		},
 	},
 	{
+		.callback = aux_hpd_discon_callback,
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"),
 		},
 	},
 	{
+		.callback = aux_hpd_discon_callback,
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"),
 		},
 	},
 	{
+		.callback = aux_hpd_discon_callback,
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
 			DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"),
 		},
 	},
+	{
+		.callback = edp0_on_dp1_callback,
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "HP Elite mt645 G8 Mobile Thin Client"),
+		},
+	},
+	{
+		.callback = edp0_on_dp1_callback,
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
+			DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 665 16 inch G11 Notebook PC"),
+		},
+	},
 	{}
+	/* TODO: refactor this from a fixed table to a dynamic option */
 };

-static void retrieve_dmi_info(struct amdgpu_display_manager *dm)
+static void retrieve_dmi_info(struct amdgpu_display_manager *dm, struct dc_init_data *init_data)
 {
-	const struct dmi_system_id *dmi_id;
+	int dmi_id;
+	struct drm_device *dev = dm->ddev;

 	dm->aux_hpd_discon_quirk = false;
+	init_data->flags.support_edp0_on_dp1 = false;

-	dmi_id = dmi_first_match(hpd_disconnect_quirk_table);
-	if (dmi_id) {
+	dmi_id = dmi_check_system(dmi_quirk_table);
+
+	if (!dmi_id)
+		return;
+
+	if (quirk_entries.aux_hpd_discon) {
 		dm->aux_hpd_discon_quirk = true;
-		DRM_INFO("aux_hpd_discon_quirk attached\n");
+		drm_info(dev, "aux_hpd_discon_quirk attached\n");
 	}
+
+	if (quirk_entries.support_edp0_on_dp1) {
+		init_data->flags.support_edp0_on_dp1 = true;
+		drm_info(dev, "aux_hpd_discon_quirk attached\n");
+	}
 }
@@ -1994,7 +2049,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
 	if (amdgpu_ip_version(adev, DCE_HWIP, 0) >= IP_VERSION(3, 0, 0))
 		init_data.num_virtual_links = 1;

-	retrieve_dmi_info(&adev->dm);
+	retrieve_dmi_info(&adev->dm, &init_data);

 	if (adev->dm.bb_from_dmub)
 		init_data.bb_from_dmub = adev->dm.bb_from_dmub;
@@ -7240,8 +7295,14 @@ static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
 	struct dc_link *dc_link = aconnector->dc_link;
 	struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
 	const struct drm_edid *drm_edid;
+	struct i2c_adapter *ddc;

-	drm_edid = drm_edid_read(connector);
+	if (dc_link && dc_link->aux_mode)
+		ddc = &aconnector->dm_dp_aux.aux.ddc;
+	else
+		ddc = &aconnector->i2c->base;
+
+	drm_edid = drm_edid_read_ddc(connector, ddc);
 	drm_edid_connector_update(connector, drm_edid);
 	if (!drm_edid) {
 		DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
@@ -7286,14 +7347,21 @@ static int get_modes(struct drm_connector *connector)
 static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
 {
 	struct drm_connector *connector = &aconnector->base;
+	struct dc_link *dc_link = aconnector->dc_link;
 	struct dc_sink_init_data init_params = {
 			.link = aconnector->dc_link,
 			.sink_signal = SIGNAL_TYPE_VIRTUAL
 	};
 	const struct drm_edid *drm_edid;
 	const struct edid *edid;
+	struct i2c_adapter *ddc;

-	drm_edid = drm_edid_read(connector);
+	if (dc_link && dc_link->aux_mode)
+		ddc = &aconnector->dm_dp_aux.aux.ddc;
+	else
+		ddc = &aconnector->i2c->base;
+
+	drm_edid = drm_edid_read_ddc(connector, ddc);
 	drm_edid_connector_update(connector, drm_edid);
 	if (!drm_edid) {
 		DRM_ERROR("No EDID found on connector: %s.\n", connector->name);
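
The quirk-table rework hinges on dmi_check_system() semantics: it walks the whole table, invokes each matching entry's .callback (the callbacks here return 0, so the scan continues), and returns the number of matches. One scan can therefore set several independent flags, where the old dmi_first_match() could only report a single entry. The EDID hunks in the same file fix the "EDID reading i2c" item: forced connectors and emulated sinks now read EDID over the adapter that matches the link type (DP AUX channel vs. native i2c bus) instead of whatever drm_edid_read() picked. Calling pattern, abridged from the code above:

    if (!dmi_check_system(dmi_quirk_table))
    	return;		/* no entry matched: leave all quirks off */

    /* the callbacks have already filled in quirk_entries by now */
    if (quirk_entries.aux_hpd_discon)
    	dm->aux_hpd_discon_quirk = true;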

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c
@@ -894,6 +894,7 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
 	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
+	int i;

 	drm_connector_list_iter_begin(dev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
@@ -920,6 +921,12 @@ void amdgpu_dm_hpd_init(struct amdgpu_device *adev)
 		}
 	}
 	drm_connector_list_iter_end(&iter);
+
+	/* Update reference counts for HPDs */
+	for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
+		if (amdgpu_irq_get(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
+			drm_err(dev, "DM_IRQ: Failed get HPD for source=%d)!\n", i);
+	}
 }

 /**
@@ -935,6 +942,7 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
 	struct drm_device *dev = adev_to_drm(adev);
 	struct drm_connector *connector;
 	struct drm_connector_list_iter iter;
+	int i;

 	drm_connector_list_iter_begin(dev, &iter);
 	drm_for_each_connector_iter(connector, &iter) {
@@ -960,4 +968,10 @@ void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
 		}
 	}
 	drm_connector_list_iter_end(&iter);
+
+	/* Update reference counts for HPDs */
+	for (i = DC_IRQ_SOURCE_HPD1; i <= adev->mode_info.num_hpd; i++) {
+		if (amdgpu_irq_put(adev, &adev->hpd_irq, i - DC_IRQ_SOURCE_HPD1))
+			drm_err(dev, "DM_IRQ: Failed put HPD for source=%d!\n", i);
+	}
 }
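
amdgpu_irq_get()/amdgpu_irq_put() are refcounted enables: the source is unmasked on the 0-to-1 transition and masked again on 1-to-0. Taking the references in hpd_init() and dropping them in hpd_fini() keeps the count balanced across suspend/resume cycles, which is the HPD interrupt handling fix from the tag. In sketch form:

    /* init path */
    amdgpu_irq_get(adev, &adev->hpd_irq, idx);	/* refcount++, enable on 0->1 */

    /* fini path: must mirror every get */
    amdgpu_irq_put(adev, &adev->hpd_irq, idx);	/* refcount--, disable on 1->0 */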

drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -54,7 +54,8 @@ static bool link_supports_psrsu(struct dc_link *link)
 	if (amdgpu_dc_debug_mask & DC_DISABLE_PSR_SU)
 		return false;

-	return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub);
+	/* Temporarily disable PSR-SU to avoid glitches */
+	return false;
 }

 /*

drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -3042,6 +3042,7 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
 	if (!amdgpu_dpm)
 		return 0;

+	mutex_lock(&adev->pm.mutex);
 	kv_dpm_setup_asic(adev);
 	ret = kv_dpm_enable(adev);
 	if (ret)
@@ -3049,6 +3050,8 @@ static int kv_dpm_hw_init(struct amdgpu_ip_block *ip_block)
 	else
 		adev->pm.dpm_enabled = true;
 	amdgpu_legacy_dpm_compute_clocks(adev);
+	mutex_unlock(&adev->pm.mutex);
+
 	return ret;
 }

@@ -3066,32 +3069,42 @@ static int kv_dpm_suspend(struct amdgpu_ip_block *ip_block)
 {
 	struct amdgpu_device *adev = ip_block->adev;

+	cancel_work_sync(&adev->pm.dpm.thermal.work);
+
 	if (adev->pm.dpm_enabled) {
+		mutex_lock(&adev->pm.mutex);
+		adev->pm.dpm_enabled = false;
 		/* disable dpm */
 		kv_dpm_disable(adev);
 		/* reset the power state */
 		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+		mutex_unlock(&adev->pm.mutex);
 	}
 	return 0;
 }

 static int kv_dpm_resume(struct amdgpu_ip_block *ip_block)
 {
-	int ret;
+	int ret = 0;
 	struct amdgpu_device *adev = ip_block->adev;

-	if (adev->pm.dpm_enabled) {
+	if (!amdgpu_dpm)
+		return 0;
+
+	if (!adev->pm.dpm_enabled) {
+		mutex_lock(&adev->pm.mutex);
 		/* asic init will reset to the boot state */
 		kv_dpm_setup_asic(adev);
 		ret = kv_dpm_enable(adev);
-		if (ret)
+		if (ret) {
 			adev->pm.dpm_enabled = false;
-		else
+		} else {
 			adev->pm.dpm_enabled = true;
-		if (adev->pm.dpm_enabled)
 			amdgpu_legacy_dpm_compute_clocks(adev);
+		}
+		mutex_unlock(&adev->pm.mutex);
 	}
-	return 0;
+	return ret;
 }

 static bool kv_dpm_is_idle(void *handle)
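
The kv (and si, below) suspend path now enforces an ordering: first cancel_work_sync() so the thermal worker cannot be mid-flight during teardown, then flip dpm_enabled and disable the hardware under adev->pm.mutex, the same lock the worker takes. A condensed sketch of the suspend side, with hypothetical reduced names:

    static int dpm_suspend(struct dev *d)
    {
    	/* 1. worker finishes or never starts; it is not requeued here */
    	cancel_work_sync(&d->thermal_work);

    	if (d->dpm_enabled) {
    		mutex_lock(&d->pm_mutex);
    		d->dpm_enabled = false;	/* worker re-checks this under the lock */
    		dpm_disable_hw(d);
    		mutex_unlock(&d->pm_mutex);
    	}
    	return 0;
    }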

drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -1009,9 +1009,12 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
 	enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
 	int temp, size = sizeof(temp);

-	if (!adev->pm.dpm_enabled)
-		return;
+	mutex_lock(&adev->pm.mutex);

+	if (!adev->pm.dpm_enabled) {
+		mutex_unlock(&adev->pm.mutex);
+		return;
+	}
 	if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
 				   AMDGPU_PP_SENSOR_GPU_TEMP,
 				   (void *)&temp,
@@ -1033,4 +1036,5 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
 	adev->pm.dpm.state = dpm_state;

 	amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
+	mutex_unlock(&adev->pm.mutex);
 }
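
This is the worker side of the same race: the dpm_enabled test moves under pm.mutex, so the handler either observes the pre-suspend state and completes before kv/si teardown takes the lock, or it sees dpm_enabled == false and bails out. Sketched with the same hypothetical names as the suspend sketch above:

    static void thermal_work(struct work_struct *work)
    {
    	struct dev *d = container_of(work, struct dev, thermal_work);

    	mutex_lock(&d->pm_mutex);
    	if (!d->dpm_enabled) {		/* re-check under the lock */
    		mutex_unlock(&d->pm_mutex);
    		return;
    	}
    	/* ... read temperature and recompute clocks ... */
    	mutex_unlock(&d->pm_mutex);
    }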

drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -7786,6 +7786,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
 	if (!amdgpu_dpm)
 		return 0;

+	mutex_lock(&adev->pm.mutex);
 	si_dpm_setup_asic(adev);
 	ret = si_dpm_enable(adev);
 	if (ret)
@@ -7793,6 +7794,7 @@ static int si_dpm_hw_init(struct amdgpu_ip_block *ip_block)
 	else
 		adev->pm.dpm_enabled = true;
 	amdgpu_legacy_dpm_compute_clocks(adev);
+	mutex_unlock(&adev->pm.mutex);
 	return ret;
 }

@@ -7810,32 +7812,44 @@ static int si_dpm_suspend(struct amdgpu_ip_block *ip_block)
 {
 	struct amdgpu_device *adev = ip_block->adev;

+	cancel_work_sync(&adev->pm.dpm.thermal.work);
+
 	if (adev->pm.dpm_enabled) {
+		mutex_lock(&adev->pm.mutex);
+		adev->pm.dpm_enabled = false;
 		/* disable dpm */
 		si_dpm_disable(adev);
 		/* reset the power state */
 		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
+		mutex_unlock(&adev->pm.mutex);
 	}

 	return 0;
 }

 static int si_dpm_resume(struct amdgpu_ip_block *ip_block)
 {
-	int ret;
+	int ret = 0;
 	struct amdgpu_device *adev = ip_block->adev;

-	if (adev->pm.dpm_enabled) {
+	if (!amdgpu_dpm)
+		return 0;
+
+	if (!adev->pm.dpm_enabled) {
 		/* asic init will reset to the boot state */
+		mutex_lock(&adev->pm.mutex);
 		si_dpm_setup_asic(adev);
 		ret = si_dpm_enable(adev);
-		if (ret)
+		if (ret) {
 			adev->pm.dpm_enabled = false;
-		else
+		} else {
 			adev->pm.dpm_enabled = true;
-		if (adev->pm.dpm_enabled)
 			amdgpu_legacy_dpm_compute_clocks(adev);
+		}
+		mutex_unlock(&adev->pm.mutex);
 	}
-	return 0;
+
+	return ret;
 }

 static bool si_dpm_is_idle(void *handle)