| /* |
| * Copyright 2015 Advanced Micro Devices, Inc. |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a |
| * copy of this software and associated documentation files (the "Software"), |
| * to deal in the Software without restriction, including without limitation |
| * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| * and/or sell copies of the Software, and to permit persons to whom the |
| * Software is furnished to do so, subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice shall be included in |
| * all copies or substantial portions of the Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
| * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
| * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
| * OTHER DEALINGS IN THE SOFTWARE. |
| * |
| * Authors: AMD |
| * |
| */ |
| |
| /* The caprices of the preprocessor require that this be declared right here */ |
| #define CREATE_TRACE_POINTS |
| |
| #include "dm_services_types.h" |
| #include "dc.h" |
| #include "dc_link_dp.h" |
| #include "link_enc_cfg.h" |
| #include "dc/inc/core_types.h" |
| #include "dal_asic_id.h" |
| #include "dmub/dmub_srv.h" |
| #include "dc/inc/hw/dmcu.h" |
| #include "dc/inc/hw/abm.h" |
| #include "dc/dc_dmub_srv.h" |
| #include "dc/dc_edid_parser.h" |
| #include "dc/dc_stat.h" |
| #include "amdgpu_dm_trace.h" |
| |
| #include "vid.h" |
| #include "amdgpu.h" |
| #include "amdgpu_display.h" |
| #include "amdgpu_ucode.h" |
| #include "atom.h" |
| #include "amdgpu_dm.h" |
| #include "amdgpu_dm_plane.h" |
| #include "amdgpu_dm_crtc.h" |
| #ifdef CONFIG_DRM_AMD_DC_HDCP |
| #include "amdgpu_dm_hdcp.h" |
| #include <drm/display/drm_hdcp_helper.h> |
| #endif |
| #include "amdgpu_pm.h" |
| #include "amdgpu_atombios.h" |
| |
| #include "amd_shared.h" |
| #include "amdgpu_dm_irq.h" |
| #include "dm_helpers.h" |
| #include "amdgpu_dm_mst_types.h" |
| #if defined(CONFIG_DEBUG_FS) |
| #include "amdgpu_dm_debugfs.h" |
| #endif |
| #include "amdgpu_dm_psr.h" |
| |
| #include "ivsrcid/ivsrcid_vislands30.h" |
| |
| #include "i2caux_interface.h" |
| #include <linux/module.h> |
| #include <linux/moduleparam.h> |
| #include <linux/types.h> |
| #include <linux/pm_runtime.h> |
| #include <linux/pci.h> |
| #include <linux/firmware.h> |
| #include <linux/component.h> |
| #include <linux/dmi.h> |
| |
| #include <drm/display/drm_dp_mst_helper.h> |
| #include <drm/display/drm_hdmi_helper.h> |
| #include <drm/drm_atomic.h> |
| #include <drm/drm_atomic_uapi.h> |
| #include <drm/drm_atomic_helper.h> |
| #include <drm/drm_blend.h> |
| #include <drm/drm_fourcc.h> |
| #include <drm/drm_edid.h> |
| #include <drm/drm_vblank.h> |
| #include <drm/drm_audio_component.h> |
| #include <drm/drm_gem_atomic_helper.h> |
| #include <drm/drm_plane_helper.h> |
| |
| #include <acpi/video.h> |
| |
| #include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" |
| |
| #include "dcn/dcn_1_0_offset.h" |
| #include "dcn/dcn_1_0_sh_mask.h" |
| #include "soc15_hw_ip.h" |
| #include "soc15_common.h" |
| #include "vega10_ip_offset.h" |
| |
| #include "gc/gc_11_0_0_offset.h" |
| #include "gc/gc_11_0_0_sh_mask.h" |
| |
| #include "modules/inc/mod_freesync.h" |
| #include "modules/power/power_helpers.h" |
| #include "modules/inc/mod_info_packet.h" |
| |
| #define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" |
| MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); |
| #define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin" |
| MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB); |
| #define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin" |
| MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB); |
| #define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin" |
| MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB); |
| #define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin" |
| MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB); |
| #define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin" |
| MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB); |
| #define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin" |
| MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB); |
| #define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin" |
| MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB); |
| #define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin" |
| MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB); |
| #define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin" |
| MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB); |
| #define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin" |
| MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB); |
| |
| #define FIRMWARE_DCN_V3_2_0_DMCUB "amdgpu/dcn_3_2_0_dmcub.bin" |
| MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB); |
| #define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin" |
| MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB); |
| |
| #define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" |
| MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); |
| |
| #define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin" |
| MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); |
| |
| /* Number of bytes in PSP header for firmware. */ |
| #define PSP_HEADER_BYTES 0x100 |
| |
| /* Number of bytes in PSP footer for firmware. */ |
| #define PSP_FOOTER_BYTES 0x100 |
| |
| /** |
| * DOC: overview |
| * |
| * The AMDgpu display manager, **amdgpu_dm** (or even simpler, |
| * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM |
| * requests into DC requests, and DC responses into DRM responses. |
| * |
| * The root control structure is &struct amdgpu_display_manager. |
| */ |
| |
| /* basic init/fini API */ |
| static int amdgpu_dm_init(struct amdgpu_device *adev); |
| static void amdgpu_dm_fini(struct amdgpu_device *adev); |
| static bool is_freesync_video_mode(const struct drm_display_mode *mode, |
|                                    struct amdgpu_dm_connector *aconnector); |
| |
| static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link) |
| { |
| switch (link->dpcd_caps.dongle_type) { |
| case DISPLAY_DONGLE_NONE: |
| return DRM_MODE_SUBCONNECTOR_Native; |
| case DISPLAY_DONGLE_DP_VGA_CONVERTER: |
| return DRM_MODE_SUBCONNECTOR_VGA; |
| case DISPLAY_DONGLE_DP_DVI_CONVERTER: |
| case DISPLAY_DONGLE_DP_DVI_DONGLE: |
| return DRM_MODE_SUBCONNECTOR_DVID; |
| case DISPLAY_DONGLE_DP_HDMI_CONVERTER: |
| case DISPLAY_DONGLE_DP_HDMI_DONGLE: |
| return DRM_MODE_SUBCONNECTOR_HDMIA; |
| case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE: |
| default: |
| return DRM_MODE_SUBCONNECTOR_Unknown; |
| } |
| } |
| |
| static void update_subconnector_property(struct amdgpu_dm_connector *aconnector) |
| { |
| struct dc_link *link = aconnector->dc_link; |
| struct drm_connector *connector = &aconnector->base; |
| enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown; |
| |
| if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) |
| return; |
| |
| if (aconnector->dc_sink) |
| subconnector = get_subconnector_type(link); |
| |
| drm_object_property_set_value(&connector->base, |
| connector->dev->mode_config.dp_subconnector_property, |
| subconnector); |
| } |
| |
| /* |
| * Initializes drm_device display related structures, based on the information |
| * provided by DAL. The drm structures are: drm_crtc, drm_connector, |
| * drm_encoder and drm_mode_config. |
| * |
| * Returns 0 on success |
| */ |
| static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev); |
| /* removes and deallocates the drm structures, created by the above function */ |
| static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm); |
| |
| static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, |
| struct amdgpu_dm_connector *amdgpu_dm_connector, |
| uint32_t link_index, |
| struct amdgpu_encoder *amdgpu_encoder); |
| static int amdgpu_dm_encoder_init(struct drm_device *dev, |
| struct amdgpu_encoder *aencoder, |
| uint32_t link_index); |
| |
| static int amdgpu_dm_connector_get_modes(struct drm_connector *connector); |
| |
| static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state); |
| |
| static int amdgpu_dm_atomic_check(struct drm_device *dev, |
| struct drm_atomic_state *state); |
| |
| static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); |
| static void handle_hpd_rx_irq(void *param); |
| |
| static bool |
| is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, |
| struct drm_crtc_state *new_crtc_state); |
| /* |
| * dm_vblank_get_counter |
| * |
| * @brief |
| * Get counter for number of vertical blanks |
| * |
| * @param |
| * struct amdgpu_device *adev - [in] desired amdgpu device |
| * int crtc - [in] which CRTC to get the counter from |
| * |
| * @return |
| * Counter for vertical blanks |
| */ |
| static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) |
| { |
| if (crtc >= adev->mode_info.num_crtc) |
| return 0; |
| else { |
| struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; |
| |
| if (acrtc->dm_irq_params.stream == NULL) { |
| DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", |
| crtc); |
| return 0; |
| } |
| |
| return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream); |
| } |
| } |
| |
| static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, |
| u32 *vbl, u32 *position) |
| { |
| uint32_t v_blank_start, v_blank_end, h_position, v_position; |
| |
| if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) |
| return -EINVAL; |
| else { |
| struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc]; |
| |
| if (acrtc->dm_irq_params.stream == NULL) { |
| DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", |
| crtc); |
| return 0; |
| } |
| |
| /* |
| * TODO rework base driver to use values directly. |
| * for now parse it back into reg-format |
| */ |
| dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream, |
| &v_blank_start, |
| &v_blank_end, |
| &h_position, |
| &v_position); |
| |
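| /* Pack into the legacy register layout: vertical value in the low |
| * 16 bits, horizontal/blank-end value in the high 16 bits. |
| */ |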
| *position = v_position | (h_position << 16); |
| *vbl = v_blank_start | (v_blank_end << 16); |
| } |
| |
| return 0; |
| } |
| |
| static bool dm_is_idle(void *handle) |
| { |
| /* XXX todo */ |
| return true; |
| } |
| |
| static int dm_wait_for_idle(void *handle) |
| { |
| /* XXX todo */ |
| return 0; |
| } |
| |
| static bool dm_check_soft_reset(void *handle) |
| { |
| return false; |
| } |
| |
| static int dm_soft_reset(void *handle) |
| { |
| /* XXX todo */ |
| return 0; |
| } |
| |
| static struct amdgpu_crtc * |
| get_crtc_by_otg_inst(struct amdgpu_device *adev, |
| int otg_inst) |
| { |
| struct drm_device *dev = adev_to_drm(adev); |
| struct drm_crtc *crtc; |
| struct amdgpu_crtc *amdgpu_crtc; |
| |
| if (WARN_ON(otg_inst == -1)) |
| return adev->mode_info.crtcs[0]; |
| |
| list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { |
| amdgpu_crtc = to_amdgpu_crtc(crtc); |
| |
| if (amdgpu_crtc->otg_inst == otg_inst) |
| return amdgpu_crtc; |
| } |
| |
| return NULL; |
| } |
| |
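| /* |
| * A DC timing adjustment is needed when entering fixed-rate VRR or |
| * whenever the VRR active state differs between the old and new |
| * CRTC state. |
| */ |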
| static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, |
| struct dm_crtc_state *new_state) |
| { |
| if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) |
| return true; |
| else if (amdgpu_dm_vrr_active(old_state) != amdgpu_dm_vrr_active(new_state)) |
| return true; |
| else |
| return false; |
| } |
| |
| /** |
| * dm_pflip_high_irq() - Handle pageflip interrupt |
| * @interrupt_params: interrupt parameters, used to look up the CRTC that flipped |
| * |
| * Handles the pageflip interrupt by notifying all interested parties |
| * that the pageflip has been completed. |
| */ |
| static void dm_pflip_high_irq(void *interrupt_params) |
| { |
| struct amdgpu_crtc *amdgpu_crtc; |
| struct common_irq_params *irq_params = interrupt_params; |
| struct amdgpu_device *adev = irq_params->adev; |
| unsigned long flags; |
| struct drm_pending_vblank_event *e; |
| uint32_t vpos, hpos, v_blank_start, v_blank_end; |
| bool vrr_active; |
| |
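| /* Page-flip IRQ sources are laid out contiguously per OTG, so the |
| * offset from the base gives the OTG instance to look up. |
| */ |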
| amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); |
| |
| /* IRQ could occur when in initial stage */ |
| /* TODO work and BO cleanup */ |
| if (amdgpu_crtc == NULL) { |
| DC_LOG_PFLIP("CRTC is null, returning.\n"); |
| return; |
| } |
| |
| spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); |
| |
| if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { |
| DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d != AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n", |
| amdgpu_crtc->pflip_status, |
| AMDGPU_FLIP_SUBMITTED, |
| amdgpu_crtc->crtc_id, |
| amdgpu_crtc); |
| spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); |
| return; |
| } |
| |
| /* page flip completed. */ |
| e = amdgpu_crtc->event; |
| amdgpu_crtc->event = NULL; |
| |
| WARN_ON(!e); |
| |
| vrr_active = amdgpu_dm_vrr_active_irq(amdgpu_crtc); |
| |
| /* Fixed refresh rate, or VRR scanout position outside front-porch? */ |
| if (!vrr_active || |
| !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start, |
| &v_blank_end, &hpos, &vpos) || |
| (vpos < v_blank_start)) { |
| /* Update to correct count and vblank timestamp if racing with |
| * vblank irq. This also updates to the correct vblank timestamp |
| * even in VRR mode, as scanout is past the front-porch atm. |
| */ |
| drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); |
| |
| /* Wake up userspace by sending the pageflip event with proper |
| * count and timestamp of vblank of flip completion. |
| */ |
| if (e) { |
| drm_crtc_send_vblank_event(&amdgpu_crtc->base, e); |
| |
| /* Event sent, so done with vblank for this flip */ |
| drm_crtc_vblank_put(&amdgpu_crtc->base); |
| } |
| } else if (e) { |
| /* VRR active and inside front-porch: vblank count and |
| * timestamp for pageflip event will only be up to date after |
| * drm_crtc_handle_vblank() has been executed from late vblank |
| * irq handler after start of back-porch (vline 0). We queue the |
| * pageflip event for send-out by drm_crtc_handle_vblank() with |
| * updated timestamp and count, once it runs after us. |
| * |
| * We need to open-code this instead of using the helper |
| * drm_crtc_arm_vblank_event(), as that helper would |
| * call drm_crtc_accurate_vblank_count(), which we must |
| * not call in VRR mode while we are in front-porch! |
| */ |
| |
| /* sequence will be replaced by real count during send-out. */ |
| e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base); |
| e->pipe = amdgpu_crtc->crtc_id; |
| |
| list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list); |
| e = NULL; |
| } |
| |
| /* Keep track of vblank of this flip for flip throttling. We use the |
| * cooked hw counter, as that one incremented at start of this vblank |
| * of pageflip completion, so last_flip_vblank is the forbidden count |
| * for queueing new pageflips if vsync + VRR is enabled. |
| */ |
| amdgpu_crtc->dm_irq_params.last_flip_vblank = |
| amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base); |
| |
| amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; |
| spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); |
| |
| DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", |
| amdgpu_crtc->crtc_id, amdgpu_crtc, |
| vrr_active, (int) !e); |
| } |
| |
| static void dm_vupdate_high_irq(void *interrupt_params) |
| { |
| struct common_irq_params *irq_params = interrupt_params; |
| struct amdgpu_device *adev = irq_params->adev; |
| struct amdgpu_crtc *acrtc; |
| struct drm_device *drm_dev; |
| struct drm_vblank_crtc *vblank; |
| ktime_t frame_duration_ns, previous_timestamp; |
| unsigned long flags; |
| int vrr_active; |
| |
| acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE); |
| |
| if (acrtc) { |
| vrr_active = amdgpu_dm_vrr_active_irq(acrtc); |
| drm_dev = acrtc->base.dev; |
| vblank = &drm_dev->vblank[acrtc->base.index]; |
| previous_timestamp = atomic64_read(&irq_params->previous_timestamp); |
| frame_duration_ns = vblank->time - previous_timestamp; |
| |
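| /* A positive delta between consecutive vblank timestamps gives the |
| * measured frame time; NSEC_PER_SEC / frame time is the resulting |
| * refresh rate in Hz for the tracepoint. |
| */ |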
| if (frame_duration_ns > 0) { |
| trace_amdgpu_refresh_rate_track(acrtc->base.index, |
| frame_duration_ns, |
| ktime_divns(NSEC_PER_SEC, frame_duration_ns)); |
| atomic64_set(&irq_params->previous_timestamp, vblank->time); |
| } |
| |
| DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n", |
| acrtc->crtc_id, |
| vrr_active); |
| |
| /* Core vblank handling is done here after end of front-porch in |
| * vrr mode, as vblank timestamping will give valid results |
| * while now done after front-porch. This will also deliver |
| * page-flip completion events that have been queued to us |
| * if a pageflip happened inside front-porch. |
| */ |
| if (vrr_active) { |
| dm_crtc_handle_vblank(acrtc); |
| |
| /* BTR processing for pre-DCE12 ASICs */ |
| if (acrtc->dm_irq_params.stream && |
| adev->family < AMDGPU_FAMILY_AI) { |
| spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); |
| mod_freesync_handle_v_update( |
| adev->dm.freesync_module, |
| acrtc->dm_irq_params.stream, |
| &acrtc->dm_irq_params.vrr_params); |
| |
| dc_stream_adjust_vmin_vmax( |
| adev->dm.dc, |
| acrtc->dm_irq_params.stream, |
| &acrtc->dm_irq_params.vrr_params.adjust); |
| spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); |
| } |
| } |
| } |
| } |
| |
| /** |
| * dm_crtc_high_irq() - Handles CRTC interrupt |
| * @interrupt_params: used for determining the CRTC instance |
| * |
| * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK |
| * event handler. |
| */ |
| static void dm_crtc_high_irq(void *interrupt_params) |
| { |
| struct common_irq_params *irq_params = interrupt_params; |
| struct amdgpu_device *adev = irq_params->adev; |
| struct amdgpu_crtc *acrtc; |
| unsigned long flags; |
| int vrr_active; |
| |
| acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); |
| if (!acrtc) |
| return; |
| |
| vrr_active = amdgpu_dm_vrr_active_irq(acrtc); |
| |
| DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, |
| vrr_active, acrtc->dm_irq_params.active_planes); |
| |
| /* |
| * Core vblank handling at start of front-porch is only possible |
| * in non-vrr mode, as only there vblank timestamping will give |
| * valid results while done in front-porch. Otherwise defer it |
| * to dm_vupdate_high_irq after end of front-porch. |
| */ |
| if (!vrr_active) |
| dm_crtc_handle_vblank(acrtc); |
| |
| /* |
| * Following stuff must happen at start of vblank, for crc |
| * computation and below-the-range btr support in vrr mode. |
| */ |
| amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); |
| |
| /* BTR updates need to happen before VUPDATE on Vega and above. */ |
| if (adev->family < AMDGPU_FAMILY_AI) |
| return; |
| |
| spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); |
| |
| if (acrtc->dm_irq_params.stream && |
| acrtc->dm_irq_params.vrr_params.supported && |
| acrtc->dm_irq_params.freesync_config.state == |
| VRR_STATE_ACTIVE_VARIABLE) { |
| mod_freesync_handle_v_update(adev->dm.freesync_module, |
| acrtc->dm_irq_params.stream, |
| &acrtc->dm_irq_params.vrr_params); |
| |
| dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream, |
| &acrtc->dm_irq_params.vrr_params.adjust); |
| } |
| |
| /* |
| * If there aren't any active_planes then DCH HUBP may be clock-gated. |
| * In that case, pageflip completion interrupts won't fire and pageflip |
| * completion events won't get delivered. Prevent this by sending |
| * pending pageflip events from here if a flip is still pending. |
| * |
| * If any planes are enabled, use dm_pflip_high_irq() instead, to |
| * avoid race conditions between flip programming and completion, |
| * which could cause too early flip completion events. |
| */ |
| if (adev->family >= AMDGPU_FAMILY_RV && |
| acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED && |
| acrtc->dm_irq_params.active_planes == 0) { |
| if (acrtc->event) { |
| drm_crtc_send_vblank_event(&acrtc->base, acrtc->event); |
| acrtc->event = NULL; |
| drm_crtc_vblank_put(&acrtc->base); |
| } |
| acrtc->pflip_status = AMDGPU_FLIP_NONE; |
| } |
| |
| spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); |
| } |
| |
| #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) |
| /** |
| * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for |
| * DCN generation ASICs |
| * @interrupt_params: interrupt parameters |
| * |
| * Used to set crc window/read out crc value at vertical line 0 position |
| */ |
| static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) |
| { |
| struct common_irq_params *irq_params = interrupt_params; |
| struct amdgpu_device *adev = irq_params->adev; |
| struct amdgpu_crtc *acrtc; |
| |
| acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0); |
| |
| if (!acrtc) |
| return; |
| |
| amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base); |
| } |
| #endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ |
| |
| /** |
| * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command. |
| * @adev: amdgpu_device pointer |
| * @notify: dmub notification structure |
| * |
| * DMUB AUX or SET_CONFIG command completion processing callback. |
| * Copies the DMUB notification to DM, to be read by the AUX-command-issuing |
| * thread, and signals the event to wake up that thread. |
| */ |
| static void dmub_aux_setconfig_callback(struct amdgpu_device *adev, |
| struct dmub_notification *notify) |
| { |
| if (adev->dm.dmub_notify) |
| memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification)); |
| if (notify->type == DMUB_NOTIFICATION_AUX_REPLY) |
| complete(&adev->dm.dmub_aux_transfer_done); |
| } |
| |
| /** |
| * dmub_hpd_callback - DMUB HPD interrupt processing callback. |
| * @adev: amdgpu_device pointer |
| * @notify: dmub notification structure |
| * |
| * DMUB HPD interrupt processing callback. Looks up the display index through |
| * the link index and calls a helper to do the processing. |
| */ |
| static void dmub_hpd_callback(struct amdgpu_device *adev, |
| struct dmub_notification *notify) |
| { |
| struct amdgpu_dm_connector *aconnector; |
| struct amdgpu_dm_connector *hpd_aconnector = NULL; |
| struct drm_connector *connector; |
| struct drm_connector_list_iter iter; |
| struct dc_link *link; |
| uint8_t link_index = 0; |
| struct drm_device *dev; |
| |
| if (adev == NULL) |
| return; |
| |
| if (notify == NULL) { |
| DRM_ERROR("DMUB HPD callback notification was NULL"); |
| return; |
| } |
| |
| if (notify->link_index >= adev->dm.dc->link_count) { |
| DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index); |
| return; |
| } |
| |
| link_index = notify->link_index; |
| link = adev->dm.dc->links[link_index]; |
| dev = adev->dm.ddev; |
| |
| drm_connector_list_iter_begin(dev, &iter); |
| drm_for_each_connector_iter(connector, &iter) { |
| aconnector = to_amdgpu_dm_connector(connector); |
| if (link && aconnector->dc_link == link) { |
| DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index); |
| hpd_aconnector = aconnector; |
| break; |
| } |
| } |
| drm_connector_list_iter_end(&iter); |
| |
| if (hpd_aconnector) { |
| if (notify->type == DMUB_NOTIFICATION_HPD) |
| handle_hpd_irq_helper(hpd_aconnector); |
| else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) |
| handle_hpd_rx_irq(hpd_aconnector); |
| } |
| } |
| |
| /** |
| * register_dmub_notify_callback - Sets callback for DMUB notify |
| * @adev: amdgpu_device pointer |
| * @type: Type of dmub notification |
| * @callback: Dmub interrupt callback function |
| * @dmub_int_thread_offload: offload indicator |
| * |
| * API to register a dmub callback handler for a dmub notification. |
| * Also sets an indicator for whether callback processing is to be offloaded |
| * to the dmub interrupt handling thread. |
| * Return: true if successfully registered, false if the callback is NULL or |
| * the notification type is out of range |
| */ |
| static bool register_dmub_notify_callback(struct amdgpu_device *adev, |
| enum dmub_notification_type type, |
| dmub_notify_interrupt_callback_t callback, |
| bool dmub_int_thread_offload) |
| { |
| if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) { |
| adev->dm.dmub_callback[type] = callback; |
| adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload; |
| } else { |
| return false; |
| } |
| |
| return true; |
| } |
| |
| static void dm_handle_hpd_work(struct work_struct *work) |
| { |
| struct dmub_hpd_work *dmub_hpd_wrk; |
| |
| dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work); |
| |
| if (!dmub_hpd_wrk->dmub_notify) { |
| DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL"); |
| return; |
| } |
| |
| if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) { |
| dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev, |
| dmub_hpd_wrk->dmub_notify); |
| } |
| |
| kfree(dmub_hpd_wrk->dmub_notify); |
| kfree(dmub_hpd_wrk); |
| } |
| |
| #define DMUB_TRACE_MAX_READ 64 |
| /** |
| * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt |
| * @interrupt_params: used for determining the Outbox instance |
| * |
| * Handles the Outbox interrupt by draining pending DMUB notifications and |
| * trace-buffer entries. |
| */ |
| static void dm_dmub_outbox1_low_irq(void *interrupt_params) |
| { |
| struct dmub_notification notify; |
| struct common_irq_params *irq_params = interrupt_params; |
| struct amdgpu_device *adev = irq_params->adev; |
| struct amdgpu_display_manager *dm = &adev->dm; |
| struct dmcub_trace_buf_entry entry = { 0 }; |
| uint32_t count = 0; |
| struct dmub_hpd_work *dmub_hpd_wrk; |
| struct dc_link *plink = NULL; |
| |
| if (dc_enable_dmub_notifications(adev->dm.dc) && |
| irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { |
| |
| do { |
| dc_stat_get_dmub_notification(adev->dm.dc, ¬ify); |
| if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) { |
| DRM_ERROR("DM: notify type %d invalid!", notify.type); |
| continue; |
| } |
| if (!dm->dmub_callback[notify.type]) { |
| DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type); |
| continue; |
| } |
| if (dm->dmub_thread_offload[notify.type]) { |
| dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); |
| if (!dmub_hpd_wrk) { |
| DRM_ERROR("Failed to allocate dmub_hpd_wrk"); |
| return; |
| } |
| dmub_hpd_wrk->dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_ATOMIC); |
| if (!dmub_hpd_wrk->dmub_notify) { |
| kfree(dmub_hpd_wrk); |
| DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify"); |
| return; |
| } |
| INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work); |
| memcpy(dmub_hpd_wrk->dmub_notify, &notify, sizeof(struct dmub_notification)); |
| dmub_hpd_wrk->adev = adev; |
| if (notify.type == DMUB_NOTIFICATION_HPD) { |
| plink = adev->dm.dc->links[notify.link_index]; |
| if (plink) { |
| plink->hpd_status = |
| notify.hpd_status == DP_HPD_PLUG; |
| } |
| } |
| queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work); |
| } else { |
| dm->dmub_callback[notify.type](adev, ¬ify); |
| } |
| } while (notify.pending_notification); |
| } |
| |
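| /* Drain up to DMUB_TRACE_MAX_READ entries from the DMCUB trace |
| * buffer and forward each one to the tracepoint and debug log. |
| */ |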
| do { |
| if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) { |
| trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count, |
| entry.param0, entry.param1); |
| |
| DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", |
| entry.trace_code, entry.tick_count, entry.param0, entry.param1); |
| } else { |
| break; |
| } |
| |
| count++; |
| |
| } while (count <= DMUB_TRACE_MAX_READ); |
| |
| if (count > DMUB_TRACE_MAX_READ) |
| DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ"); |
| } |
| |
| static int dm_set_clockgating_state(void *handle, |
| enum amd_clockgating_state state) |
| { |
| return 0; |
| } |
| |
| static int dm_set_powergating_state(void *handle, |
| enum amd_powergating_state state) |
| { |
| return 0; |
| } |
| |
| /* Prototypes of private functions */ |
| static int dm_early_init(void *handle); |
| |
| /* Allocate memory for FBC compressed data */ |
| static void amdgpu_dm_fbc_init(struct drm_connector *connector) |
| { |
| struct drm_device *dev = connector->dev; |
| struct amdgpu_device *adev = drm_to_adev(dev); |
| struct dm_compressor_info *compressor = &adev->dm.compressor; |
| struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector); |
| struct drm_display_mode *mode; |
| unsigned long max_size = 0; |
| |
| if (adev->dm.dc->fbc_compressor == NULL) |
| return; |
| |
| if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP) |
| return; |
| |
| if (compressor->bo_ptr) |
| return; |
| |
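| /* Size the compressor buffer for the largest mode the connector |
| * exposes, at 4 bytes per pixel. |
| */ |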
| list_for_each_entry(mode, &connector->modes, head) { |
| if (max_size < mode->htotal * mode->vtotal) |
| max_size = mode->htotal * mode->vtotal; |
| } |
| |
| if (max_size) { |
| int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE, |
| AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr, |
| &compressor->gpu_addr, &compressor->cpu_addr); |
| |
| if (r) { |
| DRM_ERROR("DM: Failed to initialize FBC\n"); |
| } else { |
| adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr; |
| DRM_INFO("DM: FBC alloc %lu\n", max_size*4); |
| } |
| |
| } |
| } |
| |
| static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port, |
| int pipe, bool *enabled, |
| unsigned char *buf, int max_bytes) |
| { |
| struct drm_device *dev = dev_get_drvdata(kdev); |
| struct amdgpu_device *adev = drm_to_adev(dev); |
| struct drm_connector *connector; |
| struct drm_connector_list_iter conn_iter; |
| struct amdgpu_dm_connector *aconnector; |
| int ret = 0; |
| |
| *enabled = false; |
| |
| mutex_lock(&adev->dm.audio_lock); |
| |
| drm_connector_list_iter_begin(dev, &conn_iter); |
| drm_for_each_connector_iter(connector, &conn_iter) { |
| aconnector = to_amdgpu_dm_connector(connector); |
| if (aconnector->audio_inst != port) |
| continue; |
| |
| *enabled = true; |
| ret = drm_eld_size(connector->eld); |
| memcpy(buf, connector->eld, min(max_bytes, ret)); |
| |
| break; |
| } |
| drm_connector_list_iter_end(&conn_iter); |
| |
| mutex_unlock(&adev->dm.audio_lock); |
| |
| DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled); |
| |
| return ret; |
| } |
| |
| static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = { |
| .get_eld = amdgpu_dm_audio_component_get_eld, |
| }; |
| |
| static int amdgpu_dm_audio_component_bind(struct device *kdev, |
| struct device *hda_kdev, void *data) |
| { |
| struct drm_device *dev = dev_get_drvdata(kdev); |
| struct amdgpu_device *adev = drm_to_adev(dev); |
| struct drm_audio_component *acomp = data; |
| |
| acomp->ops = &amdgpu_dm_audio_component_ops; |
| acomp->dev = kdev; |
| adev->dm.audio_component = acomp; |
| |
| return 0; |
| } |
| |
| static void amdgpu_dm_audio_component_unbind(struct device *kdev, |
| struct device *hda_kdev, void *data) |
| { |
| struct drm_device *dev = dev_get_drvdata(kdev); |
| struct amdgpu_device *adev = drm_to_adev(dev); |
| struct drm_audio_component *acomp = data; |
| |
| acomp->ops = NULL; |
| acomp->dev = NULL; |
| adev->dm.audio_component = NULL; |
| } |
| |
| static const struct component_ops amdgpu_dm_audio_component_bind_ops = { |
| .bind = amdgpu_dm_audio_component_bind, |
| .unbind = amdgpu_dm_audio_component_unbind, |
| }; |
| |
| static int amdgpu_dm_audio_init(struct amdgpu_device *adev) |
| { |
| int i, ret; |
| |
| if (!amdgpu_audio) |
| return 0; |
| |
| adev->mode_info.audio.enabled = true; |
| |
| adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count; |
| |
| for (i = 0; i < adev->mode_info.audio.num_pins; i++) { |
| adev->mode_info.audio.pin[i].channels = -1; |
| adev->mode_info.audio.pin[i].rate = -1; |
| adev->mode_info.audio.pin[i].bits_per_sample = -1; |
| adev->mode_info.audio.pin[i].status_bits = 0; |
| adev->mode_info.audio.pin[i].category_code = 0; |
| adev->mode_info.audio.pin[i].connected = false; |
| adev->mode_info.audio.pin[i].id = |
| adev->dm.dc->res_pool->audios[i]->inst; |
| adev->mode_info.audio.pin[i].offset = 0; |
| } |
| |
| ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops); |
| if (ret < 0) |
| return ret; |
| |
| adev->dm.audio_registered = true; |
| |
| return 0; |
| } |
| |
| static void amdgpu_dm_audio_fini(struct amdgpu_device *adev) |
| { |
| if (!amdgpu_audio) |
| return; |
| |
| if (!adev->mode_info.audio.enabled) |
| return; |
| |
| if (adev->dm.audio_registered) { |
| component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops); |
| adev->dm.audio_registered = false; |
| } |
| |
| /* TODO: Disable audio? */ |
| |
| adev->mode_info.audio.enabled = false; |
| } |
| |
| static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin) |
| { |
| struct drm_audio_component *acomp = adev->dm.audio_component; |
| |
| if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) { |
| DRM_DEBUG_KMS("Notify ELD: %d\n", pin); |
| |
| acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, |
| pin, -1); |
| } |
| } |
| |
| static int dm_dmub_hw_init(struct amdgpu_device *adev) |
| { |
| const struct dmcub_firmware_header_v1_0 *hdr; |
| struct dmub_srv *dmub_srv = adev->dm.dmub_srv; |
| struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; |
| const struct firmware *dmub_fw = adev->dm.dmub_fw; |
| struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; |
| struct abm *abm = adev->dm.dc->res_pool->abm; |
| struct dmub_srv_hw_params hw_params; |
| enum dmub_status status; |
| const unsigned char *fw_inst_const, *fw_bss_data; |
| uint32_t i, fw_inst_const_size, fw_bss_data_size; |
| bool has_hw_support; |
| |
| if (!dmub_srv) { |
| /* DMUB isn't supported on the ASIC. */ |
| return 0; |
| } |
| |
| if (!fb_info) { |
| DRM_ERROR("No framebuffer info for DMUB service.\n"); |
| return -EINVAL; |
| } |
| |
| if (!dmub_fw) { |
| /* Firmware required for DMUB support. */ |
| DRM_ERROR("No firmware provided for DMUB.\n"); |
| return -EINVAL; |
| } |
| |
| status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support); |
| if (status != DMUB_STATUS_OK) { |
| DRM_ERROR("Error checking HW support for DMUB: %d\n", status); |
| return -EINVAL; |
| } |
| |
| if (!has_hw_support) { |
| DRM_INFO("DMUB unsupported on ASIC\n"); |
| return 0; |
| } |
| |
| /* Reset DMCUB if it was previously running - before we overwrite its memory. */ |
| status = dmub_srv_hw_reset(dmub_srv); |
| if (status != DMUB_STATUS_OK) |
| DRM_WARN("Error resetting DMUB HW: %d\n", status); |
| |
| hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; |
| |
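| /* |
| * Layout of the DMCUB binary: [PSP header][inst const][PSP footer], |
| * followed by the bss/data region; skip the PSP header to reach the |
| * executable instruction/constant section. |
| */ |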
| fw_inst_const = dmub_fw->data + |
| le32_to_cpu(hdr->header.ucode_array_offset_bytes) + |
| PSP_HEADER_BYTES; |
| |
| fw_bss_data = dmub_fw->data + |
| le32_to_cpu(hdr->header.ucode_array_offset_bytes) + |
| le32_to_cpu(hdr->inst_const_bytes); |
| |
| /* Copy firmware and bios info into FB memory. */ |
| fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - |
| PSP_HEADER_BYTES - PSP_FOOTER_BYTES; |
| |
| fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes); |
| |
| /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP, |
| * amdgpu_ucode_init_single_fw will load dmub firmware |
| * fw_inst_const part to cw0; otherwise, the firmware back door load |
| * will be done by dm_dmub_hw_init |
| */ |
| if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { |
| memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, |
| fw_inst_const_size); |
| } |
| |
| if (fw_bss_data_size) |
| memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, |
| fw_bss_data, fw_bss_data_size); |
| |
| /* Copy firmware bios info into FB memory. */ |
| memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios, |
| adev->bios_size); |
| |
| /* Reset regions that need to be reset. */ |
| memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0, |
| fb_info->fb[DMUB_WINDOW_4_MAILBOX].size); |
| |
| memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0, |
| fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size); |
| |
| memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0, |
| fb_info->fb[DMUB_WINDOW_6_FW_STATE].size); |
| |
| /* Initialize hardware. */ |
| memset(&hw_params, 0, sizeof(hw_params)); |
| hw_params.fb_base = adev->gmc.fb_start; |
| hw_params.fb_offset = adev->vm_manager.vram_base_offset; |
| |
| /* backdoor load firmware and trigger dmub running */ |
| if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) |
| hw_params.load_inst_const = true; |
| |
| if (dmcu) |
| hw_params.psp_version = dmcu->psp_version; |
| |
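| /* Hand every allocated framebuffer region to the DMUB service so it |
| * can program the corresponding cache windows. |
| */ |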
| for (i = 0; i < fb_info->num_fb; ++i) |
| hw_params.fb[i] = &fb_info->fb[i]; |
| |
| switch (adev->ip_versions[DCE_HWIP][0]) { |
| case IP_VERSION(3, 1, 3): |
| case IP_VERSION(3, 1, 4): |
| hw_params.dpia_supported = true; |
| hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; |
| break; |
| default: |
| break; |
| } |
| |
| status = dmub_srv_hw_init(dmub_srv, &hw_params); |
| if (status != DMUB_STATUS_OK) { |
| DRM_ERROR("Error initializing DMUB HW: %d\n", status); |
| return -EINVAL; |
| } |
| |
| /* Wait for firmware load to finish. */ |
| status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); |
| if (status != DMUB_STATUS_OK) |
| DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); |
| |
| /* Init DMCU and ABM if available. */ |
| if (dmcu && abm) { |
| dmcu->funcs->dmcu_init(dmcu); |
| abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu); |
| } |
| |
| if (!adev->dm.dc->ctx->dmub_srv) |
| adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv); |
| if (!adev->dm.dc->ctx->dmub_srv) { |
| DRM_ERROR("Couldn't allocate DC DMUB server!\n"); |
| return -ENOMEM; |
| } |
| |
| DRM_INFO("DMUB hardware initialized: version=0x%08X\n", |
| adev->dm.dmcub_fw_version); |
| |
| return 0; |
| } |
| |
| static void dm_dmub_hw_resume(struct amdgpu_device *adev) |
| { |
| struct dmub_srv *dmub_srv = adev->dm.dmub_srv; |
| enum dmub_status status; |
| bool init; |
| |
| if (!dmub_srv) { |
| /* DMUB isn't supported on the ASIC. */ |
| return; |
| } |
| |
| status = dmub_srv_is_hw_init(dmub_srv, &init); |
| if (status != DMUB_STATUS_OK) |
| DRM_WARN("DMUB hardware init check failed: %d\n", status); |
| |
| if (status == DMUB_STATUS_OK && init) { |
| /* Wait for firmware load to finish. */ |
| status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); |
| if (status != DMUB_STATUS_OK) |
| DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); |
| } else { |
| /* Perform the full hardware initialization. */ |
| dm_dmub_hw_init(adev); |
| } |
| } |
| |
| static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config) |
| { |
| uint64_t pt_base; |
| uint32_t logical_addr_low; |
| uint32_t logical_addr_high; |
| uint32_t agp_base, agp_bot, agp_top; |
| PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base; |
| |
| memset(pa_config, 0, sizeof(*pa_config)); |
| |
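| /* |
| * Note the register granularities implied by the shifts below: the |
| * system aperture is programmed in 256 KiB units (>> 18), the AGP |
| * aperture in 16 MiB units (>> 24), and page-table addresses in |
| * 4 KiB pages (>> 12). |
| */ |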
| logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; |
| pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); |
| |
| if (adev->apu_flags & AMD_APU_IS_RAVEN2) { |
| /* |
| * Raven2 has a HW issue preventing it from using VRAM that lies |
| * above MC_VM_SYSTEM_APERTURE_HIGH_ADDR. As a workaround, raise |
| * the system aperture high address by 1 to avoid the VM fault |
| * and hardware hang. |
| */ |
| logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18); |
| } else { |
| logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18; |
| } |
| |
| agp_base = 0; |
| agp_bot = adev->gmc.agp_start >> 24; |
| agp_top = adev->gmc.agp_end >> 24; |
| |
| page_table_start.high_part = (u32)(adev->gmc.gart_start >> 44) & 0xF; |
| page_table_start.low_part = (u32)(adev->gmc.gart_start >> 12); |
| page_table_end.high_part = (u32)(adev->gmc.gart_end >> 44) & 0xF; |
| page_table_end.low_part = (u32)(adev->gmc.gart_end >> 12); |
| page_table_base.high_part = upper_32_bits(pt_base) & 0xF; |
| page_table_base.low_part = lower_32_bits(pt_base); |
| |
| pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18; |
| pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18; |
| |
| pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24; |
| pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24; |
| pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24; |
| |
| pa_config->system_aperture.fb_base = adev->gmc.fb_start; |
| pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset; |
| pa_config->system_aperture.fb_top = adev->gmc.fb_end; |
| |
| pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12; |
| pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12; |
| pa_config->gart_config.page_table_base_addr = page_table_base.quad_part; |
| |
| pa_config->is_hvm_enabled = 0; |
| } |
| |
| static void dm_handle_hpd_rx_offload_work(struct work_struct *work) |
| { |
| struct hpd_rx_irq_offload_work *offload_work; |
| struct amdgpu_dm_connector *aconnector; |
| struct dc_link *dc_link; |
| struct amdgpu_device *adev; |
| enum dc_connection_type new_connection_type = dc_connection_none; |
| unsigned long flags; |
| |
| offload_work = container_of(work, struct hpd_rx_irq_offload_work, work); |
| aconnector = offload_work->offload_wq->aconnector; |
| |
| if (!aconnector) { |
| DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work"); |
| goto skip; |
| } |
| |
| adev = drm_to_adev(aconnector->base.dev); |
| dc_link = aconnector->dc_link; |
| |
| mutex_lock(&aconnector->hpd_lock); |
| if (!dc_link_detect_sink(dc_link, &new_connection_type)) |
| DRM_ERROR("KMS: Failed to detect connector\n"); |
| mutex_unlock(&aconnector->hpd_lock); |
| |
| if (new_connection_type == dc_connection_none) |
| goto skip; |
| |
| if (amdgpu_in_reset(adev)) |
| goto skip; |
| |
| mutex_lock(&adev->dm.dc_lock); |
| if (offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) |
| dc_link_dp_handle_automated_test(dc_link); |
| else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && |
| hpd_rx_irq_check_link_loss_status(dc_link, &offload_work->data) && |
| dc_link_dp_allow_hpd_rx_irq(dc_link)) { |
| dc_link_dp_handle_link_loss(dc_link); |
| spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); |
| offload_work->offload_wq->is_handling_link_loss = false; |
| spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); |
| } |
| mutex_unlock(&adev->dm.dc_lock); |
| |
| skip: |
| kfree(offload_work); |
| } |
| |
| static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc) |
| { |
| int max_caps = dc->caps.max_links; |
| int i = 0; |
| struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL; |
| |
| hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL); |
| |
| if (!hpd_rx_offload_wq) |
| return NULL; |
| |
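| /* One single-threaded workqueue per link keeps that link's HPD RX |
| * IRQ offload work ordered while letting links proceed independently. |
| */ |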
| for (i = 0; i < max_caps; i++) { |
| hpd_rx_offload_wq[i].wq = |
| create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq"); |
| |
| if (hpd_rx_offload_wq[i].wq == NULL) { |
| DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!"); |
| goto out_err; |
| } |
| |
| spin_lock_init(&hpd_rx_offload_wq[i].offload_lock); |
| } |
| |
| return hpd_rx_offload_wq; |
| |
| out_err: |
| for (i = 0; i < max_caps; i++) { |
| if (hpd_rx_offload_wq[i].wq) |
| destroy_workqueue(hpd_rx_offload_wq[i].wq); |
| } |
| kfree(hpd_rx_offload_wq); |
| return NULL; |
| } |
| |
| struct amdgpu_stutter_quirk { |
| u16 chip_vendor; |
| u16 chip_device; |
| u16 subsys_vendor; |
| u16 subsys_device; |
| u8 revision; |
| }; |
| |
| static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = { |
| /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */ |
| { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 }, |
| { 0, 0, 0, 0, 0 }, |
| }; |
| |
| static bool dm_should_disable_stutter(struct pci_dev *pdev) |
| { |
| const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list; |
| |
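| /* The table is terminated by an all-zero entry; a quirk applies only |
| * when the full PCI ID tuple, including revision, matches. |
| */ |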
| while (p && p->chip_device != 0) { |
| if (pdev->vendor == p->chip_vendor && |
| pdev->device == p->chip_device && |
| pdev->subsystem_vendor == p->subsys_vendor && |
| pdev->subsystem_device == p->subsys_device && |
| pdev->revision == p->revision) { |
| return true; |
| } |
| ++p; |
| } |
| return false; |
| } |
| |
| static const struct dmi_system_id hpd_disconnect_quirk_table[] = { |
| { |
| .matches = { |
| DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
| DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), |
| }, |
| }, |
| { |
| .matches = { |
| DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
| DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), |
| }, |
| }, |
| { |
| .matches = { |
| DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
| DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), |
| }, |
| }, |
| { |
| .matches = { |
| DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
| DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"), |
| }, |
| }, |
| { |
| .matches = { |
| DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
| DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"), |
| }, |
| }, |
| { |
| .matches = { |
| DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
| DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"), |
| }, |
| }, |
| { |
| .matches = { |
| DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
| DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"), |
| }, |
| }, |
| { |
| .matches = { |
| DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
| DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"), |
| }, |
| }, |
| { |
| .matches = { |
| DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), |
| DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"), |
| }, |
| }, |
| {} |
| /* TODO: refactor this from a fixed table to a dynamic option */ |
| }; |
| |
| static void retrieve_dmi_info(struct amdgpu_display_manager *dm) |
| { |
| const struct dmi_system_id *dmi_id; |
| |
| dm->aux_hpd_discon_quirk = false; |
| |
| dmi_id = dmi_first_match(hpd_disconnect_quirk_table); |
| if (dmi_id) { |
| dm->aux_hpd_discon_quirk = true; |
| DRM_INFO("aux_hpd_discon_quirk attached\n"); |
| } |
| } |
| |
| static int amdgpu_dm_init(struct amdgpu_device *adev) |
| { |
| struct dc_init_data init_data; |
| #ifdef CONFIG_DRM_AMD_DC_HDCP |
| struct dc_callback_init init_params; |
| #endif |
| int r; |
| |
| adev->dm.ddev = adev_to_drm(adev); |
| adev->dm.adev = adev; |
| |
| /* Zero all the fields */ |
| memset(&init_data, 0, sizeof(init_data)); |
| #ifdef CONFIG_DRM_AMD_DC_HDCP |
| memset(&init_params, 0, sizeof(init_params)); |
| #endif |
| |
| mutex_init(&adev->dm.dpia_aux_lock); |
| mutex_init(&adev->dm.dc_lock); |
| mutex_init(&adev->dm.audio_lock); |
| |
| if (amdgpu_dm_irq_init(adev)) { |
| DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); |
| goto error; |
| } |
| |
| init_data.asic_id.chip_family = adev->family; |
| |
| init_data.asic_id.pci_revision_id = adev->pdev->revision; |
| init_data.asic_id.hw_internal_rev = adev->external_rev_id; |
| init_data.asic_id.chip_id = adev->pdev->device; |
| |
| init_data.asic_id.vram_width = adev->gmc.vram_width; |
| /* TODO: initialize init_data.asic_id.vram_type here!!!! */ |
| init_data.asic_id.atombios_base_address = |
| adev->mode_info.atom_context->bios; |
| |
| init_data.driver = adev; |
| |
| adev->dm.cgs_device = amdgpu_cgs_create_device(adev); |
| |
| if (!adev->dm.cgs_device) { |
| DRM_ERROR("amdgpu: failed to create cgs device.\n"); |
| goto error; |
| } |
| |
| init_data.cgs_device = adev->dm.cgs_device; |
| |
| init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; |
| |
| switch (adev->ip_versions[DCE_HWIP][0]) { |
| case IP_VERSION(2, 1, 0): |
| switch (adev->dm.dmcub_fw_version) { |
| case 0: /* development */ |
| case 0x1: /* linux-firmware.git hash 6d9f399 */ |
| case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */ |
| init_data.flags.disable_dmcu = false; |
| break; |
| default: |
| init_data.flags.disable_dmcu = true; |
| } |
| break; |
| case IP_VERSION(2, 0, 3): |
| init_data.flags.disable_dmcu = true; |
| break; |
| default: |
| break; |
| } |
| |
| switch (adev->asic_type) { |
| case CHIP_CARRIZO: |
| case CHIP_STONEY: |
| init_data.flags.gpu_vm_support = true; |
| break; |
| default: |
| switch (adev->ip_versions[DCE_HWIP][0]) { |
| case IP_VERSION(1, 0, 0): |
| case IP_VERSION(1, 0, 1): |
| /* enable S/G on PCO and RV2 */ |
| if ((adev->apu_flags & AMD_APU_IS_RAVEN2) || |
| (adev->apu_flags & AMD_APU_IS_PICASSO)) |
| init_data.flags.gpu_vm_support = true; |
| break; |
| case IP_VERSION(2, 1, 0): |
| case IP_VERSION(3, 0, 1): |
| case IP_VERSION(3, 1, 2): |
| case IP_VERSION(3, 1, 3): |
| case IP_VERSION(3, 1, 4): |
| case IP_VERSION(3, 1, 5): |
| case IP_VERSION(3, 1, 6): |
| init_data.flags.gpu_vm_support = true; |
| break; |
| default: |
| break; |
| } |
| break; |
| } |
| |
| if (init_data.flags.gpu_vm_support) |
| adev->mode_info.gpu_vm_support = true; |
| |
| if (amdgpu_dc_feature_mask & DC_FBC_MASK) |
| init_data.flags.fbc_support = true; |
| |
| if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK) |
| init_data.flags.multi_mon_pp_mclk_switch = true; |
| |
| if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK) |
| init_data.flags.disable_fractional_pwm = true; |
| |
| if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING) |
| init_data.flags.edp_no_power_sequencing = true; |
| |
| if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A) |
| init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true; |
| if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0) |
| init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true; |
| |
| init_data.flags.seamless_boot_edp_requested = false; |
| |
| if (check_seamless_boot_capability(adev)) { |
| init_data.flags.seamless_boot_edp_requested = true; |
| init_data.flags.allow_seamless_boot_optimization = true; |
| DRM_INFO("Seamless boot condition check passed\n"); |
| } |
| |
| init_data.flags.enable_mipi_converter_optimization = true; |
| |
| init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0]; |
| init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0]; |
| |
| INIT_LIST_HEAD(&adev->dm.da_list); |
| |
| retrieve_dmi_info(&adev->dm); |
| |
| /* Display Core create. */ |
| adev->dm.dc = dc_create(&init_data); |
| |
| if (adev->dm.dc) { |
| DRM_INFO("Display Core initialized with v%s!\n", DC_VER); |
| } else { |
| DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER); |
| goto error; |
| } |
| |
| if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) { |
| adev->dm.dc->debug.force_single_disp_pipe_split = false; |
| adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; |
| } |
| |
| if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) |
| adev->dm.dc->debug.disable_stutter = !(amdgpu_pp_feature_mask & PP_STUTTER_MODE); |
| if (dm_should_disable_stutter(adev->pdev)) |
| adev->dm.dc->debug.disable_stutter = true; |
| |
| if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) |
| adev->dm.dc->debug.disable_stutter = true; |
| |
| if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) |
| adev->dm.dc->debug.disable_dsc = true; |
| |
| if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) |
| adev->dm.dc->debug.disable_clock_gate = true; |
| |
| if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH) |
| adev->dm.dc->debug.force_subvp_mclk_switch = true; |
| |
| adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; |
| |
| /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ |
| adev->dm.dc->debug.ignore_cable_id = true; |
| |
| r = dm_dmub_hw_init(adev); |
| if (r) { |
| DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); |
| goto error; |
| } |
| |
| dc_hardware_init(adev->dm.dc); |
| |
| adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); |
| if (!adev->dm.hpd_rx_offload_wq) { |
| DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); |
| goto error; |
| } |
| |
| if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { |
| struct dc_phy_addr_space_config pa_config; |
| |
| mmhub_read_system_context(adev, &pa_config); |
| |
| // Call the DC init_memory func |
| dc_setup_system_context(adev->dm.dc, &pa_config); |
| } |
| |
| adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); |
| if (!adev->dm.freesync_module) { |
| DRM_ERROR("amdgpu: failed to initialize freesync_module.\n"); |
| } else { |
| DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", |
| adev->dm.freesync_module); |
| } |
| |
| amdgpu_dm_init_color_mod(); |
| |
| if (adev->dm.dc->caps.max_links > 0) { |
| adev->dm.vblank_control_workqueue = |
| create_singlethread_workqueue("dm_vblank_control_workqueue"); |
| if (!adev->dm.vblank_control_workqueue) |
| DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); |
| } |
| |
| #ifdef CONFIG_DRM_AMD_DC_HDCP |
| if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { |
| adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); |
| |
| if (!adev->dm.hdcp_workqueue) |
| DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); |
| else |
| DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); |
| |
| dc_init_callbacks(adev->dm.dc, &init_params); |
| } |
| #endif |
| #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) |
| adev->dm.crc_rd_wrk = amdgpu_dm_crtc_secure_display_create_work(); |
| #endif |
| if (dc_is_dmub_outbox_supported(adev->dm.dc)) { |
| init_completion(&adev->dm.dmub_aux_transfer_done); |
| adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); |
| if (!adev->dm.dmub_notify) { |
| DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); |
| goto error; |
| } |
| |
| adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); |
| if (!adev->dm.delayed_hpd_wq) { |
| DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); |
| goto error; |
| } |
| |
| amdgpu_dm_outbox_init(adev); |
| if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, |
| dmub_aux_setconfig_callback, false)) { |
| DRM_ERROR("amdgpu: fail to register dmub aux callback"); |
| goto error; |
| } |
| if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { |
| DRM_ERROR("amdgpu: fail to register dmub hpd callback"); |
| goto error; |
| } |
| if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { |
| DRM_ERROR("amdgpu: fail to register dmub hpd callback"); |
| goto error; |
| } |
| } |
| |
| /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. |
| * It is expected that DMUB will resend any pending notifications at this point, for |
| * example HPD from DPIA. |
| */ |
| if (dc_is_dmub_outbox_supported(adev->dm.dc)) |
| dc_enable_dmub_outbox(adev->dm.dc); |
| |
| if (amdgpu_dm_initialize_drm_device(adev)) { |
| DRM_ERROR("amdgpu: failed to initialize sw for display support.\n"); |
| goto error; |
| } |
| |
| /* create fake encoders for MST */ |
| dm_dp_create_fake_mst_encoders(adev); |
| |
| /* TODO: Add_display_info? */ |
| |
| /* TODO use dynamic cursor width */ |
| adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size; |
| adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size; |
| |
| if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) { |
| DRM_ERROR("amdgpu: failed to initialize vblank support.\n"); |
| goto error; |
| } |
| |
| DRM_DEBUG_DRIVER("KMS initialized.\n"); |
| |
| return 0; |
| error: |
| amdgpu_dm_fini(adev); |
| |
| return -EINVAL; |
| } |
| |
| static int amdgpu_dm_early_fini(void *handle) |
| { |
| struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| |
| amdgpu_dm_audio_fini(adev); |
| |
| return 0; |
| } |
| |
| static void amdgpu_dm_fini(struct amdgpu_device *adev) |
| { |
| int i; |
| |
| if (adev->dm.vblank_control_workqueue) { |
| destroy_workqueue(adev->dm.vblank_control_workqueue); |
| adev->dm.vblank_control_workqueue = NULL; |
| } |
| |
| for (i = 0; i < adev->dm.display_indexes_num; i++) |
| drm_encoder_cleanup(&adev->dm.mst_encoders[i].base); |
| |
| amdgpu_dm_destroy_drm_device(&adev->dm); |
| |
| #if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) |
| if (adev->dm.crc_rd_wrk) { |
| flush_work(&adev->dm.crc_rd_wrk->notify_ta_work); |
| kfree(adev->dm.crc_rd_wrk); |
| adev->dm.crc_rd_wrk = NULL; |
| } |
| #endif |
| #ifdef CONFIG_DRM_AMD_DC_HDCP |
| if (adev->dm.hdcp_workqueue) { |
| hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue); |
| adev->dm.hdcp_workqueue = NULL; |
| } |
| |
| if (adev->dm.dc) |
| dc_deinit_callbacks(adev->dm.dc); |
| #endif |
| |
| dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv); |
| |
| if (dc_enable_dmub_notifications(adev->dm.dc)) { |
| kfree(adev->dm.dmub_notify); |
| adev->dm.dmub_notify = NULL; |
| destroy_workqueue(adev->dm.delayed_hpd_wq); |
| adev->dm.delayed_hpd_wq = NULL; |
| } |
| |
| if (adev->dm.dmub_bo) |
| amdgpu_bo_free_kernel(&adev->dm.dmub_bo, |
| &adev->dm.dmub_bo_gpu_addr, |
| &adev->dm.dmub_bo_cpu_addr); |
| |
| if (adev->dm.hpd_rx_offload_wq) { |
| for (i = 0; i < adev->dm.dc->caps.max_links; i++) { |
| if (adev->dm.hpd_rx_offload_wq[i].wq) { |
| destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq); |
| adev->dm.hpd_rx_offload_wq[i].wq = NULL; |
| } |
| } |
| |
| kfree(adev->dm.hpd_rx_offload_wq); |
| adev->dm.hpd_rx_offload_wq = NULL; |
| } |
| |
| /* DC Destroy TODO: Replace destroy DAL */ |
| if (adev->dm.dc) |
| dc_destroy(&adev->dm.dc); |
| /* |
| * TODO: pageflip, vblank interrupt |
| * |
| * amdgpu_dm_irq_fini(adev); |
| */ |
| |
| if (adev->dm.cgs_device) { |
| amdgpu_cgs_destroy_device(adev->dm.cgs_device); |
| adev->dm.cgs_device = NULL; |
| } |
| if (adev->dm.freesync_module) { |
| mod_freesync_destroy(adev->dm.freesync_module); |
| adev->dm.freesync_module = NULL; |
| } |
| |
| mutex_destroy(&adev->dm.audio_lock); |
| mutex_destroy(&adev->dm.dc_lock); |
| mutex_destroy(&adev->dm.dpia_aux_lock); |
| } |
| |
| static int load_dmcu_fw(struct amdgpu_device *adev) |
| { |
| const char *fw_name_dmcu = NULL; |
| int r; |
| const struct dmcu_firmware_header_v1_0 *hdr; |
| |
| switch (adev->asic_type) { |
| #if defined(CONFIG_DRM_AMD_DC_SI) |
| case CHIP_TAHITI: |
| case CHIP_PITCAIRN: |
| case CHIP_VERDE: |
| case CHIP_OLAND: |
| #endif |
| case CHIP_BONAIRE: |
| case CHIP_HAWAII: |
| case CHIP_KAVERI: |
| case CHIP_KABINI: |
| case CHIP_MULLINS: |
| case CHIP_TONGA: |
| case CHIP_FIJI: |
| case CHIP_CARRIZO: |
| case CHIP_STONEY: |
| case CHIP_POLARIS11: |
| case CHIP_POLARIS10: |
| case CHIP_POLARIS12: |
| case CHIP_VEGAM: |
| case CHIP_VEGA10: |
| case CHIP_VEGA12: |
| case CHIP_VEGA20: |
| return 0; |
| case CHIP_NAVI12: |
| fw_name_dmcu = FIRMWARE_NAVI12_DMCU; |
| break; |
| case CHIP_RAVEN: |
| if (ASICREV_IS_PICASSO(adev->external_rev_id) || |
| ASICREV_IS_RAVEN2(adev->external_rev_id)) |
| fw_name_dmcu = FIRMWARE_RAVEN_DMCU; |
| else |
| return 0; |
| break; |
| default: |
| switch (adev->ip_versions[DCE_HWIP][0]) { |
| case IP_VERSION(2, 0, 2): |
| case IP_VERSION(2, 0, 3): |
| case IP_VERSION(2, 0, 0): |
| case IP_VERSION(2, 1, 0): |
| case IP_VERSION(3, 0, 0): |
| case IP_VERSION(3, 0, 2): |
| case IP_VERSION(3, 0, 3): |
| case IP_VERSION(3, 0, 1): |
| case IP_VERSION(3, 1, 2): |
| case IP_VERSION(3, 1, 3): |
| case IP_VERSION(3, 1, 4): |
| case IP_VERSION(3, 1, 5): |
| case IP_VERSION(3, 1, 6): |
| case IP_VERSION(3, 2, 0): |
| case IP_VERSION(3, 2, 1): |
| return 0; |
| default: |
| break; |
| } |
| DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); |
| return -EINVAL; |
| } |
| |
| if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { |
| DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n"); |
| return 0; |
| } |
| |
| r = request_firmware_direct(&adev->dm.fw_dmcu, fw_name_dmcu, adev->dev); |
| if (r == -ENOENT) { |
| /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ |
| DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); |
| adev->dm.fw_dmcu = NULL; |
| return 0; |
| } |
| if (r) { |
| dev_err(adev->dev, "amdgpu_dm: Can't load firmware \"%s\"\n", |
| fw_name_dmcu); |
| return r; |
| } |
| |
| r = amdgpu_ucode_validate(adev->dm.fw_dmcu); |
| if (r) { |
| dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", |
| fw_name_dmcu); |
| release_firmware(adev->dm.fw_dmcu); |
| adev->dm.fw_dmcu = NULL; |
| return r; |
| } |
| |
| hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data; |
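| /* |
| * The DMCU blob is registered as two ucode entries backed by the same |
| * firmware image: the ERAM payload and the interrupt vector (INTV), |
| * whose size comes from the header's intv_size_bytes field. |
| */ |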
| adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM; |
| adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu; |
| adev->firmware.fw_size += |
| ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); |
| |
| adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV; |
| adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu; |
| adev->firmware.fw_size += |
| ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); |
| |
| adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version); |
| |
| DRM_DEBUG_KMS("PSP loading DMCU firmware\n"); |
| |
| return 0; |
| } |
| |
| static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) |
| { |
| struct amdgpu_device *adev = ctx; |
| |
| return dm_read_reg(adev->dm.dc->ctx, address); |
| } |
| |
| static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, |
| uint32_t value) |
| { |
| struct amdgpu_device *adev = ctx; |
| |
| dm_write_reg(adev->dm.dc->ctx, address, value); |
| } |
| |
| static int dm_dmub_sw_init(struct amdgpu_device *adev) |
| { |
| struct dmub_srv_create_params create_params; |
| struct dmub_srv_region_params region_params; |
| struct dmub_srv_region_info region_info; |
| struct dmub_srv_fb_params fb_params; |
| struct dmub_srv_fb_info *fb_info; |
| struct dmub_srv *dmub_srv; |
| const struct dmcub_firmware_header_v1_0 *hdr; |
| const char *fw_name_dmub; |
| enum dmub_asic dmub_asic; |
| enum dmub_status status; |
| int r; |
| |
| switch (adev->ip_versions[DCE_HWIP][0]) { |
| case IP_VERSION(2, 1, 0): |
| dmub_asic = DMUB_ASIC_DCN21; |
| fw_name_dmub = FIRMWARE_RENOIR_DMUB; |
| if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) |
| fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; |
| break; |
| case IP_VERSION(3, 0, 0): |
| if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) { |
| dmub_asic = DMUB_ASIC_DCN30; |
| fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; |
| } else { |
| dmub_asic = DMUB_ASIC_DCN30; |
| fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; |
| } |
| break; |
| case IP_VERSION(3, 0, 1): |
| dmub_asic = DMUB_ASIC_DCN301; |
| fw_name_dmub = FIRMWARE_VANGOGH_DMUB; |
| break; |
| case IP_VERSION(3, 0, 2): |
| dmub_asic = DMUB_ASIC_DCN302; |
| fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; |
| break; |
| case IP_VERSION(3, 0, 3): |
| dmub_asic = DMUB_ASIC_DCN303; |
| fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; |
| break; |
| case IP_VERSION(3, 1, 2): |
| case IP_VERSION(3, 1, 3): |
| dmub_asic = (adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; |
| fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; |
| break; |
| case IP_VERSION(3, 1, 4): |
| dmub_asic = DMUB_ASIC_DCN314; |
| fw_name_dmub = FIRMWARE_DCN_314_DMUB; |
| break; |
| case IP_VERSION(3, 1, 5): |
| dmub_asic = DMUB_ASIC_DCN315; |
| fw_name_dmub = FIRMWARE_DCN_315_DMUB; |
| break; |
| case IP_VERSION(3, 1, 6): |
| dmub_asic = DMUB_ASIC_DCN316; |
| fw_name_dmub = FIRMWARE_DCN316_DMUB; |
| break; |
| case IP_VERSION(3, 2, 0): |
| dmub_asic = DMUB_ASIC_DCN32; |
| fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; |
| break; |
| case IP_VERSION(3, 2, 1): |
| dmub_asic = DMUB_ASIC_DCN321; |
| fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; |
| break; |
| default: |
| /* ASIC doesn't support DMUB. */ |
| return 0; |
| } |
| |
| r = request_firmware_direct(&adev->dm.dmub_fw, fw_name_dmub, adev->dev); |
| if (r) { |
| DRM_ERROR("DMUB firmware loading failed: %d\n", r); |
| return r; |
| } |
| |
| r = amdgpu_ucode_validate(adev->dm.dmub_fw); |
| if (r) { |
| DRM_ERROR("Couldn't validate DMUB firmware: %d\n", r); |
| return r; |
| } |
| |
| hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; |
| adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); |
| |
| if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { |
| adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = |
| AMDGPU_UCODE_ID_DMCUB; |
| adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = |
| adev->dm.dmub_fw; |
| adev->firmware.fw_size += |
| ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); |
| |
| DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", |
| adev->dm.dmcub_fw_version); |
| } |
| |
| adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); |
| dmub_srv = adev->dm.dmub_srv; |
| |
| if (!dmub_srv) { |
| DRM_ERROR("Failed to allocate DMUB service!\n"); |
| return -ENOMEM; |
| } |
| |
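| /* |
| * Route DMUB register accesses through DM so they go through DC's |
| * register helpers (dm_read_reg/dm_write_reg) with the DC context. |
| */ |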
| memset(&create_params, 0, sizeof(create_params)); |
| create_params.user_ctx = adev; |
| create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; |
| create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; |
| create_params.asic = dmub_asic; |
| |
| /* Create the DMUB service. */ |
| status = dmub_srv_create(dmub_srv, &create_params); |
| if (status != DMUB_STATUS_OK) { |
| DRM_ERROR("Error creating DMUB service: %d\n", status); |
| return -EINVAL; |
| } |
| |
| /* Calculate the size of all the regions for the DMUB service. */ |
| memset(®ion_params, 0, sizeof(region_params)); |
| |
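| /* |
| * Within the firmware blob, inst_const_bytes includes the PSP header |
| * and footer that wrap the instruction/constant payload; strip them |
| * when sizing and locating the inst_const region. The bss/data region |
| * follows immediately after the inst_const bytes. |
| */ |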
| region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - |
| PSP_HEADER_BYTES - PSP_FOOTER_BYTES; |
| region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); |
| region_params.vbios_size = adev->bios_size; |
| region_params.fw_bss_data = region_params.bss_data_size ? |
| adev->dm.dmub_fw->data + |
| le32_to_cpu(hdr->header.ucode_array_offset_bytes) + |
| le32_to_cpu(hdr->inst_const_bytes) : NULL; |
| region_params.fw_inst_const = |
| adev->dm.dmub_fw->data + |
| le32_to_cpu(hdr->header.ucode_array_offset_bytes) + |
| PSP_HEADER_BYTES; |
| |
| status = dmub_srv_calc_region_info(dmub_srv, ®ion_params, |
| ®ion_info); |
| |
| if (status != DMUB_STATUS_OK) { |
| DRM_ERROR("Error calculating DMUB region info: %d\n", status); |
| return -EINVAL; |
| } |
| |
| /* |
| * Allocate a framebuffer based on the total size of all the regions. |
| * TODO: Move this into GART. |
| */ |
| r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, |
| AMDGPU_GEM_DOMAIN_VRAM, &adev->dm.dmub_bo, |
| &adev->dm.dmub_bo_gpu_addr, |
| &adev->dm.dmub_bo_cpu_addr); |
| if (r) |
| return r; |
| |
| /* Rebase the regions on the framebuffer address. */ |
| memset(&fb_params, 0, sizeof(fb_params)); |
| fb_params.cpu_addr = adev->dm.dmub_bo_cpu_addr; |
| fb_params.gpu_addr = adev->dm.dmub_bo_gpu_addr; |
| fb_params.region_info = ®ion_info; |
| |
| adev->dm.dmub_fb_info = |
| kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL); |
| fb_info = adev->dm.dmub_fb_info; |
| |
| if (!fb_info) { |
| DRM_ERROR( |
| "Failed to allocate framebuffer info for DMUB service!\n"); |
| return -ENOMEM; |
| } |
| |
| status = dmub_srv_calc_fb_info(dmub_srv, &fb_params, fb_info); |
| if (status != DMUB_STATUS_OK) { |
| DRM_ERROR("Error calculating DMUB FB info: %d\n", status); |
| return -EINVAL; |
| } |
| |
| return 0; |
| } |
| |
| static int dm_sw_init(void *handle) |
| { |
| struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| int r; |
| |
| r = dm_dmub_sw_init(adev); |
| if (r) |
| return r; |
| |
| return load_dmcu_fw(adev); |
| } |
| |
| static int dm_sw_fini(void *handle) |
| { |
| struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| |
| kfree(adev->dm.dmub_fb_info); |
| adev->dm.dmub_fb_info = NULL; |
| |
| if (adev->dm.dmub_srv) { |
| dmub_srv_destroy(adev->dm.dmub_srv); |
| adev->dm.dmub_srv = NULL; |
| } |
| |
| release_firmware(adev->dm.dmub_fw); |
| adev->dm.dmub_fw = NULL; |
| |
| release_firmware(adev->dm.fw_dmcu); |
| adev->dm.fw_dmcu = NULL; |
| |
| return 0; |
| } |
| |
| static int detect_mst_link_for_all_connectors(struct drm_device *dev) |
| { |
| struct amdgpu_dm_connector *aconnector; |
| struct drm_connector *connector; |
| struct drm_connector_list_iter iter; |
| int ret = 0; |
| |
| drm_connector_list_iter_begin(dev, &iter); |
| drm_for_each_connector_iter(connector, &iter) { |
| aconnector = to_amdgpu_dm_connector(connector); |
| if (aconnector->dc_link->type == dc_connection_mst_branch && |
| aconnector->mst_mgr.aux) { |
| DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n", |
| aconnector, |
| aconnector->base.base.id); |
| |
| ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true); |
| if (ret < 0) { |
| DRM_ERROR("DM_MST: Failed to start MST\n"); |
| aconnector->dc_link->type = |
| dc_connection_single; |
| break; |
| } |
| } |
| } |
| drm_connector_list_iter_end(&iter); |
| |
| return ret; |
| } |
| |
| static int dm_late_init(void *handle) |
| { |
| struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| |
| struct dmcu_iram_parameters params; |
| unsigned int linear_lut[16]; |
| int i; |
| struct dmcu *dmcu = NULL; |
| |
| dmcu = adev->dm.dc->res_pool->dmcu; |
| |
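| /* Build a 16-entry linear backlight LUT spanning 0 to 0xFFFF. */ |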
| for (i = 0; i < 16; i++) |
| linear_lut[i] = 0xFFFF * i / 15; |
| |
| params.set = 0; |
| params.backlight_ramping_override = false; |
| params.backlight_ramping_start = 0xCCCC; |
| params.backlight_ramping_reduction = 0xCCCCCCCC; |
| params.backlight_lut_array_size = 16; |
| params.backlight_lut_array = linear_lut; |
| |
| /* Min backlight level after ABM reduction; don't allow below 1% |
| * 0xFFFF x 0.01 = 0x28F |
| */ |
| params.min_abm_backlight = 0x28F; |
| /* |
| * In the case where ABM is implemented on dmcub, |
| * the dmcu object will be NULL. |
| * ABM 2.4 and up are implemented on dmcub. |
| */ |
| if (dmcu) { |
| if (!dmcu_load_iram(dmcu, params)) |
| return -EINVAL; |
| } else if (adev->dm.dc->ctx->dmub_srv) { |
| struct dc_link *edp_links[MAX_NUM_EDP]; |
| int edp_num; |
| |
| get_edp_links(adev->dm.dc, edp_links, &edp_num); |
| for (i = 0; i < edp_num; i++) { |
| if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i)) |
| return -EINVAL; |
| } |
| } |
| |
| return detect_mst_link_for_all_connectors(adev_to_drm(adev)); |
| } |
| |
| static void s3_handle_mst(struct drm_device *dev, bool suspend) |
| { |
| struct amdgpu_dm_connector *aconnector; |
| struct drm_connector *connector; |
| struct drm_connector_list_iter iter; |
| struct drm_dp_mst_topology_mgr *mgr; |
| int ret; |
| bool need_hotplug = false; |
| |
| drm_connector_list_iter_begin(dev, &iter); |
| drm_for_each_connector_iter(connector, &iter) { |
| aconnector = to_amdgpu_dm_connector(connector); |
| if (aconnector->dc_link->type != dc_connection_mst_branch || |
| aconnector->mst_port) |
| continue; |
| |
| mgr = &aconnector->mst_mgr; |
| |
| if (suspend) { |
| drm_dp_mst_topology_mgr_suspend(mgr); |
| } else { |
| ret = drm_dp_mst_topology_mgr_resume(mgr, true); |
| if (ret < 0) { |
| dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx, |
| aconnector->dc_link); |
| need_hotplug = true; |
| } |
| } |
| } |
| drm_connector_list_iter_end(&iter); |
| |
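| /* |
| * If any MST manager failed to resume, its topology was torn down |
| * above; send a hotplug event so userspace re-probes the connectors. |
| */ |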
| if (need_hotplug) |
| drm_kms_helper_hotplug_event(dev); |
| } |
| |
| static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) |
| { |
| int ret = 0; |
| |
| /* This interface is for dGPU Navi1x. The Linux dc-pplib interface |
| * depends on the Windows driver's dc implementation. |
| * For Navi1x, the clock settings of the dcn watermarks are fixed and |
| * should be passed to smu during boot up and on resume from s3. |
| * boot up: dc calculates the dcn watermark clock settings within |
| * dc_create, dcn20_resource_construct, |
| * then calls the pplib functions below to pass the settings to smu: |
| * smu_set_watermarks_for_clock_ranges |
| * smu_set_watermarks_table |
| * navi10_set_watermarks_table |
| * smu_write_watermarks_table |
| * |
| * For Renoir, the clock settings of the dcn watermarks are also fixed |
| * values. dc has implemented a different flow for the Windows driver: |
| * dc_hardware_init / dc_set_power_state |
| * dcn10_init_hw |
| * notify_wm_ranges |
| * set_wm_ranges |
| * -- Linux |
| * smu_set_watermarks_for_clock_ranges |
| * renoir_set_watermarks_table |
| * smu_write_watermarks_table |
| * |
| * For Linux, |
| * dc_hardware_init -> amdgpu_dm_init |
| * dc_set_power_state --> dm_resume |
| * |
| * therefore, this function applies to navi10/12/14 but not Renoir |
| */ |
| switch (adev->ip_versions[DCE_HWIP][0]) { |
| case IP_VERSION(2, 0, 2): |
| case IP_VERSION(2, 0, 0): |
| break; |
| default: |
| return 0; |
| } |
| |
| ret = amdgpu_dpm_write_watermarks_table(adev); |
| if (ret) { |
| DRM_ERROR("Failed to update WMTABLE!\n"); |
| return ret; |
| } |
| |
| return 0; |
| } |
| |
| /** |
| * dm_hw_init() - Initialize DC device |
| * @handle: The base driver device containing the amdgpu_dm device. |
| * |
| * Initialize the &struct amdgpu_display_manager device. This involves calling |
| * the initializers of each DM component, then populating the struct with them. |
| * |
| * Although the function implies hardware initialization, both hardware and |
| * software are initialized here. Splitting them out to their relevant init |
| * hooks is a future TODO item. |
| * |
| * Some notable things that are initialized here: |
| * |
| * - Display Core, both software and hardware |
| * - DC modules that we need (freesync and color management) |
| * - DRM software states |
| * - Interrupt sources and handlers |
| * - Vblank support |
| * - Debug FS entries, if enabled |
| */ |
| static int dm_hw_init(void *handle) |
| { |
| struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| /* Create DAL display manager */ |
| amdgpu_dm_init(adev); |
| amdgpu_dm_hpd_init(adev); |
| |
| return 0; |
| } |
| |
| /** |
| * dm_hw_fini() - Teardown DC device |
| * @handle: The base driver device containing the amdgpu_dm device. |
| * |
| * Teardown components within &struct amdgpu_display_manager that require |
| * cleanup. This involves cleaning up the DRM device, DC, and any modules that |
| * were loaded. Also flush IRQ workqueues and disable them. |
| */ |
| static int dm_hw_fini(void *handle) |
| { |
| struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
| |
| amdgpu_dm_hpd_fini(adev); |
| |
| amdgpu_dm_irq_fini(adev); |
| amdgpu_dm_fini(adev); |
| return 0; |
| } |
| |
| static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev, |
| struct dc_state *state, bool enable) |
| { |
| enum dc_irq_source irq_source; |
| struct amdgpu_crtc *acrtc; |
| int rc = -EBUSY; |
| int i = 0; |
| |
| for (i = 0; i < state->stream_count; i++) { |
| acrtc = get_crtc_by_otg_inst( |
| adev, state->stream_status[i].primary_otg_inst); |
| |
| if (acrtc && state->stream_status[i].plane_count != 0) { |
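| /* pflip IRQ sources are laid out consecutively, one per OTG instance. */ |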
| irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst; |
| rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; |
| DRM_DEBUG_VBL("crtc %d - pflip irq %sabling: r=%d\n", |
| acrtc->crtc_id, enable ? "en" : "dis", rc); |
| if (rc) |
| DRM_WARN("Failed to %s pflip interrupts\n", |
| enable ? "enable" : "disable"); |
| |
| if (enable) { |
| rc = dm_enable_vblank(&acrtc->base); |
| if (rc) |
| DRM_WARN("Failed to enable vblank interrupts\n"); |
| } else { |
| dm_disable_vblank(&acrtc->base); |
| } |
| } |
| } |
| } |
| |
| static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) |
| { |
| struct dc_state *context = NULL; |
| enum dc_status res = DC_ERROR_UNEXPECTED; |
| int i; |
| struct dc_stream_state *del_streams[MAX_PIPES]; |
| int del_streams_count = 0; |
| |
| memset(del_streams, 0, sizeof(del_streams)); |
| |
| context = dc_create_state(dc); |
| if (context == NULL) |
| goto context_alloc_fail; |
| |
| dc_resource_state_copy_construct_current(dc, context); |
| |
| /* First remove from context all streams */ |
| for (i = 0; i < context->stream_count; i++) { |
| struct dc_stream_state *stream = context->streams[i]; |
| |
| del_streams[del_streams_count++] = stream; |
| } |
| |
| /* Remove all planes for removed streams and then remove the streams */ |
| for (i = 0; i < del_streams_count; i++) { |
| if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { |
| res = DC_FAIL_DETACH_SURFACES; |
| goto fail; |
| } |
| |
| res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); |
| if (res != DC_OK) |
| goto fail; |
| } |
| |
| res = dc_commit_state(dc, context); |
| |
| fail: |
| dc_release_state(context); |
| |
| context_alloc_fail: |
| return res; |
| } |
| |
| static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) |
| { |
| int i; |
| |
| if (dm->hpd_rx_offload_wq) { |
| for (i = 0; i < dm->dc->caps.max_links; i++) |
| flush_workqueue(dm->hpd_rx_offload_wq[i].wq); |
| } |
| } |
| |
| static int dm_suspend(void *handle) |
| { |
| struct amdgpu_device *adev = handle; |
| struct amdgpu_display_manager *dm = &adev->dm; |
| int ret = 0; |
| |
| if (amdgpu_in_reset(adev)) { |
| mutex_lock(&dm->dc_lock); |
| |
| dc_allow_idle_optimizations(adev->dm.dc, false); |
| |
| dm->cached_dc_state = dc_copy_state(dm->dc->current_state); |
| |
| dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); |
| |
| amdgpu_dm_commit_zero_streams(dm->dc); |
| |
| amdgpu_dm_irq_suspend(adev); |
| |
| hpd_rx_irq_work_suspend(dm); |
| |
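| /* |
| * Note: dm->dc_lock is intentionally left held here; the GPU reset |
| * path releases it in dm_resume() once the cached state has been |
| * recommitted. |
| */ |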
| return ret; |
| } |
| |
| WARN_ON(adev->dm.cached_state); |
| adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); |
| |
| s3_handle_mst(adev_to_drm(adev), true); |
| |
| amdgpu_dm_irq_suspend(adev); |
| |
| hpd_rx_irq_work_suspend(dm); |
| |
| dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); |
| |
| return 0; |
| } |
| |
| struct amdgpu_dm_connector * |
| amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, |
| struct drm_crtc *crtc) |
| { |
| uint32_t i; |
| struct drm_connector_state *new_con_state; |
| struct drm_connector *connector; |
| struct drm_crtc *crtc_from_state; |
| |
| for_each_new_connector_in_state(state, connector, new_con_state, i) { |
| crtc_from_state = new_con_state->crtc; |
| |
| if (crtc_from_state == crtc) |
| return to_amdgpu_dm_connector(connector); |
| } |
| |
| return NULL; |
| } |
| |
| static void emulated_link_detect(struct dc_link *link) |
| { |
| struct dc_sink_init_data sink_init_data = { 0 }; |
| struct display_sink_capability sink_caps = { 0 }; |
| enum dc_edid_status edid_status; |
| struct dc_context *dc_ctx = link->ctx; |
| struct dc_sink *sink = NULL; |
| struct dc_sink *prev_sink = NULL; |
| |
| link->type = dc_connection_none; |
| prev_sink = link->local_sink; |
| |
| if (prev_sink) |
| dc_sink_release(prev_sink); |
| |
| switch (link->connector_signal) { |
| case SIGNAL_TYPE_HDMI_TYPE_A: { |
| sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; |
| sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; |
| break; |
| } |
| |
| case SIGNAL_TYPE_DVI_SINGLE_LINK: { |
| sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; |
| sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; |
| break; |
| } |
| |
| case SIGNAL_TYPE_DVI_DUAL_LINK: { |
| sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; |
| sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; |
| break; |
| } |
| |
| case SIGNAL_TYPE_LVDS: { |
| sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; |
| sink_caps.signal = SIGNAL_TYPE_LVDS; |
| break; |
| } |
| |
| case SIGNAL_TYPE_EDP: { |
| sink_caps.transaction_type = |
| DDC_TRANSACTION_TYPE_I2C_OVER_AUX; |
| sink_caps.signal = SIGNAL_TYPE_EDP; |
| break; |
| } |
| |
| case SIGNAL_TYPE_DISPLAY_PORT: { |
| sink_caps.transaction_type = |
| DDC_TRANSACTION_TYPE_I2C_OVER_AUX; |
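| /* |
| * There is no physical sink to train against in this emulated |
| * detection path, so DP is reported as a virtual signal. |
| */ |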
| sink_caps.signal = SIGNAL_TYPE_VIRTUAL; |
| break; |
| } |
| |
| default: |
| DC_ERROR("Invalid connector type! signal:%d\n", |
| link->connector_signal); |
| return; |
| } |
| |
| sink_init_data.link = link; |
| sink_init_data.sink_signal = sink_caps.signal; |
| |
| sink = dc_sink_create(&sink_init_data); |
| if (!sink) { |
| DC_ERROR("Failed to create sink!\n"); |
| return; |
| } |
| |
| /* dc_sink_create returns a new reference */ |
| link->local_sink = sink; |
| |
| edid_status = dm_helpers_read_local_edid( |
| link->ctx, |
| link, |
| sink); |
| |
| if (edid_status != EDID_OK) |
| DC_ERROR("Failed to read EDID\n"); |
| } |
| |
| static void dm_gpureset_commit_state(struct dc_state *dc_state, |
| struct amdgpu_display_manager *dm) |
| { |
| struct { |
| struct dc_surface_update surface_updates[MAX_SURFACES]; |
| struct dc_plane_info plane_infos[MAX_SURFACES]; |
| struct dc_scaling_info scaling_infos[MAX_SURFACES]; |
| struct dc_flip_addrs flip_addrs[MAX_SURFACES]; |
| struct dc_stream_update stream_update; |
| } *bundle; |
| int k, m; |
| |
| bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); |
| |
| if (!bundle) { |
| dm_error("Failed to allocate update bundle\n"); |
| goto cleanup; |
| } |
| |
| for (k = 0; k < dc_state->stream_count; k++) { |
| bundle->stream_update.stream = dc_state->streams[k]; |
| |
| for (m = 0; m < dc_state->stream_status[k].plane_count; m++) { |
| bundle->surface_updates[m].surface = |
| dc_state->stream_status[k].plane_states[m]; |
| bundle->surface_updates[m].surface->force_full_update = |
| true; |
| } |
| dc_commit_updates_for_stream( |
| dm->dc, bundle->surface_updates, |
| dc_state->stream_status[k].plane_count, |
| dc_state->streams[k], &bundle->stream_update, dc_state); |
| } |
| |
| cleanup: |
| kfree(bundle); |
| } |
| |
| static int dm_resume(void *handle) |
| { |
| struct amdgpu_device *adev = handle; |
| struct drm_device *ddev = adev_to_drm(adev); |
| struct amdgpu_display_manager *dm = &adev->dm; |
| struct amdgpu_dm_connector *aconnector; |
| struct drm_connector *connector; |
| struct drm_connector_list_iter iter; |
| struct drm_crtc *crtc; |
| struct drm_crtc_state *new_crtc_state; |
| struct dm_crtc_state *dm_new_crtc_state; |
| struct drm_plane *plane; |
| struct drm_plane_state *new_plane_state; |
| struct dm_plane_state *dm_new_plane_state; |
| struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); |
| enum dc_connection_type new_connection_type = dc_connection_none; |
| struct dc_state *dc_state; |
| int i, r, j; |
| |
| if (amdgpu_in_reset(adev)) { |
| dc_state = dm->cached_dc_state; |
| |
| /* |
| * The dc->current_state is backed up into dm->cached_dc_state |
| * before we commit 0 streams. |
| * |
| * DC will clear link encoder assignments on the real state |
| * but the changes won't propagate over to the copy we made |
| * before the 0 streams commit. |
| * |
| * DC expects that link encoder assignments are *not* valid |
| * when committing a state, so as a workaround we can copy |
| * off of the current state. |
| * |
| * We lose the previous assignments, but we had already |
| * commit 0 streams anyway. |
| */ |
| link_enc_cfg_copy(adev->dm.dc->current_state, dc_state); |
| |
| r = dm_dmub_hw_init(adev); |
| if (r) |
| DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); |
| |
| dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); |
| dc_resume(dm->dc); |
| |
| amdgpu_dm_irq_resume_early(adev); |
| |
| for (i = 0; i < dc_state->stream_count; i++) { |
| dc_state->streams[i]->mode_changed = true; |
| for (j = 0; j < dc_state->stream_status[i].plane_count; j++) { |
| dc_state->stream_status[i].plane_states[j]->update_flags.raw |
| = 0xffffffff; |
| } |
| } |
| |
| if (dc_is_dmub_outbox_supported(adev->dm.dc)) { |
| amdgpu_dm_outbox_init(adev); |
| dc_enable_dmub_outbox(adev->dm.dc); |
| } |
| |
| WARN_ON(!dc_commit_state(dm->dc, dc_state)); |
| |
| dm_gpureset_commit_state(dm->cached_dc_state, dm); |
| |
| dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true); |
| |
| dc_release_state(dm->cached_dc_state); |
| dm->cached_dc_state = NULL; |
| |
| amdgpu_dm_irq_resume_late(adev); |
| |
| mutex_unlock(&dm->dc_lock); |
| |
| return 0; |
| } |
| /* Recreate dc_state - DC invalidates it when setting power state to S3. */ |
| dc_release_state(dm_state->context); |
| dm_state->context = dc_create_state(dm->dc); |
| /* TODO: Remove dc_state->dccg, use dc->dccg directly. */ |
| dc_resource_state_construct(dm->dc, dm_state->context); |
| |
| /* Before powering on DC we need to re-initialize DMUB. */ |
| dm_dmub_hw_resume(adev); |
| |
| /* Re-enable outbox interrupts for DPIA. */ |
| if (dc_is_dmub_outbox_supported(adev->dm.dc)) { |
| amdgpu_dm_outbox_init(adev); |
| dc_enable_dmub_outbox(adev->dm.dc); |
| } |
| |
| /* power on hardware */ |
| dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0); |
| |
| /* program HPD filter */ |
| dc_resume(dm->dc); |
| |
| /* |
| * Enable HPD Rx IRQs early, before set mode, since short-pulse |
| * interrupts are used for MST. |
| */ |
| amdgpu_dm_irq_resume_early(adev); |
| |
| /* On resume we need to rewrite the MSTM control bits to enable MST */ |
| s3_handle_mst(ddev, false); |
| |
| /* Do detection */ |
| drm_connector_list_iter_begin(ddev, &iter); |
| drm_for_each_connector_iter(connector, &iter) { |
| aconnector = to_amdgpu_dm_connector(connector); |
| |
| /* |
| * Skip connectors that belong to an already created MST topology; |
| * they are handled by the MST framework. |
| */ |
| if (aconnector->dc_link && |
| aconnector->dc_link->type == dc_connection_mst_branch) |
| continue; |
| |
| mutex_lock(&aconnector->hpd_lock); |
| if (!dc_link_detect_sink(aconnector->dc_link, &new_connection_type)) |
| DRM_ERROR("KMS: Failed to detect connector\n"); |
| |
| if (aconnector->base.force && new_connection_type == dc_connection_none) { |
| emulated_link_detect(aconnector->dc_link); |
| } else { |
| mutex_lock(&dm->dc_lock); |
| dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); |
| mutex_unlock(&dm->dc_lock); |
| } |
| |
| if (aconnector->fake_enable && aconnector->dc_link->local_sink) |
| aconnector->fake_enable = false; |
| |
| if (aconnector->dc_sink) |
| dc_sink_release(aconnector->dc_sink); |
| aconnector->dc_sink = NULL; |
| amdgpu_dm_update_connector_after_detect(aconnector); |
| mutex_unlock(&aconnector->hpd_lock); |
| } |
| drm_connector_list_iter_end(&iter); |
| |
| /* Force mode set in atomic commit */ |
| for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) |
| new_crtc_state->active_changed = true; |
| |
| /* |
| * atomic_check is expected to create the dc states. We need to release |
| * them here, since they were duplicated as part of the suspend |
| * procedure. |
| */ |
| for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) { |
| dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); |
| if (dm_new_crtc_state->stream) { |
| WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1); |
| dc_stream_release(dm_new_crtc_state->stream); |
| dm_new_crtc_state->stream = NULL; |
| } |
| } |
| |
| for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) { |
| dm_new_plane_state = to_dm_plane_state(new_plane_state); |
| if (dm_new_plane_state->dc_state) { |
| WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1); |
| dc_plane_state_release(dm_new_plane_state->dc_state); |
| dm_new_plane_state->dc_state = NULL; |
| } |
| } |
| |
| drm_atomic_helper_resume(ddev, dm->cached_state); |
| |
| dm->cached_state = NULL; |
| |
| amdgpu_dm_irq_resume_late(adev); |
| |
| amdgpu_dm_smu_write_watermarks_table(adev); |
| |
| return 0; |
| } |
| |
| /** |
| * DOC: DM Lifecycle |
| * |
| * DM (and consequently DC) is registered in the amdgpu base driver as an IP |
| * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to |
| * the base driver's device list to be initialized and torn down accordingly. |
| * |
| * The functions to do so are provided as hooks in &struct amd_ip_funcs. |
| */ |
| |
| static const struct amd_ip_funcs amdgpu_dm_funcs = { |
| .name = "dm", |
| .early_init = dm_early_init, |
| .late_init = dm_late_init, |
| .sw_init = dm_sw_init, |
| .sw_fini = dm_sw_fini, |
| .early_fini = amdgpu_dm_early_fini, |
| .hw_init = dm_hw_init, |
| .hw_fini = dm_hw_fini, |
| .suspend = dm_suspend, |
| .resume = dm_resume, |
| .is_idle = dm_is_idle, |
| .wait_for_idle = dm_wait_for_idle, |
| .check_soft_reset = dm_check_soft_reset, |
| .soft_reset = dm_soft_reset, |
| .set_clockgating_state = dm_set_clockgating_state, |
| .set_powergating_state = dm_set_powergating_state, |
| }; |
| |
| const struct amdgpu_ip_block_version dm_ip_block = { |
| .type = AMD_IP_BLOCK_TYPE_DCE, |
| .major = 1, |
| .minor = 0, |
| .rev = 0, |
| .funcs = &amdgpu_dm_funcs, |
| }; |
| |
| /** |
| * DOC: atomic |
| * |
| * *WIP* |
| */ |
| |
| static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = { |
| .fb_create = amdgpu_display_user_framebuffer_create, |
| .get_format_info = amd_get_format_info, |
| .atomic_check = amdgpu_dm_atomic_check, |
| .atomic_commit = drm_atomic_helper_commit, |
| }; |
| |
| static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { |
| .atomic_commit_tail = amdgpu_dm_atomic_commit_tail, |
| .atomic_commit_setup = drm_dp_mst_atomic_setup_commit, |
| }; |
| |
| static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) |
| { |
| struct amdgpu_dm_backlight_caps *caps; |
| struct amdgpu_display_manager *dm; |
| struct drm_connector *conn_base; |
| struct amdgpu_device *adev; |
| struct dc_link *link = NULL; |
| struct drm_luminance_range_info *luminance_range; |
| int i; |
| |
| if (!aconnector || !aconnector->dc_link) |
| return; |
| |
| link = aconnector->dc_link; |
| if (link->connector_signal != SIGNAL_TYPE_EDP) |
| return; |
| |
| conn_base = &aconnector->base; |
| adev = drm_to_adev(conn_base->dev); |
| dm = &adev->dm; |
| for (i = 0; i < dm->num_of_edps; i++) { |
| if (link == dm->backlight_link[i]) |
| break; |
| } |
| if (i >= dm->num_of_edps) |
| return; |
| caps = &dm->backlight_caps[i]; |
| caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; |
| caps->aux_support = false; |
| |
| if (caps->ext_caps->bits.oled == 1 /*|| |
| caps->ext_caps->bits.sdr_aux_backlight_control == 1 || |
| caps->ext_caps->bits.hdr_aux_backlight_control == 1*/) |
| caps->aux_support = true; |
| |
| if (amdgpu_backlight == 0) |
| caps->aux_support = false; |
| else if (amdgpu_backlight == 1) |
| caps->aux_support = true; |
| |
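| /* |
| * Use the luminance range from the connector's parsed display info |
| * as the AUX backlight input signal range. |
| */ |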
| luminance_range = &conn_base->display_info.luminance_range; |
| caps->aux_min_input_signal = luminance_range->min_luminance; |
| caps->aux_max_input_signal = luminance_range->max_luminance; |
| } |
| |
| void amdgpu_dm_update_connector_after_detect( |
| struct amdgpu_dm_connector *aconnector) |
| { |
| struct drm_connector *connector = &aconnector->base; |
| struct drm_device *dev = connector->dev; |
| struct dc_sink *sink; |
| |
| /* MST handled by drm_mst framework */ |
| if (aconnector->mst_mgr.mst_state) |
| return; |
| |
| sink = aconnector->dc_link->local_sink; |
| if (sink) |
| dc_sink_retain(sink); |
| |
| /* |
| * Edid mgmt connector gets first update only in mode_valid hook, and then |
| * the connector sink is set to either a fake or a physical sink depending |
| * on link status. |
| * Skip if already done during boot. |
| */ |
| if (aconnector->base.force != DRM_FORCE_UNSPECIFIED |
| && aconnector->dc_em_sink) { |
| |
| /* |
| * For headless S3 resume, use the emulated sink (dc_em_sink) to fake a |
| * stream, because connector->sink is set to NULL on resume. |
| */ |
| mutex_lock(&dev->mode_config.mutex); |
| |
| if (sink) { |
| if (aconnector->dc_sink) { |
| amdgpu_dm_update_freesync_caps(connector, NULL); |
| /* |
| * retain and release below are used to |
| * bump up the refcount for the sink because the link doesn't point |
| * to it anymore after disconnect, so on the next crtc-to-connector |
| * reshuffle by UMD we would otherwise get an unwanted dc_sink release |
| */ |
| dc_sink_release(aconnector->dc_sink); |
| } |
| aconnector->dc_sink = sink; |
| dc_sink_retain(aconnector->dc_sink); |
| amdgpu_dm_update_freesync_caps(connector, |
| aconnector->edid); |
| } else { |
| amdgpu_dm_update_freesync_caps(connector, NULL); |
| if (!aconnector->dc_sink) { |
| aconnector->dc_sink = aconnector->dc_em_sink; |
| dc_sink_retain(aconnector->dc_sink); |
| } |
| } |
| |
| mutex_unlock(&dev->mode_config.mutex); |
| |
| if (sink) |
| dc_sink_release(sink); |
| return; |
| } |
| |
| /* |
| * TODO: temporary guard to look for proper fix |
| * if this sink is MST sink, we should not do anything |
| */ |
| if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { |
| dc_sink_release(sink); |
| return; |
| } |
| |
| if (aconnector->dc_sink == sink) { |
| /* |
| * We got a DP short pulse (Link Loss, DP CTS, etc...). |
| * Do nothing!! |
| */ |
| DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n", |
| aconnector->connector_id); |
| if (sink) |
| dc_sink_release(sink); |
| return; |
| } |
| |
| DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n", |
| aconnector->connector_id, aconnector->dc_sink, sink); |
| |
| mutex_lock(&dev->mode_config.mutex); |
| |
| /* |
| * 1. Update status of the drm connector |
| * 2. Send an event and let userspace tell us what to do |
| */ |
| if (sink) { |
| /* |
| * TODO: check if we still need the S3 mode update workaround. |
| * If yes, put it here. |
| */ |
| if (aconnector->dc_sink) { |
| amdgpu_dm_update_freesync_caps(connector, NULL); |
| dc_sink_release(aconnector->dc_sink); |
| } |
| |
| aconnector->dc_sink = sink; |
| dc_sink_retain(aconnector->dc_sink); |
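| /* |
| * A zero-length EDID means the sink provided none: drop the cached |
| * EDID and, for AUX links, the CEC address. |
| */ |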
| if (sink->dc_edid.length == 0) { |
| aconnector->edid = NULL; |
| if (aconnector->dc_link->aux_mode) { |
| drm_dp_cec_unset_edid( |
| &aconnector->dm_dp_aux.aux); |
| } |
| } else { |
| aconnector->edid = |
| (struct edid *)sink->dc_edid.raw_edid; |
| |
| if (aconnector->dc_link->aux_mode) |
| drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux, |
| aconnector->edid); |
| } |
| |
| drm_connector_update_edid_property(connector, aconnector->edid); |
| amdgpu_dm_update_freesync_caps(connector, aconnector->edid); |
| update_connector_ext_caps(aconnector); |
| } else { |
| drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux); |
| amdgpu_dm_update_freesync_caps(connector, NULL); |
| drm_connector_update_edid_property(connector, NULL); |
| aconnector->num_modes = 0; |
| dc_sink_release(aconnector->dc_sink); |
| aconnector->dc_sink = NULL; |
| aconnector->edid = NULL; |
| #ifdef CONFIG_DRM_AMD_DC_HDCP |
| /* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */ |
| if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) |
| connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; |
| #endif |
| } |
| |
| mutex_unlock(&dev->mode_config.mutex); |
| |
| update_subconnector_property(aconnector); |
| |
| if (sink) |
| dc_sink_release(sink); |
| } |
| |
| static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector) |
| { |
| struct drm_connector *connector = &aconnector->base; |
| struct drm_device *dev = connector->dev; |
| enum dc_connection_type new_connection_type = dc_connection_none; |
| struct amdgpu_device *adev = drm_to_adev(dev); |
| #ifdef CONFIG_DRM_AMD_DC_HDCP |
| struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); |
| #endif |
| bool ret = false; |
| |
| if (adev->dm.disable_hpd_irq) |
| return; |
| |
| /* |
| * In case of failure or MST there is no need to update the connector |
| * status or notify the OS, since (for the MST case) MST does this in |
| * its own context. |
| */ |
| mutex_lock(&aconnector->hpd_lock); |
| |
| #ifdef CONFIG_DRM_AMD_DC_HDCP |
| if (adev->dm.hdcp_workqueue) { |
| hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); |
| dm_con_state->update_hdcp = true; |
| } |
| #endif |
| if (aconnector->fake_enable) |
| aconnector->fake_enable = false; |
| |
| if (!dc_link_detect_sink |