| /* |
| * Copyright © 2006-2007 Intel Corporation |
| * |
| * Permission is hereby granted, free of charge, to any person obtaining a |
| * copy of this software and associated documentation files (the "Software"), |
| * to deal in the Software without restriction, including without limitation |
| * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
| * and/or sell copies of the Software, and to permit persons to whom the |
| * Software is furnished to do so, subject to the following conditions: |
| * |
| * The above copyright notice and this permission notice (including the next |
| * paragraph) shall be included in all copies or substantial portions of the |
| * Software. |
| * |
| * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
| * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
| * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
| * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER |
| * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING |
| * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER |
| * DEALINGS IN THE SOFTWARE. |
| * |
| * Authors: |
| * Eric Anholt <eric@anholt.net> |
| */ |
| |
| #include <linux/dma-resv.h> |
| #include <linux/i2c.h> |
| #include <linux/input.h> |
| #include <linux/kernel.h> |
| #include <linux/module.h> |
| #include <linux/slab.h> |
| #include <linux/string_helpers.h> |
| |
| #include <drm/display/drm_dp_helper.h> |
| #include <drm/display/drm_dp_tunnel.h> |
| #include <drm/drm_atomic.h> |
| #include <drm/drm_atomic_helper.h> |
| #include <drm/drm_atomic_uapi.h> |
| #include <drm/drm_damage_helper.h> |
| #include <drm/drm_edid.h> |
| #include <drm/drm_fourcc.h> |
| #include <drm/drm_probe_helper.h> |
| #include <drm/drm_rect.h> |
| |
| #include "gem/i915_gem_lmem.h" |
| #include "gem/i915_gem_object.h" |
| |
| #include "g4x_dp.h" |
| #include "g4x_hdmi.h" |
| #include "hsw_ips.h" |
| #include "i915_config.h" |
| #include "i915_drv.h" |
| #include "i915_reg.h" |
| #include "i915_utils.h" |
| #include "i9xx_plane.h" |
| #include "i9xx_wm.h" |
| #include "intel_atomic.h" |
| #include "intel_atomic_plane.h" |
| #include "intel_audio.h" |
| #include "intel_bw.h" |
| #include "intel_cdclk.h" |
| #include "intel_clock_gating.h" |
| #include "intel_color.h" |
| #include "intel_crt.h" |
| #include "intel_crtc.h" |
| #include "intel_crtc_state_dump.h" |
| #include "intel_ddi.h" |
| #include "intel_de.h" |
| #include "intel_display_driver.h" |
| #include "intel_display_power.h" |
| #include "intel_display_types.h" |
| #include "intel_dmc.h" |
| #include "intel_dp.h" |
| #include "intel_dp_link_training.h" |
| #include "intel_dp_mst.h" |
| #include "intel_dp_tunnel.h" |
| #include "intel_dpll.h" |
| #include "intel_dpll_mgr.h" |
| #include "intel_dpt.h" |
| #include "intel_dpt_common.h" |
| #include "intel_drrs.h" |
| #include "intel_dsb.h" |
| #include "intel_dsi.h" |
| #include "intel_dvo.h" |
| #include "intel_fb.h" |
| #include "intel_fbc.h" |
| #include "intel_fbdev.h" |
| #include "intel_fdi.h" |
| #include "intel_fifo_underrun.h" |
| #include "intel_frontbuffer.h" |
| #include "intel_hdmi.h" |
| #include "intel_hotplug.h" |
| #include "intel_link_bw.h" |
| #include "intel_lvds.h" |
| #include "intel_lvds_regs.h" |
| #include "intel_modeset_setup.h" |
| #include "intel_modeset_verify.h" |
| #include "intel_overlay.h" |
| #include "intel_panel.h" |
| #include "intel_pch_display.h" |
| #include "intel_pch_refclk.h" |
| #include "intel_pcode.h" |
| #include "intel_pipe_crc.h" |
| #include "intel_plane_initial.h" |
| #include "intel_pmdemand.h" |
| #include "intel_pps.h" |
| #include "intel_psr.h" |
| #include "intel_psr_regs.h" |
| #include "intel_sdvo.h" |
| #include "intel_snps_phy.h" |
| #include "intel_tc.h" |
| #include "intel_tv.h" |
| #include "intel_vblank.h" |
| #include "intel_vdsc.h" |
| #include "intel_vdsc_regs.h" |
| #include "intel_vga.h" |
| #include "intel_vrr.h" |
| #include "intel_wm.h" |
| #include "skl_scaler.h" |
| #include "skl_universal_plane.h" |
| #include "skl_watermark.h" |
| #include "vlv_dsi.h" |
| #include "vlv_dsi_pll.h" |
| #include "vlv_dsi_regs.h" |
| #include "vlv_sideband.h" |
| |
| static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); |
| static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); |
| static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); |
| static void bdw_set_pipe_misc(const struct intel_crtc_state *crtc_state); |
| |
| /* returns HPLL frequency in kHz */ |
| int vlv_get_hpll_vco(struct drm_i915_private *dev_priv) |
| { |
| int hpll_freq, vco_freq[] = { 800, 1600, 2000, 2400 }; |
| |
| /* Obtain SKU information */ |
| hpll_freq = vlv_cck_read(dev_priv, CCK_FUSE_REG) & |
| CCK_FUSE_HPLL_FREQ_MASK; |
| |
| return vco_freq[hpll_freq] * 1000; |
| } |
| |
/*
 * Read a CCK-derived clock frequency, in kHz.
 *
 * The register holds a divider; the resulting frequency is
 * ref_freq * 2 / (divider + 1).
 */
int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
		      const char *name, u32 reg, int ref_freq)
{
	u32 val;
	int divider;

	val = vlv_cck_read(dev_priv, reg);
	divider = val & CCK_FREQUENCY_VALUES;

	/*
	 * The status field should mirror the requested divider once the
	 * clock has settled; warn if a frequency change is still pending.
	 */
	drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
		 (divider << CCK_FREQUENCY_STATUS_SHIFT),
		 "%s change in progress\n", name);

	return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
| |
| int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv, |
| const char *name, u32 reg) |
| { |
| int hpll; |
| |
| vlv_cck_get(dev_priv); |
| |
| if (dev_priv->hpll_freq == 0) |
| dev_priv->hpll_freq = vlv_get_hpll_vco(dev_priv); |
| |
| hpll = vlv_get_cck_clock(dev_priv, name, reg, dev_priv->hpll_freq); |
| |
| vlv_cck_put(dev_priv); |
| |
| return hpll; |
| } |
| |
| void intel_update_czclk(struct drm_i915_private *dev_priv) |
| { |
| if (!(IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))) |
| return; |
| |
| dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk", |
| CCK_CZ_CLOCK_CONTROL); |
| |
| drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n", |
| dev_priv->czclk_freq); |
| } |
| |
| static bool is_hdr_mode(const struct intel_crtc_state *crtc_state) |
| { |
| return (crtc_state->active_planes & |
| ~(icl_hdr_plane_mask() | BIT(PLANE_CURSOR))) == 0; |
| } |
| |
| /* WA Display #0827: Gen9:all */ |
| static void |
| skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable) |
| { |
| intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), |
| DUPS1_GATING_DIS | DUPS2_GATING_DIS, |
| enable ? DUPS1_GATING_DIS | DUPS2_GATING_DIS : 0); |
| } |
| |
| /* Wa_2006604312:icl,ehl */ |
| static void |
| icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, |
| bool enable) |
| { |
| intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), |
| DPFR_GATING_DIS, |
| enable ? DPFR_GATING_DIS : 0); |
| } |
| |
| /* Wa_1604331009:icl,jsl,ehl */ |
| static void |
| icl_wa_cursorclkgating(struct drm_i915_private *dev_priv, enum pipe pipe, |
| bool enable) |
| { |
| intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), |
| CURSOR_GATING_DIS, |
| enable ? CURSOR_GATING_DIS : 0); |
| } |
| |
| static bool |
| is_trans_port_sync_slave(const struct intel_crtc_state *crtc_state) |
| { |
| return crtc_state->master_transcoder != INVALID_TRANSCODER; |
| } |
| |
| bool |
| is_trans_port_sync_master(const struct intel_crtc_state *crtc_state) |
| { |
| return crtc_state->sync_mode_slaves_mask != 0; |
| } |
| |
| bool |
| is_trans_port_sync_mode(const struct intel_crtc_state *crtc_state) |
| { |
| return is_trans_port_sync_master(crtc_state) || |
| is_trans_port_sync_slave(crtc_state); |
| } |
| |
/* The lowest pipe in the bigjoiner mask acts as the master. */
static enum pipe bigjoiner_master_pipe(const struct intel_crtc_state *crtc_state)
{
	/* ffs() is 1-based; convert to a 0-based pipe enum. */
	return ffs(crtc_state->bigjoiner_pipes) - 1;
}
| |
| u8 intel_crtc_bigjoiner_slave_pipes(const struct intel_crtc_state *crtc_state) |
| { |
| if (crtc_state->bigjoiner_pipes) |
| return crtc_state->bigjoiner_pipes & ~BIT(bigjoiner_master_pipe(crtc_state)); |
| else |
| return 0; |
| } |
| |
| bool intel_crtc_is_bigjoiner_slave(const struct intel_crtc_state *crtc_state) |
| { |
| struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
| |
| return crtc_state->bigjoiner_pipes && |
| crtc->pipe != bigjoiner_master_pipe(crtc_state); |
| } |
| |
| bool intel_crtc_is_bigjoiner_master(const struct intel_crtc_state *crtc_state) |
| { |
| struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
| |
| return crtc_state->bigjoiner_pipes && |
| crtc->pipe == bigjoiner_master_pipe(crtc_state); |
| } |
| |
/* Number of pipes joined together for this CRTC (0 when not using bigjoiner). */
static int intel_bigjoiner_num_pipes(const struct intel_crtc_state *crtc_state)
{
	return hweight8(crtc_state->bigjoiner_pipes);
}
| |
| struct intel_crtc *intel_master_crtc(const struct intel_crtc_state *crtc_state) |
| { |
| struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); |
| |
| if (intel_crtc_is_bigjoiner_slave(crtc_state)) |
| return intel_crtc_for_pipe(i915, bigjoiner_master_pipe(crtc_state)); |
| else |
| return to_intel_crtc(crtc_state->uapi.crtc); |
| } |
| |
/*
 * Wait until the pipe has actually shut down after TRANSCONF enable
 * was cleared.
 */
static void
intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (DISPLAY_VER(dev_priv) >= 4) {
		enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;

		/* Wait for the Pipe State to go off */
		if (intel_de_wait_for_clear(dev_priv, TRANSCONF(cpu_transcoder),
					    TRANSCONF_STATE_ENABLE, 100))
			drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n");
	} else {
		/* No state bit on older hardware; poll the scanline instead. */
		intel_wait_for_pipe_scanline_stopped(crtc);
	}
}
| |
/*
 * State-checker helper: warn if the transcoder's actual enable state
 * doesn't match the expected @state.
 */
void assert_transcoder(struct drm_i915_private *dev_priv,
		       enum transcoder cpu_transcoder, bool state)
{
	bool cur_state;
	enum intel_display_power_domain power_domain;
	intel_wakeref_t wakeref;

	/* we keep both pipes enabled on 830 */
	if (IS_I830(dev_priv))
		state = true;

	/*
	 * Only read the register if its power domain is already up;
	 * a powered-down transcoder is treated as disabled.
	 */
	power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
	wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
	if (wakeref) {
		u32 val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
		cur_state = !!(val & TRANSCONF_ENABLE);

		intel_display_power_put(dev_priv, power_domain, wakeref);
	} else {
		cur_state = false;
	}

	I915_STATE_WARN(dev_priv, cur_state != state,
			"transcoder %s assertion failure (expected %s, current %s)\n",
			transcoder_name(cpu_transcoder), str_on_off(state),
			str_on_off(cur_state));
}
| |
/*
 * State-checker helper: warn if a plane's hardware enable state doesn't
 * match the expected @state.
 */
static void assert_plane(struct intel_plane *plane, bool state)
{
	struct drm_i915_private *i915 = to_i915(plane->base.dev);
	enum pipe pipe;
	bool cur_state;

	/* get_hw_state() also reports which pipe the plane is attached to. */
	cur_state = plane->get_hw_state(plane, &pipe);

	I915_STATE_WARN(i915, cur_state != state,
			"%s assertion failure (expected %s, current %s)\n",
			plane->base.name, str_on_off(state),
			str_on_off(cur_state));
}
| |
| #define assert_plane_enabled(p) assert_plane(p, true) |
| #define assert_plane_disabled(p) assert_plane(p, false) |
| |
| static void assert_planes_disabled(struct intel_crtc *crtc) |
| { |
| struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
| struct intel_plane *plane; |
| |
| for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) |
| assert_plane_disabled(plane); |
| } |
| |
/*
 * Wait for a VLV/CHV digital port to report ready in its PHY status
 * register, warning on timeout.
 */
void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
			 struct intel_digital_port *dig_port,
			 unsigned int expected_mask)
{
	u32 port_mask;
	i915_reg_t dpll_reg;

	/* Each port's ready bits live in a different register/field. */
	switch (dig_port->base.port) {
	default:
		MISSING_CASE(dig_port->base.port);
		fallthrough;
	case PORT_B:
		port_mask = DPLL_PORTB_READY_MASK;
		dpll_reg = DPLL(0);
		break;
	case PORT_C:
		port_mask = DPLL_PORTC_READY_MASK;
		dpll_reg = DPLL(0);
		/* Port C's ready bits sit 4 bits above port B's. */
		expected_mask <<= 4;
		break;
	case PORT_D:
		port_mask = DPLL_PORTD_READY_MASK;
		dpll_reg = DPIO_PHY_STATUS;
		break;
	}

	if (intel_de_wait_for_register(dev_priv, dpll_reg,
				       port_mask, expected_mask, 1000))
		drm_WARN(&dev_priv->drm, 1,
			 "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
			 dig_port->base.base.base.id, dig_port->base.base.name,
			 intel_de_read(dev_priv, dpll_reg) & port_mask,
			 expected_mask);
}
| |
/*
 * Enable the (CPU) transcoder/pipe for the given new CRTC state.
 *
 * Precondition: all planes are disabled, and the relevant PLLs
 * (pipe PLL, DSI PLL, or FDI PLLs) are already running.
 */
void intel_enable_transcoder(const struct intel_crtc_state *new_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));

	assert_planes_disabled(crtc);

	/*
	 * A pipe without a PLL won't actually be able to drive bits from
	 * a plane.  On ILK+ the pipe PLLs are integrated, so we don't
	 * need the check.
	 */
	if (HAS_GMCH(dev_priv)) {
		if (intel_crtc_has_type(new_crtc_state, INTEL_OUTPUT_DSI))
			assert_dsi_pll_enabled(dev_priv);
		else
			assert_pll_enabled(dev_priv, pipe);
	} else {
		if (new_crtc_state->has_pch_encoder) {
			/* if driving the PCH, we need FDI enabled */
			assert_fdi_rx_pll_enabled(dev_priv,
						  intel_crtc_pch_transcoder(crtc));
			assert_fdi_tx_pll_enabled(dev_priv,
						  (enum pipe) cpu_transcoder);
		}
		/* FIXME: assert CPU port conditions for SNB+ */
	}

	/* Wa_22012358565:adl-p */
	if (DISPLAY_VER(dev_priv) == 13)
		intel_de_rmw(dev_priv, PIPE_ARB_CTL(pipe),
			     0, PIPE_ARB_USE_PROG_SLOTS);

	val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
	if (val & TRANSCONF_ENABLE) {
		/* we keep both pipes enabled on 830 */
		drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
		return;
	}

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder),
		       val | TRANSCONF_ENABLE);
	intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));

	/*
	 * Until the pipe starts PIPEDSL reads will return a stale value,
	 * which causes an apparent vblank timestamp jump when PIPEDSL
	 * resets to its proper value. That also messes up the frame count
	 * when it's derived from the timestamps. So let's wait for the
	 * pipe to start properly before we call drm_crtc_vblank_on()
	 */
	if (intel_crtc_max_vblank_count(new_crtc_state) == 0)
		intel_wait_for_pipe_scanline_moving(crtc);
}
| |
/*
 * Disable the (CPU) transcoder/pipe for the given old CRTC state and
 * wait for it to actually turn off (except on 830, which keeps both
 * pipes running).
 *
 * Precondition: all planes on the pipe are already disabled.
 */
void intel_disable_transcoder(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
	enum pipe pipe = crtc->pipe;
	u32 val;

	drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));

	/*
	 * Make sure planes won't keep trying to pump pixels to us,
	 * or we might hang the display.
	 */
	assert_planes_disabled(crtc);

	val = intel_de_read(dev_priv, TRANSCONF(cpu_transcoder));
	if ((val & TRANSCONF_ENABLE) == 0)
		return;

	/*
	 * Double wide has implications for planes
	 * so best keep it disabled when not needed.
	 */
	if (old_crtc_state->double_wide)
		val &= ~TRANSCONF_DOUBLE_WIDE;

	/* Don't disable pipe or pipe PLLs if needed */
	if (!IS_I830(dev_priv))
		val &= ~TRANSCONF_ENABLE;

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);

	/* Clear the FEC stall chicken bit on TGL+ once the pipe goes down. */
	if (DISPLAY_VER(dev_priv) >= 12)
		intel_de_rmw(dev_priv, hsw_chicken_trans_reg(dev_priv, cpu_transcoder),
			     FECSTALL_DIS_DPTSTREAM_DPTTG, 0);

	if ((val & TRANSCONF_ENABLE) == 0)
		intel_wait_for_pipe_off(old_crtc_state);
}
| |
| unsigned int intel_rotation_info_size(const struct intel_rotation_info *rot_info) |
| { |
| unsigned int size = 0; |
| int i; |
| |
| for (i = 0 ; i < ARRAY_SIZE(rot_info->plane); i++) |
| size += rot_info->plane[i].dst_stride * rot_info->plane[i].width; |
| |
| return size; |
| } |
| |
| unsigned int intel_remapped_info_size(const struct intel_remapped_info *rem_info) |
| { |
| unsigned int size = 0; |
| int i; |
| |
| for (i = 0 ; i < ARRAY_SIZE(rem_info->plane); i++) { |
| unsigned int plane_size; |
| |
| if (rem_info->plane[i].linear) |
| plane_size = rem_info->plane[i].size; |
| else |
| plane_size = rem_info->plane[i].dst_stride * rem_info->plane[i].height; |
| |
| if (plane_size == 0) |
| continue; |
| |
| if (rem_info->plane_alignment) |
| size = ALIGN(size, rem_info->plane_alignment); |
| |
| size += plane_size; |
| } |
| |
| return size; |
| } |
| |
| bool intel_plane_uses_fence(const struct intel_plane_state *plane_state) |
| { |
| struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); |
| struct drm_i915_private *dev_priv = to_i915(plane->base.dev); |
| |
| return DISPLAY_VER(dev_priv) < 4 || |
| (plane->fbc && |
| plane_state->view.gtt.type == I915_GTT_VIEW_NORMAL); |
| } |
| |
| /* |
| * Convert the x/y offsets into a linear offset. |
| * Only valid with 0/180 degree rotation, which is fine since linear |
| * offset is only used with linear buffers on pre-hsw and tiled buffers |
| * with gen2/3, and 90/270 degree rotations isn't supported on any of them. |
| */ |
| u32 intel_fb_xy_to_linear(int x, int y, |
| const struct intel_plane_state *state, |
| int color_plane) |
| { |
| const struct drm_framebuffer *fb = state->hw.fb; |
| unsigned int cpp = fb->format->cpp[color_plane]; |
| unsigned int pitch = state->view.color_plane[color_plane].mapping_stride; |
| |
| return y * pitch + x * cpp; |
| } |
| |
| /* |
| * Add the x/y offsets derived from fb->offsets[] to the user |
| * specified plane src x/y offsets. The resulting x/y offsets |
| * specify the start of scanout from the beginning of the gtt mapping. |
| */ |
| void intel_add_fb_offsets(int *x, int *y, |
| const struct intel_plane_state *state, |
| int color_plane) |
| |
| { |
| *x += state->view.color_plane[color_plane].x; |
| *y += state->view.color_plane[color_plane].y; |
| } |
| |
| u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv, |
| u32 pixel_format, u64 modifier) |
| { |
| struct intel_crtc *crtc; |
| struct intel_plane *plane; |
| |
| if (!HAS_DISPLAY(dev_priv)) |
| return 0; |
| |
| /* |
| * We assume the primary plane for pipe A has |
| * the highest stride limits of them all, |
| * if in case pipe A is disabled, use the first pipe from pipe_mask. |
| */ |
| crtc = intel_first_crtc(dev_priv); |
| if (!crtc) |
| return 0; |
| |
| plane = to_intel_plane(crtc->base.primary); |
| |
| return plane->max_stride(plane, pixel_format, modifier, |
| DRM_MODE_ROTATE_0); |
| } |
| |
| void intel_set_plane_visible(struct intel_crtc_state *crtc_state, |
| struct intel_plane_state *plane_state, |
| bool visible) |
| { |
| struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); |
| |
| plane_state->uapi.visible = visible; |
| |
| if (visible) |
| crtc_state->uapi.plane_mask |= drm_plane_mask(&plane->base); |
| else |
| crtc_state->uapi.plane_mask &= ~drm_plane_mask(&plane->base); |
| } |
| |
| void intel_plane_fixup_bitmasks(struct intel_crtc_state *crtc_state) |
| { |
| struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); |
| struct drm_plane *plane; |
| |
| /* |
| * Active_planes aliases if multiple "primary" or cursor planes |
| * have been used on the same (or wrong) pipe. plane_mask uses |
| * unique ids, hence we can use that to reconstruct active_planes. |
| */ |
| crtc_state->enabled_planes = 0; |
| crtc_state->active_planes = 0; |
| |
| drm_for_each_plane_mask(plane, &dev_priv->drm, |
| crtc_state->uapi.plane_mask) { |
| crtc_state->enabled_planes |= BIT(to_intel_plane(plane)->id); |
| crtc_state->active_planes |= BIT(to_intel_plane(plane)->id); |
| } |
| } |
| |
/*
 * Forcibly disable a plane outside the atomic commit machinery
 * (used e.g. during initial hardware takeover), updating the software
 * state and honouring the hardware's vblank ordering requirements.
 */
void intel_plane_disable_noatomic(struct intel_crtc *crtc,
				  struct intel_plane *plane)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	struct intel_crtc_state *crtc_state =
		to_intel_crtc_state(crtc->base.state);
	struct intel_plane_state *plane_state =
		to_intel_plane_state(plane->base.state);

	drm_dbg_kms(&dev_priv->drm,
		    "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
		    plane->base.base.id, plane->base.name,
		    crtc->base.base.id, crtc->base.name);

	/* Drop the plane from all software bookkeeping first. */
	intel_set_plane_visible(crtc_state, plane_state, false);
	intel_plane_fixup_bitmasks(crtc_state);
	crtc_state->data_rate[plane->id] = 0;
	crtc_state->data_rate_y[plane->id] = 0;
	crtc_state->rel_data_rate[plane->id] = 0;
	crtc_state->rel_data_rate_y[plane->id] = 0;
	crtc_state->min_cdclk[plane->id] = 0;

	/* With only the cursor left, IPS may need to be turned off. */
	if ((crtc_state->active_planes & ~BIT(PLANE_CURSOR)) == 0 &&
	    hsw_ips_disable(crtc_state)) {
		crtc_state->ips_enabled = false;
		intel_crtc_wait_for_next_vblank(crtc);
	}

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) &&
	    intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && !crtc_state->active_planes)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, crtc->pipe, false);

	intel_plane_disable_arm(plane, crtc_state);
	intel_crtc_wait_for_next_vblank(crtc);
}
| |
/*
 * Return the y offset corresponding to the plane's main surface offset,
 * obtained by adjusting (0,0) against that offset.
 */
unsigned int
intel_plane_fence_y_offset(const struct intel_plane_state *plane_state)
{
	int x = 0, y = 0;

	intel_plane_adjust_aligned_offset(&x, &y, plane_state, 0,
					  plane_state->view.color_plane[0].offset, 0);

	/* Only the y component is of interest here. */
	return y;
}
| |
/* Program the per-pipe chicken register with the required workarounds. */
static void icl_set_pipe_chicken(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	u32 tmp;

	/* Read-modify-write: preserve any bits we don't manage here. */
	tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));

	/*
	 * Display WA #1153: icl
	 * enable hardware to bypass the alpha math
	 * and rounding for per-pixel values 00 and 0xff
	 */
	tmp |= PER_PIXEL_ALPHA_BYPASS_EN;
	/*
	 * Display WA # 1605353570: icl
	 * Set the pixel rounding bit to 1 for allowing
	 * passthrough of Frame buffer pixels unmodified
	 * across pipe
	 */
	tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;

	/*
	 * Underrun recovery must always be disabled on display 13+.
	 * DG2 chicken bit meaning is inverted compared to other platforms.
	 */
	if (IS_DG2(dev_priv))
		tmp &= ~UNDERRUN_RECOVERY_ENABLE_DG2;
	else if (DISPLAY_VER(dev_priv) >= 13)
		tmp |= UNDERRUN_RECOVERY_DISABLE_ADLP;

	/* Wa_14010547955:dg2 */
	if (IS_DG2(dev_priv))
		tmp |= DG2_RENDER_CCSTAG_4_3_EN;

	intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
| |
/*
 * Check whether any CRTC still has a commit whose cleanup (and thus fb
 * unpin) hasn't finished.  If one is found, wait for that CRTC's next
 * vblank to give the cleanup a chance to run, then report true.
 */
bool intel_has_pending_fb_unpin(struct drm_i915_private *dev_priv)
{
	struct drm_crtc *crtc;
	bool cleanup_done;

	drm_for_each_crtc(crtc, &dev_priv->drm) {
		struct drm_crtc_commit *commit;
		/* commit_lock protects the per-CRTC commit list. */
		spin_lock(&crtc->commit_lock);
		commit = list_first_entry_or_null(&crtc->commit_list,
						  struct drm_crtc_commit, commit_entry);
		cleanup_done = commit ?
			try_wait_for_completion(&commit->cleanup_done) : true;
		spin_unlock(&crtc->commit_lock);

		if (cleanup_done)
			continue;

		intel_crtc_wait_for_next_vblank(to_intel_crtc(crtc));

		return true;
	}

	return false;
}
| |
| /* |
| * Finds the encoder associated with the given CRTC. This can only be |
| * used when we know that the CRTC isn't feeding multiple encoders! |
| */ |
| struct intel_encoder * |
| intel_get_crtc_new_encoder(const struct intel_atomic_state *state, |
| const struct intel_crtc_state *crtc_state) |
| { |
| const struct drm_connector_state *connector_state; |
| const struct drm_connector *connector; |
| struct intel_encoder *encoder = NULL; |
| struct intel_crtc *master_crtc; |
| int num_encoders = 0; |
| int i; |
| |
| master_crtc = intel_master_crtc(crtc_state); |
| |
| for_each_new_connector_in_state(&state->base, connector, connector_state, i) { |
| if (connector_state->crtc != &master_crtc->base) |
| continue; |
| |
| encoder = to_intel_encoder(connector_state->best_encoder); |
| num_encoders++; |
| } |
| |
| drm_WARN(state->base.dev, num_encoders != 1, |
| "%d encoders for pipe %c\n", |
| num_encoders, pipe_name(master_crtc->pipe)); |
| |
| return encoder; |
| } |
| |
/* Enable and program the ILK-style panel fitter for this CRTC state. */
static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	const struct drm_rect *dst = &crtc_state->pch_pfit.dst;
	enum pipe pipe = crtc->pipe;
	int width = drm_rect_width(dst);
	int height = drm_rect_height(dst);
	int x = dst->x1;
	int y = dst->y1;

	if (!crtc_state->pch_pfit.enabled)
		return;

	/* Force use of hard-coded filter coefficients
	 * as some pre-programmed values are broken,
	 * e.g. x201.
	 */
	if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
	else
		intel_de_write_fw(dev_priv, PF_CTL(pipe), PF_ENABLE |
				  PF_FILTER_MED_3x3);
	/* Window position/size come from the pre-computed pfit dest rect. */
	intel_de_write_fw(dev_priv, PF_WIN_POS(pipe),
			  PF_WIN_XPOS(x) | PF_WIN_YPOS(y));
	intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe),
			  PF_WIN_XSIZE(width) | PF_WIN_YSIZE(height));
}
| |
| static void intel_crtc_dpms_overlay_disable(struct intel_crtc *crtc) |
| { |
| if (crtc->overlay) |
| (void) intel_overlay_switch_off(crtc->overlay); |
| |
| /* Let userspace switch the overlay on again. In most cases userspace |
| * has to recompute where to put it anyway. |
| */ |
| } |
| |
| static bool needs_nv12_wa(const struct intel_crtc_state *crtc_state) |
| { |
| struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); |
| |
| if (!crtc_state->nv12_planes) |
| return false; |
| |
| /* WA Display #0827: Gen9:all */ |
| if (DISPLAY_VER(dev_priv) == 9) |
| return true; |
| |
| return false; |
| } |
| |
| static bool needs_scalerclk_wa(const struct intel_crtc_state *crtc_state) |
| { |
| struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); |
| |
| /* Wa_2006604312:icl,ehl */ |
| if (crtc_state->scaler_state.scaler_users > 0 && DISPLAY_VER(dev_priv) == 11) |
| return true; |
| |
| return false; |
| } |
| |
| static bool needs_cursorclk_wa(const struct intel_crtc_state *crtc_state) |
| { |
| struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); |
| |
| /* Wa_1604331009:icl,jsl,ehl */ |
| if (is_hdr_mode(crtc_state) && |
| crtc_state->active_planes & BIT(PLANE_CURSOR) && |
| DISPLAY_VER(dev_priv) == 11) |
| return true; |
| |
| return false; |
| } |
| |
/*
 * Apply/remove the plane stretch-max workaround needed when async flips
 * are used together with VT-d.
 */
static void intel_async_flip_vtd_wa(struct drm_i915_private *i915,
				    enum pipe pipe, bool enable)
{
	if (DISPLAY_VER(i915) == 9) {
		/*
		 * "Plane N stretch max must be programmed to 11b (x1)
		 *  when Async flips are enabled on that plane."
		 */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     SKL_PLANE1_STRETCH_MAX_MASK,
			     enable ? SKL_PLANE1_STRETCH_MAX_X1 : SKL_PLANE1_STRETCH_MAX_X8);
	} else {
		/* Also needed on HSW/BDW albeit undocumented */
		intel_de_rmw(i915, CHICKEN_PIPESL_1(pipe),
			     HSW_PRI_STRETCH_MAX_MASK,
			     enable ? HSW_PRI_STRETCH_MAX_X1 : HSW_PRI_STRETCH_MAX_X8);
	}
}
| |
| static bool needs_async_flip_vtd_wa(const struct intel_crtc_state *crtc_state) |
| { |
| struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); |
| |
| return crtc_state->uapi.async_flip && i915_vtd_active(i915) && |
| (DISPLAY_VER(i915) == 9 || IS_BROADWELL(i915) || IS_HASWELL(i915)); |
| } |
| |
| static void intel_encoders_audio_enable(struct intel_atomic_state *state, |
| struct intel_crtc *crtc) |
| { |
| const struct intel_crtc_state *crtc_state = |
| intel_atomic_get_new_crtc_state(state, crtc); |
| const struct drm_connector_state *conn_state; |
| struct drm_connector *conn; |
| int i; |
| |
| for_each_new_connector_in_state(&state->base, conn, conn_state, i) { |
| struct intel_encoder *encoder = |
| to_intel_encoder(conn_state->best_encoder); |
| |
| if (conn_state->crtc != &crtc->base) |
| continue; |
| |
| if (encoder->audio_enable) |
| encoder->audio_enable(encoder, crtc_state, conn_state); |
| } |
| } |
| |
| static void intel_encoders_audio_disable(struct intel_atomic_state *state, |
| struct intel_crtc *crtc) |
| { |
| const struct intel_crtc_state *old_crtc_state = |
| intel_atomic_get_old_crtc_state(state, crtc); |
| const struct drm_connector_state *old_conn_state; |
| struct drm_connector *conn; |
| int i; |
| |
| for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { |
| struct intel_encoder *encoder = |
| to_intel_encoder(old_conn_state->best_encoder); |
| |
| if (old_conn_state->crtc != &crtc->base) |
| continue; |
| |
| if (encoder->audio_disable) |
| encoder->audio_disable(encoder, old_crtc_state, old_conn_state); |
| } |
| } |
| |
/*
 * A feature is "enabling" if it's off in the old state (or the CRTC is
 * undergoing a full modeset) and on in the new state; "disabling" is
 * the mirror image.
 */
#define is_enabling(feature, old_crtc_state, new_crtc_state) \
	((!(old_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)) && \
	 (new_crtc_state)->feature)
#define is_disabling(feature, old_crtc_state, new_crtc_state) \
	((old_crtc_state)->feature && \
	 (!(new_crtc_state)->feature || intel_crtc_needs_modeset(new_crtc_state)))
| |
| static bool planes_enabling(const struct intel_crtc_state *old_crtc_state, |
| const struct intel_crtc_state *new_crtc_state) |
| { |
| if (!new_crtc_state->hw.active) |
| return false; |
| |
| return is_enabling(active_planes, old_crtc_state, new_crtc_state); |
| } |
| |
| static bool planes_disabling(const struct intel_crtc_state *old_crtc_state, |
| const struct intel_crtc_state *new_crtc_state) |
| { |
| if (!old_crtc_state->hw.active) |
| return false; |
| |
| return is_disabling(active_planes, old_crtc_state, new_crtc_state); |
| } |
| |
| static bool vrr_params_changed(const struct intel_crtc_state *old_crtc_state, |
| const struct intel_crtc_state *new_crtc_state) |
| { |
| return old_crtc_state->vrr.flipline != new_crtc_state->vrr.flipline || |
| old_crtc_state->vrr.vmin != new_crtc_state->vrr.vmin || |
| old_crtc_state->vrr.vmax != new_crtc_state->vrr.vmax || |
| old_crtc_state->vrr.guardband != new_crtc_state->vrr.guardband || |
| old_crtc_state->vrr.pipeline_full != new_crtc_state->vrr.pipeline_full; |
| } |
| |
/*
 * VRR needs (re-)enabling either when it transitions off->on, or when
 * it stays on but its parameters (or M/N / LRR values) change.
 */
static bool vrr_enabling(const struct intel_crtc_state *old_crtc_state,
			 const struct intel_crtc_state *new_crtc_state)
{
	if (!new_crtc_state->hw.active)
		return false;

	return is_enabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(new_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}
| |
/*
 * VRR needs disabling either when it transitions on->off, or when it
 * was on and a parameter (or M/N / LRR) change forces a re-enable cycle.
 */
static bool vrr_disabling(const struct intel_crtc_state *old_crtc_state,
			  const struct intel_crtc_state *new_crtc_state)
{
	if (!old_crtc_state->hw.active)
		return false;

	return is_disabling(vrr.enable, old_crtc_state, new_crtc_state) ||
		(old_crtc_state->vrr.enable &&
		 (new_crtc_state->update_m_n || new_crtc_state->update_lrr ||
		  vrr_params_changed(old_crtc_state, new_crtc_state)));
}
| |
| static bool audio_enabling(const struct intel_crtc_state *old_crtc_state, |
| const struct intel_crtc_state *new_crtc_state) |
| { |
| if (!new_crtc_state->hw.active) |
| return false; |
| |
| return is_enabling(has_audio, old_crtc_state, new_crtc_state) || |
| (new_crtc_state->has_audio && |
| memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0); |
| } |
| |
| static bool audio_disabling(const struct intel_crtc_state *old_crtc_state, |
| const struct intel_crtc_state *new_crtc_state) |
| { |
| if (!old_crtc_state->hw.active) |
| return false; |
| |
| return is_disabling(has_audio, old_crtc_state, new_crtc_state) || |
| (old_crtc_state->has_audio && |
| memcmp(old_crtc_state->eld, new_crtc_state->eld, MAX_ELD_BYTES) != 0); |
| } |
| |
| #undef is_disabling |
| #undef is_enabling |
| |
/*
 * Finish the plane update on @crtc: flush frontbuffer tracking, program
 * post-update watermarks, and undo the workarounds that were armed in
 * intel_pre_plane_update() but are no longer needed by the new state.
 */
static void intel_post_plane_update(struct intel_atomic_state *state,
				    struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	intel_psr_post_plane_update(state, crtc);

	intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);

	/* Legacy post-update watermark programming, if the commit asked for it. */
	if (new_crtc_state->update_wm_post && new_crtc_state->hw.active)
		intel_update_watermarks(dev_priv);

	intel_fbc_post_update(state, crtc);

	/* Disable the async flip VT-d workaround once no longer needed. */
	if (needs_async_flip_vtd_wa(old_crtc_state) &&
	    !needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, false);

	/* Display WA 827: disable once the NV12 condition is gone. */
	if (needs_nv12_wa(old_crtc_state) &&
	    !needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, false);

	/* Wa_2006604312: re-enable scaler clock gating when possible. */
	if (needs_scalerclk_wa(old_crtc_state) &&
	    !needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, false);

	/* Wa_1604331009: re-enable cursor clock gating when possible. */
	if (needs_cursorclk_wa(old_crtc_state) &&
	    !needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, false);

	if (intel_crtc_needs_color_update(new_crtc_state))
		intel_color_post_update(new_crtc_state);

	/* Enable audio if this commit turned it on (see audio_enabling()). */
	if (audio_enabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_enable(state, crtc);
}
| |
| static void intel_crtc_enable_flip_done(struct intel_atomic_state *state, |
| struct intel_crtc *crtc) |
| { |
| const struct intel_crtc_state *crtc_state = |
| intel_atomic_get_new_crtc_state(state, crtc); |
| u8 update_planes = crtc_state->update_planes; |
| const struct intel_plane_state __maybe_unused *plane_state; |
| struct intel_plane *plane; |
| int i; |
| |
| for_each_new_intel_plane_in_state(state, plane, plane_state, i) { |
| if (plane->pipe == crtc->pipe && |
| update_planes & BIT(plane->id)) |
| plane->enable_flip_done(plane); |
| } |
| } |
| |
| static void intel_crtc_disable_flip_done(struct intel_atomic_state *state, |
| struct intel_crtc *crtc) |
| { |
| const struct intel_crtc_state *crtc_state = |
| intel_atomic_get_new_crtc_state(state, crtc); |
| u8 update_planes = crtc_state->update_planes; |
| const struct intel_plane_state __maybe_unused *plane_state; |
| struct intel_plane *plane; |
| int i; |
| |
| for_each_new_intel_plane_in_state(state, plane, plane_state, i) { |
| if (plane->pipe == crtc->pipe && |
| update_planes & BIT(plane->id)) |
| plane->disable_flip_done(plane); |
| } |
| } |
| |
| static void intel_crtc_async_flip_disable_wa(struct intel_atomic_state *state, |
| struct intel_crtc *crtc) |
| { |
| const struct intel_crtc_state *old_crtc_state = |
| intel_atomic_get_old_crtc_state(state, crtc); |
| const struct intel_crtc_state *new_crtc_state = |
| intel_atomic_get_new_crtc_state(state, crtc); |
| u8 disable_async_flip_planes = old_crtc_state->async_flip_planes & |
| ~new_crtc_state->async_flip_planes; |
| const struct intel_plane_state *old_plane_state; |
| struct intel_plane *plane; |
| bool need_vbl_wait = false; |
| int i; |
| |
| for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { |
| if (plane->need_async_flip_disable_wa && |
| plane->pipe == crtc->pipe && |
| disable_async_flip_planes & BIT(plane->id)) { |
| /* |
| * Apart from the async flip bit we want to |
| * preserve the old state for the plane. |
| */ |
| plane->async_flip(plane, old_crtc_state, |
| old_plane_state, false); |
| need_vbl_wait = true; |
| } |
| } |
| |
| if (need_vbl_wait) |
| intel_crtc_wait_for_next_vblank(crtc); |
| } |
| |
/*
 * Prepare @crtc for the upcoming plane update: tear down features that
 * must be off before the planes change (VRR, audio, DRRS, PSR), arm the
 * various display workarounds, and do pre-vblank watermark programming.
 *
 * Counterpart to intel_post_plane_update().
 */
static void intel_pre_plane_update(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	struct drm_i915_private *dev_priv = to_i915(state->base.dev);
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	enum pipe pipe = crtc->pipe;

	/* Disable VRR (and its timing adjustments) before the update. */
	if (vrr_disabling(old_crtc_state, new_crtc_state)) {
		intel_vrr_disable(old_crtc_state);
		intel_crtc_update_active_timings(old_crtc_state, false);
	}

	/* Audio must go down first if this commit turns it off. */
	if (audio_disabling(old_crtc_state, new_crtc_state))
		intel_encoders_audio_disable(state, crtc);

	intel_drrs_deactivate(old_crtc_state);

	intel_psr_pre_plane_update(state, crtc);

	/* IPS and FBC may each require a vblank to settle before the update. */
	if (hsw_ips_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	if (intel_fbc_pre_update(state, crtc))
		intel_crtc_wait_for_next_vblank(crtc);

	/* Async flip VT-d workaround, armed before the planes change. */
	if (!needs_async_flip_vtd_wa(old_crtc_state) &&
	    needs_async_flip_vtd_wa(new_crtc_state))
		intel_async_flip_vtd_wa(dev_priv, pipe, true);

	/* Display WA 827 */
	if (!needs_nv12_wa(old_crtc_state) &&
	    needs_nv12_wa(new_crtc_state))
		skl_wa_827(dev_priv, pipe, true);

	/* Wa_2006604312:icl,ehl */
	if (!needs_scalerclk_wa(old_crtc_state) &&
	    needs_scalerclk_wa(new_crtc_state))
		icl_wa_scalerclkgating(dev_priv, pipe, true);

	/* Wa_1604331009:icl,jsl,ehl */
	if (!needs_cursorclk_wa(old_crtc_state) &&
	    needs_cursorclk_wa(new_crtc_state))
		icl_wa_cursorclkgating(dev_priv, pipe, true);

	/*
	 * Vblank time updates from the shadow to live plane control register
	 * are blocked if the memory self-refresh mode is active at that
	 * moment. So to make sure the plane gets truly disabled, disable
	 * first the self-refresh mode. The self-refresh enable bit in turn
	 * will be checked/applied by the HW only at the next frame start
	 * event which is after the vblank start event, so we need to have a
	 * wait-for-vblank between disabling the plane and the pipe.
	 */
	if (HAS_GMCH(dev_priv) && old_crtc_state->hw.active &&
	    new_crtc_state->disable_cxsr && intel_set_memory_cxsr(dev_priv, false))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * IVB workaround: must disable low power watermarks for at least
	 * one frame before enabling scaling. LP watermarks can be re-enabled
	 * when scaling is disabled.
	 *
	 * WaCxSRDisabledForSpriteScaling:ivb
	 */
	if (old_crtc_state->hw.active &&
	    new_crtc_state->disable_lp_wm && ilk_disable_lp_wm(dev_priv))
		intel_crtc_wait_for_next_vblank(crtc);

	/*
	 * If we're doing a modeset we don't need to do any
	 * pre-vblank watermark programming here.
	 */
	if (!intel_crtc_needs_modeset(new_crtc_state)) {
		/*
		 * For platforms that support atomic watermarks, program the
		 * 'intermediate' watermarks immediately. On pre-gen9 platforms, these
		 * will be the intermediate values that are safe for both pre- and
		 * post- vblank; when vblank happens, the 'active' values will be set
		 * to the final 'target' values and we'll do this again to get the
		 * optimal watermarks. For gen9+ platforms, the values we program here
		 * will be the final target values which will get automatically latched
		 * at vblank time; no further programming will be necessary.
		 *
		 * If a platform hasn't been transitioned to atomic watermarks yet,
		 * we'll continue to update watermarks the old way, if flags tell
		 * us to.
		 */
		if (!intel_initial_watermarks(state, crtc))
			if (new_crtc_state->update_wm_pre)
				intel_update_watermarks(dev_priv);
	}

	/*
	 * Gen2 reports pipe underruns whenever all planes are disabled.
	 * So disable underrun reporting before all the planes get disabled.
	 *
	 * We do this after .initial_watermarks() so that we have a
	 * chance of catching underruns with the intermediate watermarks
	 * vs. the old plane configuration.
	 */
	if (DISPLAY_VER(dev_priv) == 2 && planes_disabling(old_crtc_state, new_crtc_state))
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	/*
	 * WA for platforms where async address update enable bit
	 * is double buffered and only latched at start of vblank.
	 */
	if (old_crtc_state->async_flip_planes & ~new_crtc_state->async_flip_planes)
		intel_crtc_async_flip_disable_wa(state, crtc);
}
| |
| static void intel_crtc_disable_planes(struct intel_atomic_state *state, |
| struct intel_crtc *crtc) |
| { |
| struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
| const struct intel_crtc_state *new_crtc_state = |
| intel_atomic_get_new_crtc_state(state, crtc); |
| unsigned int update_mask = new_crtc_state->update_planes; |
| const struct intel_plane_state *old_plane_state; |
| struct intel_plane *plane; |
| unsigned fb_bits = 0; |
| int i; |
| |
| intel_crtc_dpms_overlay_disable(crtc); |
| |
| for_each_old_intel_plane_in_state(state, plane, old_plane_state, i) { |
| if (crtc->pipe != plane->pipe || |
| !(update_mask & BIT(plane->id))) |
| continue; |
| |
| intel_plane_disable_arm(plane, new_crtc_state); |
| |
| if (old_plane_state->uapi.visible) |
| fb_bits |= plane->frontbuffer_bit; |
| } |
| |
| intel_frontbuffer_flip(dev_priv, fb_bits); |
| } |
| |
| static void intel_encoders_update_prepare(struct intel_atomic_state *state) |
| { |
| struct drm_i915_private *i915 = to_i915(state->base.dev); |
| struct intel_crtc_state *new_crtc_state, *old_crtc_state; |
| struct intel_crtc *crtc; |
| int i; |
| |
| /* |
| * Make sure the DPLL state is up-to-date for fastset TypeC ports after non-blocking commits. |
| * TODO: Update the DPLL state for all cases in the encoder->update_prepare() hook. |
| */ |
| if (i915->display.dpll.mgr) { |
| for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { |
| if (intel_crtc_needs_modeset(new_crtc_state)) |
| continue; |
| |
| new_crtc_state->shared_dpll = old_crtc_state->shared_dpll; |
| new_crtc_state->dpll_hw_state = old_crtc_state->dpll_hw_state; |
| } |
| } |
| } |
| |
| static void intel_encoders_pre_pll_enable(struct intel_atomic_state *state, |
| struct intel_crtc *crtc) |
| { |
| const struct intel_crtc_state *crtc_state = |
| intel_atomic_get_new_crtc_state(state, crtc); |
| const struct drm_connector_state *conn_state; |
| struct drm_connector *conn; |
| int i; |
| |
| for_each_new_connector_in_state(&state->base, conn, conn_state, i) { |
| struct intel_encoder *encoder = |
| to_intel_encoder(conn_state->best_encoder); |
| |
| if (conn_state->crtc != &crtc->base) |
| continue; |
| |
| if (encoder->pre_pll_enable) |
| encoder->pre_pll_enable(state, encoder, |
| crtc_state, conn_state); |
| } |
| } |
| |
| static void intel_encoders_pre_enable(struct intel_atomic_state *state, |
| struct intel_crtc *crtc) |
| { |
| const struct intel_crtc_state *crtc_state = |
| intel_atomic_get_new_crtc_state(state, crtc); |
| const struct drm_connector_state *conn_state; |
| struct drm_connector *conn; |
| int i; |
| |
| for_each_new_connector_in_state(&state->base, conn, conn_state, i) { |
| struct intel_encoder *encoder = |
| to_intel_encoder(conn_state->best_encoder); |
| |
| if (conn_state->crtc != &crtc->base) |
| continue; |
| |
| if (encoder->pre_enable) |
| encoder->pre_enable(state, encoder, |
| crtc_state, conn_state); |
| } |
| } |
| |
| static void intel_encoders_enable(struct intel_atomic_state *state, |
| struct intel_crtc *crtc) |
| { |
| const struct intel_crtc_state *crtc_state = |
| intel_atomic_get_new_crtc_state(state, crtc); |
| const struct drm_connector_state *conn_state; |
| struct drm_connector *conn; |
| int i; |
| |
| for_each_new_connector_in_state(&state->base, conn, conn_state, i) { |
| struct intel_encoder *encoder = |
| to_intel_encoder(conn_state->best_encoder); |
| |
| if (conn_state->crtc != &crtc->base) |
| continue; |
| |
| if (encoder->enable) |
| encoder->enable(state, encoder, |
| crtc_state, conn_state); |
| intel_opregion_notify_encoder(encoder, true); |
| } |
| } |
| |
| static void intel_encoders_disable(struct intel_atomic_state *state, |
| struct intel_crtc *crtc) |
| { |
| const struct intel_crtc_state *old_crtc_state = |
| intel_atomic_get_old_crtc_state(state, crtc); |
| const struct drm_connector_state *old_conn_state; |
| struct drm_connector *conn; |
| int i; |
| |
| for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { |
| struct intel_encoder *encoder = |
| to_intel_encoder(old_conn_state->best_encoder); |
| |
| if (old_conn_state->crtc != &crtc->base) |
| continue; |
| |
| intel_opregion_notify_encoder(encoder, false); |
| if (encoder->disable) |
| encoder->disable(state, encoder, |
| old_crtc_state, old_conn_state); |
| } |
| } |
| |
| static void intel_encoders_post_disable(struct intel_atomic_state *state, |
| struct intel_crtc *crtc) |
| { |
| const struct intel_crtc_state *old_crtc_state = |
| intel_atomic_get_old_crtc_state(state, crtc); |
| const struct drm_connector_state *old_conn_state; |
| struct drm_connector *conn; |
| int i; |
| |
| for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { |
| struct intel_encoder *encoder = |
| to_intel_encoder(old_conn_state->best_encoder); |
| |
| if (old_conn_state->crtc != &crtc->base) |
| continue; |
| |
| if (encoder->post_disable) |
| encoder->post_disable(state, encoder, |
| old_crtc_state, old_conn_state); |
| } |
| } |
| |
| static void intel_encoders_post_pll_disable(struct intel_atomic_state *state, |
| struct intel_crtc *crtc) |
| { |
| const struct intel_crtc_state *old_crtc_state = |
| intel_atomic_get_old_crtc_state(state, crtc); |
| const struct drm_connector_state *old_conn_state; |
| struct drm_connector *conn; |
| int i; |
| |
| for_each_old_connector_in_state(&state->base, conn, old_conn_state, i) { |
| struct intel_encoder *encoder = |
| to_intel_encoder(old_conn_state->best_encoder); |
| |
| if (old_conn_state->crtc != &crtc->base) |
| continue; |
| |
| if (encoder->post_pll_disable) |
| encoder->post_pll_disable(state, encoder, |
| old_crtc_state, old_conn_state); |
| } |
| } |
| |
| static void intel_encoders_update_pipe(struct intel_atomic_state *state, |
| struct intel_crtc *crtc) |
| { |
| const struct intel_crtc_state *crtc_state = |
| intel_atomic_get_new_crtc_state(state, crtc); |
| const struct drm_connector_state *conn_state; |
| struct drm_connector *conn; |
| int i; |
| |
| for_each_new_connector_in_state(&state->base, conn, conn_state, i) { |
| struct intel_encoder *encoder = |
| to_intel_encoder(conn_state->best_encoder); |
| |
| if (conn_state->crtc != &crtc->base) |
| continue; |
| |
| if (encoder->update_pipe) |
| encoder->update_pipe(state, encoder, |
| crtc_state, conn_state); |
| } |
| } |
| |
| static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_state) |
| { |
| struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
| struct intel_plane *plane = to_intel_plane(crtc->base.primary); |
| |
| plane->disable_arm(plane, crtc_state); |
| } |
| |
/*
 * Program the CPU transcoder (link M/N values, timings, PIPECONF) on
 * ILK-style PCH platforms, prior to enabling the transcoder.
 */
static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* FDI link M/N for PCH encoders, DP M/N (and M2/N2) for CPU DP. */
	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	ilk_set_pipeconf(crtc_state);
}
| |
/*
 * Full modeset enable sequence for an ILK-style (PCH based) crtc.
 * The ordering of the steps below follows the hardware requirements;
 * do not reorder without consulting the relevant workarounds.
 */
static void ilk_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	/*
	 * Sometimes spurious CPU pipe underruns happen during FDI
	 * training, at least with VGA+HDMI cloning. Suppress them.
	 *
	 * On ILK we get an occasional spurious CPU pipe underruns
	 * between eDP port A enable and vdd enable. Also PCH port
	 * enable seems to result in the occasional CPU pipe underrun.
	 *
	 * Spurious PCH underruns also occur during PCH enabling.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	ilk_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	intel_encoders_pre_enable(state, crtc);

	if (new_crtc_state->has_pch_encoder) {
		/* FDI must be configured before the transcoder starts. */
		ilk_pch_pre_enable(state, crtc);
	} else {
		assert_fdi_tx_disabled(dev_priv, pipe);
		assert_fdi_rx_disabled(dev_priv, pipe);
	}

	ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	if (new_crtc_state->has_pch_encoder)
		ilk_pch_enable(state, crtc);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	if (HAS_PCH_CPT(dev_priv))
		intel_wait_for_pipe_scanline_moving(crtc);

	/*
	 * Must wait for vblank to avoid spurious PCH FIFO underruns.
	 * And a second vblank wait is needed at least on ILK with
	 * some interlaced HDMI modes. Let's do the double wait always
	 * in case there are more corner cases we don't know about.
	 */
	if (new_crtc_state->has_pch_encoder) {
		intel_crtc_wait_for_next_vblank(crtc);
		intel_crtc_wait_for_next_vblank(crtc);
	}
	/* Re-enable the underrun reporting suppressed above. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);
}
| |
| static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv, |
| enum pipe pipe, bool apply) |
| { |
| u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)); |
| u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS; |
| |
| if (apply) |
| val |= mask; |
| else |
| val &= ~mask; |
| |
| intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val); |
| } |
| |
| static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state) |
| { |
| struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
| struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
| |
| intel_de_write(dev_priv, WM_LINETIME(crtc->pipe), |
| HSW_LINETIME(crtc_state->linetime) | |
| HSW_IPS_LINETIME(crtc_state->ips_linetime)); |
| } |
| |
| static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) |
| { |
| struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); |
| struct drm_i915_private *i915 = to_i915(crtc->base.dev); |
| |
| intel_de_rmw(i915, hsw_chicken_trans_reg(i915, crtc_state->cpu_transcoder), |
| HSW_FRAME_START_DELAY_MASK, |
| HSW_FRAME_START_DELAY(crtc_state->framestart_delay - 1)); |
| } |
| |
/*
 * PLL/encoder part of the enable sequence for a crtc that is part of a
 * bigjoiner configuration. For the master this reduces to just enabling
 * the shared DPLL; a slave additionally runs the master's encoder
 * pre_pll_enable/pre_enable hooks around its own PLL enable.
 */
static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state,
					 const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *master_crtc = intel_master_crtc(crtc_state);

	/*
	 * Enable sequence steps 1-7 on bigjoiner master
	 */
	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_pll_enable(state, master_crtc);

	if (crtc_state->shared_dpll)
		intel_enable_shared_dpll(crtc_state);

	if (intel_crtc_is_bigjoiner_slave(crtc_state))
		intel_encoders_pre_enable(state, master_crtc);
}
| |
/*
 * Program the CPU transcoder (link M/N values, timings, pixel
 * multiplier, frame start delay, TRANSCONF) on HSW+ prior to enabling
 * the transcoder.
 */
static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	/* FDI link M/N for PCH encoders, DP M/N (and M2/N2) for DP. */
	if (crtc_state->has_pch_encoder) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->fdi_m_n);
	} else if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);
	if (HAS_VRR(dev_priv))
		intel_vrr_set_transcoder_timings(crtc_state);

	/* TRANS_MULT is not programmed for the EDP transcoder. */
	if (cpu_transcoder != TRANSCODER_EDP)
		intel_de_write(dev_priv, TRANS_MULT(cpu_transcoder),
			       crtc_state->pixel_multiplier - 1);

	hsw_set_frame_start_delay(crtc_state);

	hsw_set_transconf(crtc_state);
}
| |
/*
 * Full modeset enable sequence for a HSW+ (DDI based) crtc, including
 * bigjoiner handling. The ordering of the steps below follows the
 * hardware requirements; do not reorder without consulting the
 * relevant workarounds.
 */
static void hsw_crtc_enable(struct intel_atomic_state *state,
			    struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe, hsw_workaround_pipe;
	enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
	bool psl_clkgate_wa;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	intel_dmc_enable_pipe(dev_priv, crtc->pipe);

	/* Non-bigjoiner: plain PLL + encoder enable on this crtc. */
	if (!new_crtc_state->bigjoiner_pipes) {
		intel_encoders_pre_pll_enable(state, crtc);

		if (new_crtc_state->shared_dpll)
			intel_enable_shared_dpll(new_crtc_state);

		intel_encoders_pre_enable(state, crtc);
	} else {
		icl_ddi_bigjoiner_pre_enable(state, new_crtc_state);
	}

	intel_dsc_enable(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 13)
		intel_uncompressed_joiner_enable(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);
	if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv))
		bdw_set_pipe_misc(new_crtc_state);

	/* The transcoder is configured only on the bigjoiner master. */
	if (!intel_crtc_is_bigjoiner_slave(new_crtc_state) &&
	    !transcoder_is_dsi(cpu_transcoder))
		hsw_configure_cpu_transcoder(new_crtc_state);

	crtc->active = true;

	/* Display WA #1180: WaDisableScalarClockGating: glk */
	psl_clkgate_wa = DISPLAY_VER(dev_priv) == 10 &&
		new_crtc_state->pch_pfit.enabled;
	if (psl_clkgate_wa)
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, true);

	if (DISPLAY_VER(dev_priv) >= 9)
		skl_pfit_enable(new_crtc_state);
	else
		ilk_pfit_enable(new_crtc_state);

	/*
	 * On ILK+ LUT must be loaded before the pipe is running but with
	 * clocks enabled
	 */
	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma/csc for pipe bottom color */
	if (DISPLAY_VER(dev_priv) < 9)
		intel_disable_primary_plane(new_crtc_state);

	hsw_set_linetime_wm(new_crtc_state);

	if (DISPLAY_VER(dev_priv) >= 11)
		icl_set_pipe_chicken(new_crtc_state);

	intel_initial_watermarks(state, crtc);

	if (intel_crtc_is_bigjoiner_slave(new_crtc_state))
		intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* Undo WA #1180 after a vblank, once the scaler is running. */
	if (psl_clkgate_wa) {
		intel_crtc_wait_for_next_vblank(crtc);
		glk_pipe_scaler_clock_gating_wa(dev_priv, pipe, false);
	}

	/* If we change the relative order between pipe/planes enabling, we need
	 * to change the workaround. */
	hsw_workaround_pipe = new_crtc_state->hsw_workaround_pipe;
	if (IS_HASWELL(dev_priv) && hsw_workaround_pipe != INVALID_PIPE) {
		struct intel_crtc *wa_crtc;

		wa_crtc = intel_crtc_for_pipe(dev_priv, hsw_workaround_pipe);

		intel_crtc_wait_for_next_vblank(wa_crtc);
		intel_crtc_wait_for_next_vblank(wa_crtc);
	}
}
| |
| void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state) |
| { |
| struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc); |
| struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
| enum pipe pipe = crtc->pipe; |
| |
| /* To avoid upsetting the power well on haswell only disable the pfit if |
| * it's in use. The hw state code will make sure we get this right. */ |
| if (!old_crtc_state->pch_pfit.enabled) |
| return; |
| |
| intel_de_write_fw(dev_priv, PF_CTL(pipe), 0); |
| intel_de_write_fw(dev_priv, PF_WIN_POS(pipe), 0); |
| intel_de_write_fw(dev_priv, PF_WIN_SZ(pipe), 0); |
| } |
| |
/*
 * Full modeset disable sequence for an ILK-style (PCH based) crtc,
 * mirroring ilk_crtc_enable() in reverse order.
 */
static void ilk_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * Sometimes spurious CPU pipe underruns happen when the
	 * pipe is already disabled, but FDI RX/TX is still enabled.
	 * Happens at least with VGA+HDMI cloning. Suppress them.
	 */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	ilk_pfit_disable(old_crtc_state);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_disable(state, crtc);

	intel_encoders_post_disable(state, crtc);

	if (old_crtc_state->has_pch_encoder)
		ilk_pch_post_disable(state, crtc);

	/* Re-enable the underrun reporting suppressed above. */
	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
	intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_disable_shared_dpll(old_crtc_state);
}
| |
/*
 * Full modeset disable sequence for a HSW+ (DDI based) crtc. The
 * encoder and DMC teardown is driven from the bigjoiner master only;
 * a slave crtc skips those steps here.
 */
static void hsw_crtc_disable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	/*
	 * FIXME collapse everything to one hook.
	 * Need care with mst->ddi interactions.
	 */
	if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
		intel_encoders_disable(state, crtc);
		intel_encoders_post_disable(state, crtc);
	}

	intel_disable_shared_dpll(old_crtc_state);

	if (!intel_crtc_is_bigjoiner_slave(old_crtc_state)) {
		struct intel_crtc *slave_crtc;

		intel_encoders_post_pll_disable(state, crtc);

		intel_dmc_disable_pipe(i915, crtc->pipe);

		/* The master also handles DMC disable for all its slaves. */
		for_each_intel_crtc_in_pipe_mask(&i915->drm, slave_crtc,
						 intel_crtc_bigjoiner_slave_pipes(old_crtc_state))
			intel_dmc_disable_pipe(i915, slave_crtc->pipe);
	}
}
| |
/*
 * Enable the GMCH panel fitter from the pre-computed state. Must be
 * called while the transcoder is still disabled (asserted below).
 */
static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	/* A zero control value means the pfit is not used. */
	if (!crtc_state->gmch_pfit.control)
		return;

	/*
	 * The panel fitter should only be adjusted whilst the pipe is disabled,
	 * according to register description and PRM.
	 */
	drm_WARN_ON(&dev_priv->drm,
		    intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
	assert_transcoder_disabled(dev_priv, crtc_state->cpu_transcoder);

	/* Ratios are programmed before the enable bit in PFIT_CONTROL. */
	intel_de_write(dev_priv, PFIT_PGM_RATIOS,
		       crtc_state->gmch_pfit.pgm_ratios);
	intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);

	/* Border color in case we don't scale up to the full screen. Black by
	 * default, change to something else for debugging. */
	intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
| |
| bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy) |
| { |
| if (phy == PHY_NONE) |
| return false; |
| else if (IS_ALDERLAKE_S(dev_priv)) |
| return phy <= PHY_E; |
| else if (IS_DG1(dev_priv) || IS_ROCKETLAKE(dev_priv)) |
| return phy <= PHY_D; |
| else if (IS_JASPERLAKE(dev_priv) || IS_ELKHARTLAKE(dev_priv)) |
| return phy <= PHY_C; |
| else if (IS_ALDERLAKE_P(dev_priv) || IS_DISPLAY_VER(dev_priv, 11, 12)) |
| return phy <= PHY_B; |
| else |
| /* |
| * DG2 outputs labelled as "combo PHY" in the bspec use |
| * SNPS PHYs with completely different programming, |
| * hence we always return false here. |
| */ |
| return false; |
| } |
| |
| bool intel_phy_is_tc(struct drm_i915_private *dev_priv, enum phy phy) |
| { |
| /* |
| * DG2's "TC1", although TC-capable output, doesn't share the same flow |
| * as other platforms on the display engine side and rather rely on the |
| * SNPS PHY, that is programmed separately |
| */ |
| if (IS_DG2(dev_priv)) |
| return false; |
| |
| if (DISPLAY_VER(dev_priv) >= 13) |
| return phy >= PHY_F && phy <= PHY_I; |
| else if (IS_TIGERLAKE(dev_priv)) |
| return phy >= PHY_D && phy <= PHY_I; |
| else if (IS_ICELAKE(dev_priv)) |
| return phy >= PHY_C && phy <= PHY_F; |
| |
| return false; |
| } |
| |
| bool intel_phy_is_snps(struct drm_i915_private *dev_priv, enum phy phy) |
| { |
| /* |
| * For DG2, and for DG2 only, all four "combo" ports and the TC1 port |
| * (PHY E) use Synopsis PHYs. See intel_phy_is_tc(). |
| */ |
| return IS_DG2(dev_priv) && phy > PHY_NONE && phy <= PHY_E; |
| } |
| |
| enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port) |
| { |
| if (DISPLAY_VER(i915) >= 13 && port >= PORT_D_XELPD) |
| return PHY_D + port - PORT_D_XELPD; |
| else if (DISPLAY_VER(i915) >= 13 && port >= PORT_TC1) |
| return PHY_F + port - PORT_TC1; |
| else if (IS_ALDERLAKE_S(i915) && port >= PORT_TC1) |
| return PHY_B + port - PORT_TC1; |
| else if ((IS_DG1(i915) || IS_ROCKETLAKE(i915)) && port >= PORT_TC1) |
| return PHY_C + port - PORT_TC1; |
| else if ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) && |
| port == PORT_D) |
| return PHY_A; |
| |
| return PHY_A + port - PORT_A; |
| } |
| |
| enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv, enum port port) |
| { |
| if (!intel_phy_is_tc(dev_priv, intel_port_to_phy(dev_priv, port))) |
| return TC_PORT_NONE; |
| |
| if (DISPLAY_VER(dev_priv) >= 12) |
| return TC_PORT_1 + port - PORT_TC1; |
| else |
| return TC_PORT_1 + port - PORT_C; |
| } |
| |
| enum intel_display_power_domain |
| intel_aux_power_domain(struct intel_digital_port *dig_port) |
| { |
| struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); |
| |
| if (intel_tc_port_in_tbt_alt_mode(dig_port)) |
| return intel_display_power_tbt_aux_domain(i915, dig_port->aux_ch); |
| |
| return intel_display_power_legacy_aux_domain(i915, dig_port->aux_ch); |
| } |
| |
/*
 * Compute in @mask the full set of display power domains required by
 * @crtc_state: pipe, transcoder, panel fitter, every attached encoder's
 * domain, audio MMIO, display core (when a shared DPLL is used) and DSC.
 * An inactive CRTC needs no domains and yields an empty mask.
 */
static void get_crtc_power_domains(struct intel_crtc_state *crtc_state,
				   struct intel_power_domain_mask *mask)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	struct drm_encoder *encoder;
	enum pipe pipe = crtc->pipe;

	bitmap_zero(mask->bits, POWER_DOMAIN_NUM);

	if (!crtc_state->hw.active)
		return;

	set_bit(POWER_DOMAIN_PIPE(pipe), mask->bits);
	set_bit(POWER_DOMAIN_TRANSCODER(cpu_transcoder), mask->bits);
	/* pfit domain is needed also when forcing pfit power for PSR */
	if (crtc_state->pch_pfit.enabled ||
	    crtc_state->pch_pfit.force_thru)
		set_bit(POWER_DOMAIN_PIPE_PANEL_FITTER(pipe), mask->bits);

	/* Every encoder driven by this CRTC contributes its own domain. */
	drm_for_each_encoder_mask(encoder, &dev_priv->drm,
				  crtc_state->uapi.encoder_mask) {
		struct intel_encoder *intel_encoder = to_intel_encoder(encoder);

		set_bit(intel_encoder->power_domain, mask->bits);
	}

	if (HAS_DDI(dev_priv) && crtc_state->has_audio)
		set_bit(POWER_DOMAIN_AUDIO_MMIO, mask->bits);

	if (crtc_state->shared_dpll)
		set_bit(POWER_DOMAIN_DISPLAY_CORE, mask->bits);

	if (crtc_state->dsc.compression_enable)
		set_bit(intel_dsc_power_domain(crtc, cpu_transcoder), mask->bits);
}
| |
/*
 * Grab references for the power domains @crtc_state newly requires
 * (needed but not yet in crtc->enabled_power_domains), and return in
 * @old_domains the domains that are enabled but no longer needed.
 * The caller releases @old_domains later via
 * intel_modeset_put_crtc_power_domains(), once it is safe to do so.
 */
void intel_modeset_get_crtc_power_domains(struct intel_crtc_state *crtc_state,
					  struct intel_power_domain_mask *old_domains)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum intel_display_power_domain domain;
	struct intel_power_domain_mask domains, new_domains;

	get_crtc_power_domains(crtc_state, &domains);

	/* new_domains = needed & ~already-enabled */
	bitmap_andnot(new_domains.bits,
		      domains.bits,
		      crtc->enabled_power_domains.mask.bits,
		      POWER_DOMAIN_NUM);
	/* old_domains = already-enabled & ~needed */
	bitmap_andnot(old_domains->bits,
		      crtc->enabled_power_domains.mask.bits,
		      domains.bits,
		      POWER_DOMAIN_NUM);

	for_each_power_domain(domain, &new_domains)
		intel_display_power_get_in_set(dev_priv,
					       &crtc->enabled_power_domains,
					       domain);
}
| |
| void intel_modeset_put_crtc_power_domains(struct intel_crtc *crtc, |
| struct intel_power_domain_mask *domains) |
| { |
| intel_display_power_put_mask_in_set(to_i915(crtc->base.dev), |
| &crtc->enabled_power_domains, |
| domains); |
| } |
| |
/*
 * Program the pre-DDI CPU transcoder: DP link M/N values (for DP
 * outputs only), transcoder timings, and the pipe configuration
 * register (TRANSCONF).
 */
static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;

	if (intel_crtc_has_dp_encoder(crtc_state)) {
		intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder,
					       &crtc_state->dp_m_n);
		intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder,
					       &crtc_state->dp_m2_n2);
	}

	intel_set_transcoder_timings(crtc_state);

	i9xx_set_pipeconf(crtc_state);
}
| |
/*
 * Full pipe enable sequence for VLV/CHV. The ordering below (transcoder
 * config, PLL enable between the encoders' pre_pll_enable and pre_enable
 * hooks, color management, watermarks, transcoder enable, then encoder
 * enable) follows the required hardware bring-up sequence and must not
 * be reordered.
 */
static void valleyview_crtc_enable(struct intel_atomic_state *state,
				   struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	intel_de_write(dev_priv, VLV_PIPE_MSA_MISC(pipe), 0);

	if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
		intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
		intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
	}

	crtc->active = true;

	intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_pll_enable(state, crtc);

	if (IS_CHERRYVIEW(dev_priv))
		chv_enable_pll(new_crtc_state);
	else
		vlv_enable_pll(new_crtc_state);

	intel_encoders_pre_enable(state, crtc);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	intel_initial_watermarks(state, crtc);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);
}
| |
/*
 * Full pipe enable sequence for pre-ILK (gen2-gen4) platforms.
 * Like valleyview_crtc_enable() but with the i9xx PLL and without
 * the VLV/CHV-specific register setup; ordering is sequence-critical.
 */
static void i9xx_crtc_enable(struct intel_atomic_state *state,
			     struct intel_crtc *crtc)
{
	const struct intel_crtc_state *new_crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (drm_WARN_ON(&dev_priv->drm, crtc->active))
		return;

	i9xx_configure_cpu_transcoder(new_crtc_state);

	intel_set_pipe_src_size(new_crtc_state);

	crtc->active = true;

	/* gen2 has no underrun reporting */
	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);

	intel_encoders_pre_enable(state, crtc);

	i9xx_enable_pll(new_crtc_state);

	i9xx_pfit_enable(new_crtc_state);

	intel_color_load_luts(new_crtc_state);
	intel_color_commit_noarm(new_crtc_state);
	intel_color_commit_arm(new_crtc_state);
	/* update DSPCNTR to configure gamma for pipe bottom color */
	intel_disable_primary_plane(new_crtc_state);

	if (!intel_initial_watermarks(state, crtc))
		intel_update_watermarks(dev_priv);
	intel_enable_transcoder(new_crtc_state);

	intel_crtc_vblank_on(new_crtc_state);

	intel_encoders_enable(state, crtc);

	/* prevents spurious underruns */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);
}
| |
/*
 * Disable the GMCH panel fitter if the old state had it enabled.
 * The transcoder must already be disabled at this point.
 */
static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!old_crtc_state->gmch_pfit.control)
		return;

	assert_transcoder_disabled(dev_priv, old_crtc_state->cpu_transcoder);

	drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
		    intel_de_read(dev_priv, PFIT_CONTROL));
	intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
| |
/*
 * Full pipe disable sequence for pre-ILK/VLV/CHV platforms; the
 * teardown order mirrors the enable sequence in reverse and must
 * not be reordered.
 */
static void i9xx_crtc_disable(struct intel_atomic_state *state,
			      struct intel_crtc *crtc)
{
	struct intel_crtc_state *old_crtc_state =
		intel_atomic_get_old_crtc_state(state, crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	/*
	 * On gen2 planes are double buffered but the pipe isn't, so we must
	 * wait for planes to fully turn off before disabling the pipe.
	 */
	if (DISPLAY_VER(dev_priv) == 2)
		intel_crtc_wait_for_next_vblank(crtc);

	intel_encoders_disable(state, crtc);

	intel_crtc_vblank_off(old_crtc_state);

	intel_disable_transcoder(old_crtc_state);

	i9xx_pfit_disable(old_crtc_state);

	intel_encoders_post_disable(state, crtc);

	/* DSI outputs manage their PLL themselves */
	if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DSI)) {
		if (IS_CHERRYVIEW(dev_priv))
			chv_disable_pll(dev_priv, pipe);
		else if (IS_VALLEYVIEW(dev_priv))
			vlv_disable_pll(dev_priv, pipe);
		else
			i9xx_disable_pll(old_crtc_state);
	}

	intel_encoders_post_pll_disable(state, crtc);

	if (DISPLAY_VER(dev_priv) != 2)
		intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);

	if (!dev_priv->display.funcs.wm->initial_watermarks)
		intel_update_watermarks(dev_priv);

	/* clock the pipe down to 640x480@60 to potentially save power */
	if (IS_I830(dev_priv))
		i830_enable_pipe(dev_priv, pipe);
}
| |
void intel_encoder_destroy(struct drm_encoder *encoder)
{
	/* Tear down the DRM core state, then free the wrapping intel_encoder. */
	drm_encoder_cleanup(encoder);
	kfree(to_intel_encoder(encoder));
}
| |
| static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc) |
| { |
| const struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
| |
| /* GDG double wide on either pipe, otherwise pipe A only */ |
| return DISPLAY_VER(dev_priv) < 4 && |
| (crtc->pipe == PIPE_A || IS_I915G(dev_priv)); |
| } |
| |
/*
 * Return the effective pipe pixel rate (kHz), scaling the dotclock by
 * the PCH panel fitter's src->dst ratio when the pfit is enabled.
 */
static u32 ilk_pipe_pixel_rate(const struct intel_crtc_state *crtc_state)
{
	u32 pixel_rate = crtc_state->hw.pipe_mode.crtc_clock;
	struct drm_rect src;

	/*
	 * We only use IF-ID interlacing. If we ever use
	 * PF-ID we'll need to adjust the pixel_rate here.
	 */

	if (!crtc_state->pch_pfit.enabled)
		return pixel_rate;

	/* pipe source size in 16.16 fixed point, to match pch_pfit.dst */
	drm_rect_init(&src, 0, 0,
		      drm_rect_width(&crtc_state->pipe_src) << 16,
		      drm_rect_height(&crtc_state->pipe_src) << 16);

	return intel_adjusted_rate(&src, &crtc_state->pch_pfit.dst,
				   pixel_rate);
}
| |
| static void intel_mode_from_crtc_timings(struct drm_display_mode *mode, |
| const struct drm_display_mode *timings) |
| { |
| mode->hdisplay = timings->crtc_hdisplay; |
| mode->htotal = timings->crtc_htotal; |
| mode->hsync_start = timings->crtc_hsync_start; |
| mode->hsync_end = timings->crtc_hsync_end; |
| |
| mode->vdisplay = timings->crtc_vdisplay; |
| mode->vtotal = timings->crtc_vtotal; |
| mode->vsync_start = timings->crtc_vsync_start; |
| mode->vsync_end = timings->crtc_vsync_end; |
| |
| mode->flags = timings->flags; |
| mode->type = DRM_MODE_TYPE_DRIVER; |
| |
| mode->clock = timings->crtc_clock; |
| |
| drm_mode_set_name(mode); |
| } |
| |
| static void intel_crtc_compute_pixel_rate(struct intel_crtc_state *crtc_state) |
| { |
| struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); |
| |
| if (HAS_GMCH(dev_priv)) |
| /* FIXME calculate proper pipe pixel rate for GMCH pfit */ |
| crtc_state->pixel_rate = |
| crtc_state->hw.pipe_mode.crtc_clock; |
| else |
| crtc_state->pixel_rate = |
| ilk_pipe_pixel_rate(crtc_state); |
| } |
| |
/*
 * Convert full-mode horizontal timings into per-pipe timings by dividing
 * by the number of joined pipes. No-op unless bigjoiner is in use.
 */
static void intel_bigjoiner_adjust_timings(const struct intel_crtc_state *crtc_state,
					   struct drm_display_mode *mode)
{
	int num_pipes = intel_bigjoiner_num_pipes(crtc_state);

	if (num_pipes < 2)
		return;

	mode->crtc_clock /= num_pipes;
	mode->crtc_hdisplay /= num_pipes;
	mode->crtc_hblank_start /= num_pipes;
	mode->crtc_hblank_end /= num_pipes;
	mode->crtc_hsync_start /= num_pipes;
	mode->crtc_hsync_end /= num_pipes;
	mode->crtc_htotal /= num_pipes;
}
| |
/*
 * Expand eDP MSO per-segment transcoder timings into full-panel timings.
 * No-op when the splitter is not enabled.
 */
static void intel_splitter_adjust_timings(const struct intel_crtc_state *crtc_state,
					  struct drm_display_mode *mode)
{
	int overlap = crtc_state->splitter.pixel_overlap;
	int n = crtc_state->splitter.link_count;

	if (!crtc_state->splitter.enable)
		return;

	/*
	 * eDP MSO uses segment timings from EDID for transcoder
	 * timings, but full mode for everything else.
	 *
	 * h_full = (h_segment - pixel_overlap) * link_count
	 */
	mode->crtc_hdisplay = (mode->crtc_hdisplay - overlap) * n;
	mode->crtc_hblank_start = (mode->crtc_hblank_start - overlap) * n;
	mode->crtc_hblank_end = (mode->crtc_hblank_end - overlap) * n;
	mode->crtc_hsync_start = (mode->crtc_hsync_start - overlap) * n;
	mode->crtc_hsync_end = (mode->crtc_hsync_end - overlap) * n;
	mode->crtc_htotal = (mode->crtc_htotal - overlap) * n;
	mode->crtc_clock *= n;
}
| |
/*
 * Derive hw.mode, hw.pipe_mode and the normal timings of
 * hw.adjusted_mode from the raw transcoder timings read out into
 * adjusted_mode's crtc_* fields, accounting for MSO segment expansion
 * and bigjoiner pipe splitting. The step order below matters: each
 * stage consumes the previous stage's result.
 */
static void intel_crtc_readout_derived_state(struct intel_crtc_state *crtc_state)
{
	struct drm_display_mode *mode = &crtc_state->hw.mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;

	/*
	 * Start with the adjusted_mode crtc timings, which
	 * have been filled with the transcoder timings.
	 */
	drm_mode_copy(pipe_mode, adjusted_mode);

	/* Expand MSO per-segment transcoder timings to full */
	intel_splitter_adjust_timings(crtc_state, pipe_mode);

	/*
	 * We want the full numbers in adjusted_mode normal timings,
	 * adjusted_mode crtc timings are left with the raw transcoder
	 * timings.
	 */
	intel_mode_from_crtc_timings(adjusted_mode, pipe_mode);

	/* Populate the "user" mode with full numbers */
	drm_mode_copy(mode, pipe_mode);
	intel_mode_from_crtc_timings(mode, mode);
	mode->hdisplay = drm_rect_width(&crtc_state->pipe_src) *
		(intel_bigjoiner_num_pipes(crtc_state) ?: 1);
	mode->vdisplay = drm_rect_height(&crtc_state->pipe_src);

	/* Derive per-pipe timings in case bigjoiner is used */
	intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	intel_crtc_compute_pixel_rate(crtc_state);
}
| |
/*
 * Read out the encoder's hardware state into @crtc_state, then derive
 * the dependent mode/pixel-rate state from the raw transcoder timings.
 */
void intel_encoder_get_config(struct intel_encoder *encoder,
			      struct intel_crtc_state *crtc_state)
{
	encoder->get_config(encoder, crtc_state);

	intel_crtc_readout_derived_state(crtc_state);
}
| |
| static void intel_bigjoiner_compute_pipe_src(struct intel_crtc_state *crtc_state) |
| { |
| int num_pipes = intel_bigjoiner_num_pipes(crtc_state); |
| int width, height; |
| |
| if (num_pipes < 2) |
| return; |
| |
| width = drm_rect_width(&crtc_state->pipe_src); |
| height = drm_rect_height(&crtc_state->pipe_src); |
| |
| drm_rect_init(&crtc_state->pipe_src, 0, 0, |
| width / num_pipes, height); |
| } |
| |
/*
 * Compute the per-pipe source size and validate it. Returns 0 on
 * success, -EINVAL if the (odd) pipe source width is not supported by
 * the configured output (double wide pipe or dual link LVDS).
 */
static int intel_crtc_compute_pipe_src(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	intel_bigjoiner_compute_pipe_src(crtc_state);

	/*
	 * Pipe horizontal size must be even in:
	 * - DVO ganged mode
	 * - LVDS dual channel mode
	 * - Double wide pipe
	 */
	if (drm_rect_width(&crtc_state->pipe_src) & 1) {
		if (crtc_state->double_wide) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] Odd pipe source width not supported with double wide pipe\n",
				    crtc->base.base.id, crtc->base.name);
			return -EINVAL;
		}

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS) &&
		    intel_is_dual_link_lvds(i915)) {
			drm_dbg_kms(&i915->drm,
				    "[CRTC:%d:%s] Odd pipe source width not supported with dual link LVDS\n",
				    crtc->base.base.id, crtc->base.name);
			return -EINVAL;
		}
	}

	return 0;
}
| |
/*
 * Derive hw.pipe_mode from hw.adjusted_mode (expanding MSO segments and
 * splitting for bigjoiner), decide whether double wide mode is needed
 * on old platforms, and validate the resulting dotclock against the
 * platform limit. Returns 0 on success, -EINVAL if the clock is too high.
 */
static int intel_crtc_compute_pipe_mode(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);
	struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode;
	int clock_limit = i915->max_dotclk_freq;

	/*
	 * Start with the adjusted_mode crtc timings, which
	 * have been filled with the transcoder timings.
	 */
	drm_mode_copy(pipe_mode, adjusted_mode);

	/* Expand MSO per-segment transcoder timings to full */
	intel_splitter_adjust_timings(crtc_state, pipe_mode);

	/* Derive per-pipe timings in case bigjoiner is used */
	intel_bigjoiner_adjust_timings(crtc_state, pipe_mode);
	intel_mode_from_crtc_timings(pipe_mode, pipe_mode);

	if (DISPLAY_VER(i915) < 4) {
		/* single wide pipe is limited to 90% of cdclk */
		clock_limit = i915->display.cdclk.max_cdclk_freq * 9 / 10;

		/*
		 * Enable double wide mode when the dot clock
		 * is > 90% of the (display) core speed.
		 */
		if (intel_crtc_supports_double_wide(crtc) &&
		    pipe_mode->crtc_clock > clock_limit) {
			clock_limit = i915->max_dotclk_freq;
			crtc_state->double_wide = true;
		}
	}

	if (pipe_mode->crtc_clock > clock_limit) {
		drm_dbg_kms(&i915->drm,
			    "[CRTC:%d:%s] requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
			    crtc->base.base.id, crtc->base.name,
			    pipe_mode->crtc_clock, clock_limit,
			    str_yes_no(crtc_state->double_wide));
		return -EINVAL;
	}

	return 0;
}
| |
/*
 * Top-level CRTC state computation: clocks, pipe source size, pipe
 * mode, pixel rate, and (for PCH encoders) the FDI configuration.
 * Returns 0 on success or a negative error code from any sub-step.
 */
static int intel_crtc_compute_config(struct intel_atomic_state *state,
				     struct intel_crtc *crtc)
{
	struct intel_crtc_state *crtc_state =
		intel_atomic_get_new_crtc_state(state, crtc);
	int ret;

	ret = intel_dpll_crtc_compute_clock(state, crtc);
	if (ret)
		return ret;

	ret = intel_crtc_compute_pipe_src(crtc_state);
	if (ret)
		return ret;

	ret = intel_crtc_compute_pipe_mode(crtc_state);
	if (ret)
		return ret;

	intel_crtc_compute_pixel_rate(crtc_state);

	if (crtc_state->has_pch_encoder)
		return ilk_fdi_compute_config(crtc, crtc_state);

	return 0;
}
| |
| static void |
| intel_reduce_m_n_ratio(u32 *num, u32 *den) |
| { |
| while (*num > DATA_LINK_M_N_MASK || |
| *den > DATA_LINK_M_N_MASK) { |
| *num >>= 1; |
| *den >>= 1; |
| } |
| } |
| |
/*
 * Compute the M/N register pair for the ratio m/n. If @constant_n is
 * non-zero it is used as N directly; otherwise N is n rounded up to a
 * power of two, capped at DATA_LINK_N_MAX. M is then scaled to keep the
 * ratio, and both are reduced to fit their register fields.
 */
static void compute_m_n(u32 *ret_m, u32 *ret_n,
			u32 m, u32 n, u32 constant_n)
{
	if (constant_n)
		*ret_n = constant_n;
	else
		*ret_n = min_t(unsigned int, roundup_pow_of_two(n), DATA_LINK_N_MAX);

	/* 64-bit intermediate to avoid overflow in m * N */
	*ret_m = div_u64(mul_u32_u32(m, *ret_n), n);
	intel_reduce_m_n_ratio(ret_m, ret_n);
}
| |
/*
 * Compute the DP data and link M/N values for the given link
 * configuration. @bits_per_pixel_x16 is in .4 binary fixed point,
 * @bw_overhead accounts for link-level overhead (FEC, DSC, etc.).
 * Results are stored in @m_n.
 */
void
intel_link_compute_m_n(u16 bits_per_pixel_x16, int nlanes,
		       int pixel_clock, int link_clock,
		       int bw_overhead,
		       struct intel_link_m_n *m_n)
{
	u32 link_symbol_clock = intel_dp_link_symbol_clock(link_clock);
	u32 data_m = intel_dp_effective_data_rate(pixel_clock, bits_per_pixel_x16,
						  bw_overhead);
	u32 data_n = drm_dp_max_dprx_data_rate(link_clock, nlanes);

	/*
	 * Windows/BIOS uses fixed M/N values always. Follow suit.
	 *
	 * Also several DP dongles in particular seem to be fussy
	 * about too large link M/N values. Presumably the 20bit
	 * value used by Windows/BIOS is acceptable to everyone.
	 */
	m_n->tu = 64;
	compute_m_n(&m_n->data_m, &m_n->data_n,
		    data_m, data_n,
		    0x8000000);

	compute_m_n(&m_n->link_m, &m_n->link_n,
		    pixel_clock, link_symbol_clock,
		    0x80000);
}
| |
/*
 * On IBX/CPT PCH platforms, override the VBT's LVDS SSC setting with
 * whatever the BIOS actually programmed into PCH_DREF_CONTROL.
 */
void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
{
	/*
	 * There may be no VBT; and if the BIOS enabled SSC we can
	 * just keep using it to avoid unnecessary flicker. Whereas if the
	 * BIOS isn't using it, don't assume it will work even if the VBT
	 * indicates as much.
	 */
	if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
		bool bios_lvds_use_ssc = intel_de_read(dev_priv,
						       PCH_DREF_CONTROL) &
			DREF_SSC1_ENABLE;

		if (dev_priv->display.vbt.lvds_use_ssc != bios_lvds_use_ssc) {
			drm_dbg_kms(&dev_priv->drm,
				    "SSC %s by BIOS, overriding VBT which says %s\n",
				    str_enabled_disabled(bios_lvds_use_ssc),
				    str_enabled_disabled(dev_priv->display.vbt.lvds_use_ssc));
			dev_priv->display.vbt.lvds_use_ssc = bios_lvds_use_ssc;
		}
	}
}
| |
| void intel_zero_m_n(struct intel_link_m_n *m_n) |
| { |
| /* corresponds to 0 register value */ |
| memset(m_n, 0, sizeof(*m_n)); |
| m_n->tu = 1; |
| } |
| |
/*
 * Program one set of data/link M/N registers. The TU size is packed
 * into the data M register alongside the M value.
 */
void intel_set_m_n(struct drm_i915_private *i915,
		   const struct intel_link_m_n *m_n,
		   i915_reg_t data_m_reg, i915_reg_t data_n_reg,
		   i915_reg_t link_m_reg, i915_reg_t link_n_reg)
{
	intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m);
	intel_de_write(i915, data_n_reg, m_n->data_n);
	intel_de_write(i915, link_m_reg, m_n->link_m);
	/*
	 * On BDW+ writing LINK_N arms the double buffered update
	 * of all the M/N registers, so it must be written last.
	 */
	intel_de_write(i915, link_n_reg, m_n->link_n);
}
| |
| bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv, |
| enum transcoder transcoder) |
| { |
| if (IS_HASWELL(dev_priv)) |
| return transcoder == TRANSCODER_EDP; |
| |
| return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv); |
| } |
| |
/*
 * Program the primary (M1/N1) data/link values. ILK+ uses per-transcoder
 * registers; older (G4x-style) platforms use per-pipe registers.
 */
void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;

	if (DISPLAY_VER(dev_priv) >= 5)
		intel_set_m_n(dev_priv, m_n,
			      PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder),
			      PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder));
	else
		intel_set_m_n(dev_priv, m_n,
			      PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe),
			      PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe));
}
| |
/*
 * Program the secondary (M2/N2) data/link values; no-op on transcoders
 * that lack the M2/N2 register set.
 */
void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc,
				    enum transcoder transcoder,
				    const struct intel_link_m_n *m_n)
{
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);

	if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder))
		return;

	intel_set_m_n(dev_priv, m_n,
		      PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder),
		      PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder));
}
| |
/*
 * Program the transcoder's H/V timing registers from hw.adjusted_mode,
 * applying the interlace halfline adjustment, the ADL+ context-latency
 * vblank handling, and the HSW EDP VTOTAL workaround. Register values
 * are 0-based, hence the "- 1" on each field.
 */
static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe = crtc->pipe;
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;
	int vsyncshift = 0;

	/* We need to be careful not to changed the adjusted mode, for otherwise
	 * the hw state checker will get angry at the mismatch. */
	crtc_vdisplay = adjusted_mode->crtc_vdisplay;
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_start = adjusted_mode->crtc_vblank_start;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
		/* the chip adds 2 halflines automatically */
		crtc_vtotal -= 1;
		crtc_vblank_end -= 1;

		if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			vsyncshift = (adjusted_mode->crtc_htotal - 1) / 2;
		else
			vsyncshift = adjusted_mode->crtc_hsync_start -
				adjusted_mode->crtc_htotal / 2;
		/* keep vsyncshift non-negative by wrapping around htotal */
		if (vsyncshift < 0)
			vsyncshift += adjusted_mode->crtc_htotal;
	}

	/*
	 * VBLANK_START no longer works on ADL+, instead we must use
	 * TRANS_SET_CONTEXT_LATENCY to configure the pipe vblank start.
	 */
	if (DISPLAY_VER(dev_priv) >= 13) {
		intel_de_write(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder),
			       crtc_vblank_start - crtc_vdisplay);

		/*
		 * VBLANK_START not used by hw, just clear it
		 * to make it stand out in register dumps.
		 */
		crtc_vblank_start = 1;
	}

	if (DISPLAY_VER(dev_priv) >= 4)
		intel_de_write(dev_priv, TRANS_VSYNCSHIFT(cpu_transcoder),
			       vsyncshift);

	intel_de_write(dev_priv, TRANS_HTOTAL(cpu_transcoder),
		       HACTIVE(adjusted_mode->crtc_hdisplay - 1) |
		       HTOTAL(adjusted_mode->crtc_htotal - 1));
	intel_de_write(dev_priv, TRANS_HBLANK(cpu_transcoder),
		       HBLANK_START(adjusted_mode->crtc_hblank_start - 1) |
		       HBLANK_END(adjusted_mode->crtc_hblank_end - 1));
	intel_de_write(dev_priv, TRANS_HSYNC(cpu_transcoder),
		       HSYNC_START(adjusted_mode->crtc_hsync_start - 1) |
		       HSYNC_END(adjusted_mode->crtc_hsync_end - 1));

	intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
		       VACTIVE(crtc_vdisplay - 1) |
		       VTOTAL(crtc_vtotal - 1));
	intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
		       VBLANK_START(crtc_vblank_start - 1) |
		       VBLANK_END(crtc_vblank_end - 1));
	intel_de_write(dev_priv, TRANS_VSYNC(cpu_transcoder),
		       VSYNC_START(adjusted_mode->crtc_vsync_start - 1) |
		       VSYNC_END(adjusted_mode->crtc_vsync_end - 1));

	/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
	 * programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
	 * documented on the DDI_FUNC_CTL register description, EDP Input Select
	 * bits. */
	if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
	    (pipe == PIPE_B || pipe == PIPE_C))
		intel_de_write(dev_priv, TRANS_VTOTAL(pipe),
			       VACTIVE(crtc_vdisplay - 1) |
			       VTOTAL(crtc_vtotal - 1));
}
| |
/*
 * Reprogram only the vertical timing registers for a low refresh rate
 * (LRR) change, without a full modeset. Interlaced modes are not
 * expected here.
 */
static void intel_set_transcoder_timings_lrr(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode;
	u32 crtc_vdisplay, crtc_vtotal, crtc_vblank_start, crtc_vblank_end;

	crtc_vdisplay = adjusted_mode->crtc_vdisplay;
	crtc_vtotal = adjusted_mode->crtc_vtotal;
	crtc_vblank_start = adjusted_mode->crtc_vblank_start;
	crtc_vblank_end = adjusted_mode->crtc_vblank_end;

	drm_WARN_ON(&dev_priv->drm, adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE);

	/*
	 * The hardware actually ignores TRANS_VBLANK.VBLANK_END in DP mode.
	 * But let's write it anyway to keep the state checker happy.
	 */
	intel_de_write(dev_priv, TRANS_VBLANK(cpu_transcoder),
		       VBLANK_START(crtc_vblank_start - 1) |
		       VBLANK_END(crtc_vblank_end - 1));
	/*
	 * The double buffer latch point for TRANS_VTOTAL
	 * is the transcoder's undelayed vblank.
	 */
	intel_de_write(dev_priv, TRANS_VTOTAL(cpu_transcoder),
		       VACTIVE(crtc_vdisplay - 1) |
		       VTOTAL(crtc_vtotal - 1));
}
| |
/*
 * Program PIPESRC with the pipe source size, and additionally the PSR2
 * selective-update early transport region size when that feature is
 * enabled. Register fields are 0-based, hence the "- 1".
 */
static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	int width = drm_rect_width(&crtc_state->pipe_src);
	int height = drm_rect_height(&crtc_state->pipe_src);
	enum pipe pipe = crtc->pipe;

	/* pipesrc controls the size that is scaled from, which should
	 * always be the user's requested size.
	 */
	intel_de_write(dev_priv, PIPESRC(pipe),
		       PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));

	if (!crtc_state->enable_psr2_su_region_et)
		return;

	width = drm_rect_width(&crtc_state->psr2_su_area);
	height = drm_rect_height(&crtc_state->psr2_su_area);

	intel_de_write(dev_priv, PIPE_SRCSZ_ERLY_TPT(pipe),
		       PIPESRC_WIDTH(width - 1) | PIPESRC_HEIGHT(height - 1));
}
| |
| static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) |
| { |
| struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); |
| enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; |
| |
| if (DISPLAY_VER(dev_priv) == 2) |
| return false; |
| |
| if (DISPLAY_VER(dev_priv) >= 9 || |
| IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) |
| return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK_HSW; |
| else |
| return intel_de_read(dev_priv, TRANSCONF(cpu_transcoder)) & TRANSCONF_INTERLACE_MASK; |
| } |
| |
/*
 * Read back the transcoder timing registers into adjusted_mode's crtc_*
 * fields, undoing the "- 1" applied at programming time, reapplying the
 * interlace halfline adjustment and the ADL+ context-latency vblank
 * start. DSI transcoders lack the HBLANK/VBLANK registers.
 */
static void intel_get_transcoder_timings(struct intel_crtc *crtc,
					 struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
	struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
	u32 tmp;

	tmp = intel_de_read(dev_priv, TRANS_HTOTAL(cpu_transcoder));
	adjusted_mode->crtc_hdisplay = REG_FIELD_GET(HACTIVE_MASK, tmp) + 1;
	adjusted_mode->crtc_htotal = REG_FIELD_GET(HTOTAL_MASK, tmp) + 1;

	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_HBLANK(cpu_transcoder));
		adjusted_mode->crtc_hblank_start = REG_FIELD_GET(HBLANK_START_MASK, tmp) + 1;
		adjusted_mode->crtc_hblank_end = REG_FIELD_GET(HBLANK_END_MASK, tmp) + 1;
	}

	tmp = intel_de_read(dev_priv, TRANS_HSYNC(cpu_transcoder));
	adjusted_mode->crtc_hsync_start = REG_FIELD_GET(HSYNC_START_MASK, tmp) + 1;
	adjusted_mode->crtc_hsync_end = REG_FIELD_GET(HSYNC_END_MASK, tmp) + 1;

	tmp = intel_de_read(dev_priv, TRANS_VTOTAL(cpu_transcoder));
	adjusted_mode->crtc_vdisplay = REG_FIELD_GET(VACTIVE_MASK, tmp) + 1;
	adjusted_mode->crtc_vtotal = REG_FIELD_GET(VTOTAL_MASK, tmp) + 1;

	/* FIXME TGL+ DSI transcoders have this! */
	if (!transcoder_is_dsi(cpu_transcoder)) {
		tmp = intel_de_read(dev_priv, TRANS_VBLANK(cpu_transcoder));
		adjusted_mode->crtc_vblank_start = REG_FIELD_GET(VBLANK_START_MASK, tmp) + 1;
		adjusted_mode->crtc_vblank_end = REG_FIELD_GET(VBLANK_END_MASK, tmp) + 1;
	}
	tmp = intel_de_read(dev_priv, TRANS_VSYNC(cpu_transcoder));
	adjusted_mode->crtc_vsync_start = REG_FIELD_GET(VSYNC_START_MASK, tmp) + 1;
	adjusted_mode->crtc_vsync_end = REG_FIELD_GET(VSYNC_END_MASK, tmp) + 1;

	if (intel_pipe_is_interlaced(pipe_config)) {
		adjusted_mode->flags |= DRM_MODE_FLAG_INTERLACE;
		/* hw adds the 2 halflines itself; undo the "-1" from programming */
		adjusted_mode->crtc_vtotal += 1;
		adjusted_mode->crtc_vblank_end += 1;
	}

	/* ADL+ derives vblank start from TRANS_SET_CONTEXT_LATENCY instead */
	if (DISPLAY_VER(dev_priv) >= 13 && !transcoder_is_dsi(cpu_transcoder))
		adjusted_mode->crtc_vblank_start =
			adjusted_mode->crtc_vdisplay +
			intel_de_read(dev_priv, TRANS_SET_CONTEXT_LATENCY(cpu_transcoder));
}
| |
/*
 * Shift pipe_src horizontally to this pipe's slice of the joined
 * framebuffer, based on its offset from the bigjoiner master pipe.
 * No-op for a single pipe.
 */
static void intel_bigjoiner_adjust_pipe_src(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	int num_pipes = intel_bigjoiner_num_pipes(crtc_state);
	enum pipe master_pipe, pipe = crtc->pipe;
	int width;

	if (num_pipes < 2)
		return;

	master_pipe = bigjoiner_master_pipe(crtc_state);
	width = drm_rect_width(&crtc_state->pipe_src);

	drm_rect_translate_to(&crtc_state->pipe_src,
			      (pipe - master_pipe) * width, 0);
}
| |
/*
 * Read back the pipe source size from PIPESRC (stored 0-based in hw)
 * and adjust its position for bigjoiner slave pipes.
 */
static void intel_get_pipe_src_size(struct intel_crtc *crtc,
				    struct intel_crtc_state *pipe_config)
{
	struct drm_device *dev = crtc->base.dev;
	struct drm_i915_private *dev_priv = to_i915(dev);
	u32 tmp;

	tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));

	drm_rect_init(&pipe_config->pipe_src, 0, 0,
		      REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1,
		      REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1);

	intel_bigjoiner_adjust_pipe_src(pipe_config);
}
| |
/*
 * Build and write the TRANSCONF value for pre-DDI platforms: pipe
 * enable, double wide, bpc/dither (G4x+/VLV/CHV only), interlace mode,
 * color range, gamma mode, WGC and framestart delay.
 */
void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
	u32 val = 0;

	/*
	 * - We keep both pipes enabled on 830
	 * - During modeset the pipe is still disabled and must remain so
	 * - During fastset the pipe is already enabled and must remain so
	 */
	if (IS_I830(dev_priv) || !intel_crtc_needs_modeset(crtc_state))
		val |= TRANSCONF_ENABLE;

	if (crtc_state->double_wide)
		val |= TRANSCONF_DOUBLE_WIDE;

	/* only g4x and later have fancy bpc/dither controls */
	if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) ||
	    IS_CHERRYVIEW(dev_priv)) {
		/* Bspec claims that we can't use dithering for 30bpp pipes. */
		if (crtc_state->dither && crtc_state->pipe_bpp != 30)
			val |= TRANSCONF_DITHER_EN |
				TRANSCONF_DITHER_TYPE_SP;

		switch (crtc_state->pipe_bpp) {
		default:
			/* Case prevented by intel_choose_pipe_bpp_dither. */
			MISSING_CASE(crtc_state->pipe_bpp);
			fallthrough;
		case 18:
			val |= TRANSCONF_BPC_6;
			break;
		case 24:
			val |= TRANSCONF_BPC_8;
			break;
		case 30:
			val |= TRANSCONF_BPC_10;
			break;
		}
	}

	if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) {
		if (DISPLAY_VER(dev_priv) < 4 ||
		    intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO))
			val |= TRANSCONF_INTERLACE_W_FIELD_INDICATION;
		else
			val |= TRANSCONF_INTERLACE_W_SYNC_SHIFT;
	} else {
		val |= TRANSCONF_INTERLACE_PROGRESSIVE;
	}

	if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
	    crtc_state->limited_color_range)
		val |= TRANSCONF_COLOR_RANGE_SELECT;

	val |= TRANSCONF_GAMMA_MODE(crtc_state->gamma_mode);

	if (crtc_state->wgc_enable)
		val |= TRANSCONF_WGC_ENABLE;

	val |= TRANSCONF_FRAME_START_DELAY(crtc_state->framestart_delay - 1);

	intel_de_write(dev_priv, TRANSCONF(cpu_transcoder), val);
	intel_de_posting_read(dev_priv, TRANSCONF(cpu_transcoder));
}
| |
| static bool i9xx_has_pfit(struct drm_i915_private *dev_priv) |
| { |
| if (IS_I830(dev_priv)) |
| return false; |
| |
| return DISPLAY_VER(dev_priv) >= 4 || |
| IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv); |
| } |
| |
/*
 * Read back the GMCH panel fitter state into @crtc_state, but only if
 * the pfit exists, is enabled, and is attached to this CRTC's pipe.
 */
static void i9xx_get_pfit_config(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
	enum pipe pipe;
	u32 tmp;

	if (!i9xx_has_pfit(dev_priv))
		return;

	tmp = intel_de_read(dev_priv, PFIT_CONTROL);
	if (!(tmp & PFIT_ENABLE))
		return;

	/* Check whether the pfit is attached to our pipe. */
	if (DISPLAY_VER(dev_priv) >= 4)
		pipe = REG_FIELD_GET(PFIT_PIPE_MASK, tmp);
	else
		pipe = PIPE_B;

	if (pipe != crtc->pipe)
		return;

	crtc_state->gmch_pfit.control = tmp;
	crtc_state->gmch_pfit.pgm_ratios =
		intel_de_read(dev_priv, PFIT_PGM_RATIOS);
}
| |
| static enum intel_output_format |
| bdw_get_pipe_misc_output_format(struct intel_crtc *crtc) |
| { |
| struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
| u32 tmp; |
| |
| tmp = intel_de_read(dev_priv, PIPE_MISC(crtc->pipe)); |
| |
| if (tmp & PIPE_MISC_YUV420_ENABLE) { |
| /* We support 4:2:0 in full blend mode only */ |
| drm_WARN_ON(&dev_priv->drm, |
| (tmp & PIPE_MISC_YUV420_MODE_FULL_BLEND) == 0); |
| |
| return INTEL_OUTPUT_FORMAT_YCBCR420; |
| } else if (tmp & PIPE_MISC_OUTPUT_COLORSPACE_YUV) { |
| return INTEL_OUTPUT_FORMAT_YCBCR444; |
| } else { |
| return INTEL_OUTPUT_FORMAT_RGB; |
| } |
| } |
| |
| static bool i9xx_get_pipe_config(struct intel_crtc *crtc, |
| struct intel_crtc_state *pipe_config) |
| { |
| struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); |
| enum intel_display_power_domain power_domain; |
| intel_wakeref_t wakeref; |
| u32 tmp; |
| bool ret; |
| |
| power_domain = POWER_DOMAIN_PIPE(crtc->pipe); |
| wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain); |
| if (!wakeref) |
| return false; |
| |
| pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; |
| pipe_config->sink_format = pipe_config->output_format; |
| pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe; |
| pipe_config->shared_dpll = NULL; |
| |
| ret = false; |
| |
| tmp = intel_de_read(dev_priv, TRANSCONF(pipe_config->cpu_transcoder)); |
| if (!(tmp & TRANSCONF_ENABLE)) |
| goto out; |
| |
| if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || |
| IS_CHERRYVIEW(dev_priv)) { |
| switch (tmp & TRANSCONF_BPC_MASK) { |
| case TRANSCONF_BPC_6: |
| pipe_config->pipe_bpp = 18; |
| break; |
| case TRANSCONF_BPC_8: |
| pipe_config->pipe_bpp = 24; |
| break; |
| case TRANSCONF_BPC_10: |
| pipe_config->pipe_bpp = 30; |
| break; |
| default: |
| MISSING_CASE(tmp); |
| break; |
| } |
| } |
| |
| if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && |
| (tmp & TRANSCONF_COLOR_RANGE_SELECT)) |
| pipe_config->limited_color_range = true; |
| |
| pipe_config->gamma_mode = REG_FIELD_GET(TRANSCONF_GAMMA_MODE_MASK_I9XX, tmp); |
| |
| pipe_config->framestart_delay = REG_FIELD_GET(TRANSCONF_FRAME_START_DELAY_MASK, tmp) + 1; |
| |
| if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && |
| (tmp & TRANSCONF_WGC_ENABLE)) |
| pipe_config->wgc_enable = true; |
| |
| intel_color_get_config(pipe_config); |
| |
| if (DISPLAY_VER(dev_priv) < 4) |
| pipe_config->double_wide = tmp & TRANSCONF_DOUBLE_WIDE; |
| |
| intel_get_transcoder_timings(crtc, pipe_config); |
| intel_get_pipe_src_size(crtc, pipe_config); |
| |
| i9xx_get_pfit_config(pipe_config); |
| |
| if (DISPLAY_VER(dev_priv) >= 4) { |
| /* No way to read it out on pipes B and C */ |
| if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A) |
| tmp = dev_priv->display.state.chv_dpll_md[crtc->pipe]; |
| else |
| tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe)); |
| pipe_config->pixel_multiplier = |
| ((tmp & DPLL_MD_UDI_MULTIPLIER_MASK) |
| >> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1; |
| pipe_config->dpll_hw_state.dpll_md = tmp; |
| } else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) || |
| IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) { |
| tmp = intel_de_read(dev_priv, DPLL(crtc->pipe)); |
| pipe_config->pixel_multiplier = |
| ((tmp & SDVO_MULTIPLIER_MASK) |
| >> SDVO_MULTIPLIER_SHIFT_HIRES) + 1; |
| } else { |
| /* Note that on i915G/GM the pixel multiplier is in the sdvo |
| * port and will be fixed up in the encoder->get_config |
| * function. */ |
| pipe_config->pixel_multiplier = 1; |
| } |
| pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv, |
| DPLL(crtc->pipe)); |
| if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) { |
| pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv, |
| FP0(crtc->pipe)); |
| pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv, |
| FP1( |