
[v3,5/9] drm/i915: Add HDCP framework + base implementation

Message ID 20171205051513.8603-6-seanpaul@chromium.org (mailing list archive)
State New, archived

Commit Message

Sean Paul Dec. 5, 2017, 5:15 a.m. UTC
This patch adds the framework required to add HDCP support to intel
connectors. It implements Aksv loading from fuse, and parts 1/2/3
of the HDCP authentication scheme.

Note that without shim implementations, this does not actually implement
HDCP. That will come in subsequent patches.
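
As a rough illustration of how a bus-specific backend plugs into this
framework (sketch only -- the function names below are placeholders, the
real DP/HDMI callbacks arrive in the follow-up patches):

static const struct intel_hdcp_shim example_dp_hdcp_shim = {
	.write_an_aksv = example_dp_write_an_aksv,
	.read_bksv = example_dp_read_bksv,
	.read_bstatus = example_dp_read_bstatus,
	.repeater_present = example_dp_repeater_present,
	.read_ri_prime = example_dp_read_ri_prime,
	.read_ksv_ready = example_dp_read_ksv_ready,
	.read_ksv_fifo = example_dp_read_ksv_fifo,
	.read_v_prime_part = example_dp_read_v_prime_part,
	.toggle_signalling = example_dp_toggle_signalling,
	.check_link = example_dp_check_link,
};

/* and at connector init time: */
intel_connector->hdcp_shim = &example_dp_hdcp_shim;
mutex_init(&intel_connector->hdcp_mutex);
INIT_DELAYED_WORK(&intel_connector->hdcp_check_work, intel_hdcp_check_work);
INIT_WORK(&intel_connector->hdcp_prop_work, intel_hdcp_prop_work);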

Changes in v2:
- Don't open code wait_fors (Chris)
- drm_hdcp.c under MIT license (Daniel)
- Move intel_hdcp_disable() call above ddi_disable (Ram)
- Fix // comments (I wore a cone of shame for 12 hours to atone) (Daniel)
- Justify intel_hdcp_shim with comments (Daniel)
- Fixed async locking issues by adding hdcp_mutex (Daniel)
- Don't alter connector_state in enable/disable (Daniel)
Changes in v3:
- Added hdcp_mutex/hdcp_value to make async reasonable
- Added hdcp_prop_work to separate link checking & property setting
- Added new helper for atomic_check state tracking (Daniel)
- Moved enable/disable into atomic_commit with matching helpers
- Moved intel_hdcp_check_link out of all locks when called from dp
- Bumped up ksv_fifo timeout (noticed failure on one of my dongles)

Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Cc: Ramalingam C <ramalingam.c@intel.com>
Signed-off-by: Sean Paul <seanpaul@chromium.org>
---
 drivers/gpu/drm/i915/Makefile        |   1 +
 drivers/gpu/drm/i915/i915_reg.h      |  83 ++++
 drivers/gpu/drm/i915/intel_atomic.c  |   2 +
 drivers/gpu/drm/i915/intel_display.c |  14 +
 drivers/gpu/drm/i915/intel_drv.h     |  88 +++++
 drivers/gpu/drm/i915/intel_hdcp.c    | 731 +++++++++++++++++++++++++++++++++++
 6 files changed, 919 insertions(+)
 create mode 100644 drivers/gpu/drm/i915/intel_hdcp.c

Comments

Ramalingam C Dec. 5, 2017, 9:06 a.m. UTC | #1
On Tuesday 05 December 2017 10:45 AM, Sean Paul wrote:
> This patch adds the framework required to add HDCP support to intel
> connectors. It implements Aksv loading from fuse, and parts 1/2/3
> of the HDCP authentication scheme.
>
> Note that without shim implementations, this does not actually implement
> HDCP. That will come in subsequent patches.
>
> Changes in v2:
> - Don't open code wait_fors (Chris)
> - drm_hdcp.c under MIT license (Daniel)
> - Move intel_hdcp_disable() call above ddi_disable (Ram)
> - Fix // comments (I wore a cone of shame for 12 hours to atone) (Daniel)
> - Justify intel_hdcp_shim with comments (Daniel)
> - Fixed async locking issues by adding hdcp_mutex (Daniel)
> - Don't alter connector_state in enable/disable (Daniel)
> Changes in v3:
> - Added hdcp_mutex/hdcp_value to make async reasonable
> - Added hdcp_prop_work to separate link checking & property setting
> - Added new helper for atomic_check state tracking (Daniel)
> - Moved enable/disable into atomic_commit with matching helpers
> - Moved intel_hdcp_check_link out of all locks when called from dp
> - Bumped up ksv_fifo timeout (noticed failure on one of my dongles)
>
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Daniel Vetter <daniel.vetter@intel.com>
> Cc: Ramalingam C <ramalingam.c@intel.com>
> Signed-off-by: Sean Paul <seanpaul@chromium.org>
> ---
>   drivers/gpu/drm/i915/Makefile        |   1 +
>   drivers/gpu/drm/i915/i915_reg.h      |  83 ++++
>   drivers/gpu/drm/i915/intel_atomic.c  |   2 +
>   drivers/gpu/drm/i915/intel_display.c |  14 +
>   drivers/gpu/drm/i915/intel_drv.h     |  88 +++++
>   drivers/gpu/drm/i915/intel_hdcp.c    | 731 +++++++++++++++++++++++++++++++++++
>   6 files changed, 919 insertions(+)
>   create mode 100644 drivers/gpu/drm/i915/intel_hdcp.c
>
> diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
> index 42bc8bd4ff06..3facea4eefdb 100644
> --- a/drivers/gpu/drm/i915/Makefile
> +++ b/drivers/gpu/drm/i915/Makefile
> @@ -107,6 +107,7 @@ i915-y += intel_audio.o \
>   	  intel_fbc.o \
>   	  intel_fifo_underrun.o \
>   	  intel_frontbuffer.o \
> +	  intel_hdcp.o \
>   	  intel_hotplug.o \
>   	  intel_modes.o \
>   	  intel_overlay.o \
> diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
> index 09bf043c1c2e..2bd2cc8441d4 100644
> --- a/drivers/gpu/drm/i915/i915_reg.h
> +++ b/drivers/gpu/drm/i915/i915_reg.h
> @@ -8034,6 +8034,7 @@ enum {
>   #define     GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT	8
>   #define     GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT	16
>   #define     GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT	24
> +#define   SKL_PCODE_LOAD_HDCP_KEYS		0x5
>   #define   SKL_PCODE_CDCLK_CONTROL		0x7
>   #define     SKL_CDCLK_PREPARE_FOR_CHANGE	0x3
>   #define     SKL_CDCLK_READY_FOR_CHANGE		0x1
> @@ -8335,6 +8336,88 @@ enum skl_power_gate {
>   #define  SKL_PW_TO_PG(pw)			((pw) - SKL_DISP_PW_1 + SKL_PG1)
>   #define  SKL_FUSE_PG_DIST_STATUS(pg)		(1 << (27 - (pg)))
>   
> +
> +/* HDCP Key Registers */
> +#define SKL_HDCP_KEY_CONF		_MMIO(0x66c00)
> +#define	 SKL_HDCP_AKSV_SEND_TRIGGER	BIT(31)
> +#define  SKL_HDCP_CLEAR_KEYS_TRIGGER	BIT(30)
> +#define SKL_HDCP_KEY_STATUS		_MMIO(0x66c04)
> +#define  SKL_HDCP_FUSE_IN_PROGRESS	BIT(7)
> +#define  SKL_HDCP_FUSE_ERROR		BIT(6)
> +#define  SKL_HDCP_FUSE_DONE		BIT(5)
> +#define  SKL_HDCP_KEY_LOAD_STATUS	BIT(1)
> +#define  SKL_HDCP_KEY_LOAD_DONE		BIT(0)
> +#define SKL_HDCP_AKSV_LO		_MMIO(0x66c10)
> +#define SKL_HDCP_AKSV_HI		_MMIO(0x66c14)
> +
> +/* HDCP Repeater Registers */
> +#define SKL_HDCP_REP_CTL		_MMIO(0x66d00)
> +#define  SKL_HDCP_DDIB_REP_PRESENT	BIT(30)
> +#define  SKL_HDCP_DDIA_REP_PRESENT	BIT(29)
> +#define  SKL_HDCP_DDIC_REP_PRESENT	BIT(28)
> +#define  SKL_HDCP_DDID_REP_PRESENT	BIT(27)
> +#define  SKL_HDCP_DDIF_REP_PRESENT	BIT(26)
> +#define  SKL_HDCP_DDIE_REP_PRESENT	BIT(25)
> +#define  SKL_HDCP_DDIB_SHA1_M0		(1 << 20)
> +#define  SKL_HDCP_DDIA_SHA1_M0		(2 << 20)
> +#define  SKL_HDCP_DDIC_SHA1_M0		(3 << 20)
> +#define  SKL_HDCP_DDID_SHA1_M0		(4 << 20)
> +#define  SKL_HDCP_DDIF_SHA1_M0		(5 << 20)
> +#define  SKL_HDCP_DDIE_SHA1_M0		(6 << 20) /* Bspec says 5? */
> +#define  SKL_HDCP_SHA1_BUSY		BIT(16)
> +#define  SKL_HDCP_SHA1_READY		BIT(17)
> +#define  SKL_HDCP_SHA1_COMPLETE		BIT(18)
> +#define  SKL_HDCP_SHA1_V_MATCH		BIT(19)
> +#define  SKL_HDCP_SHA1_TEXT_32		(1 << 1)
> +#define  SKL_HDCP_SHA1_COMPLETE_HASH	(2 << 1)
> +#define  SKL_HDCP_SHA1_TEXT_24		(4 << 1)
> +#define  SKL_HDCP_SHA1_TEXT_16		(5 << 1)
> +#define  SKL_HDCP_SHA1_TEXT_8		(6 << 1)
> +#define  SKL_HDCP_SHA1_TEXT_0		(7 << 1)
> +#define SKL_HDCP_SHA_V_PRIME_H0		_MMIO(0x66d04)
> +#define SKL_HDCP_SHA_V_PRIME_H1		_MMIO(0x66d08)
> +#define SKL_HDCP_SHA_V_PRIME_H2		_MMIO(0x66d0C)
> +#define SKL_HDCP_SHA_V_PRIME_H3		_MMIO(0x66d10)
> +#define SKL_HDCP_SHA_V_PRIME_H4		_MMIO(0x66d14)
> +#define SKL_HDCP_SHA_V_PRIME(h)		_MMIO((0x66d04 + h * 4))
> +#define SKL_HDCP_SHA_TEXT		_MMIO(0x66d18)
> +
> +/* HDCP Auth Registers */
> +#define _SKL_PORTA_HDCP_AUTHENC		0x66800
> +#define _SKL_PORTB_HDCP_AUTHENC		0x66500
> +#define _SKL_PORTC_HDCP_AUTHENC		0x66600
> +#define _SKL_PORTD_HDCP_AUTHENC		0x66700
> +#define _SKL_PORTE_HDCP_AUTHENC		0x66A00
> +#define _SKL_PORTF_HDCP_AUTHENC		0x66900
> +#define _SKL_PORT_HDCP_AUTHENC(port, x)	_MMIO(_PICK(port, \
> +					  _SKL_PORTA_HDCP_AUTHENC, \
> +					  _SKL_PORTB_HDCP_AUTHENC, \
> +					  _SKL_PORTC_HDCP_AUTHENC, \
> +					  _SKL_PORTD_HDCP_AUTHENC, \
> +					  _SKL_PORTE_HDCP_AUTHENC, \
> +					  _SKL_PORTF_HDCP_AUTHENC) + x)
> +#define SKL_PORT_HDCP_CONF(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x0)
> +#define  SKL_HDCP_CONF_CAPTURE_AN	BIT(0)
> +#define  SKL_HDCP_CONF_AUTH_AND_ENC	(BIT(1) | BIT(0))
> +#define SKL_PORT_HDCP_ANINIT(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x4)
> +#define SKL_PORT_HDCP_ANLO(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x8)
> +#define SKL_PORT_HDCP_ANHI(port)	_SKL_PORT_HDCP_AUTHENC(port, 0xC)
> +#define SKL_PORT_HDCP_BKSVLO(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x10)
> +#define SKL_PORT_HDCP_BKSVHI(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x14)
> +#define SKL_PORT_HDCP_RPRIME(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x18)
> +#define SKL_PORT_HDCP_STATUS(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x1C)
> +#define  SKL_HDCP_STATUS_STREAM_A_ENC	BIT(31)
> +#define  SKL_HDCP_STATUS_STREAM_B_ENC	BIT(30)
> +#define  SKL_HDCP_STATUS_STREAM_C_ENC	BIT(29)
> +#define  SKL_HDCP_STATUS_STREAM_D_ENC	BIT(28)
> +#define  SKL_HDCP_STATUS_AUTH		BIT(21)
> +#define  SKL_HDCP_STATUS_ENC		BIT(20)
> +#define  SKL_HDCP_STATUS_RI_MATCH	BIT(19)
> +#define  SKL_HDCP_STATUS_R0_READY	BIT(18)
> +#define  SKL_HDCP_STATUS_AN_READY	BIT(17)
> +#define  SKL_HDCP_STATUS_CIPHER		BIT(16)
> +#define  SKL_HDCP_STATUS_FRAME_CNT(x)	((x >> 8) & 0xff)
> +
>   /* Per-pipe DDI Function Control */
>   #define _TRANS_DDI_FUNC_CTL_A		0x60400
>   #define _TRANS_DDI_FUNC_CTL_B		0x61400
> diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
> index 36d4e635e4ce..d452c327dc1d 100644
> --- a/drivers/gpu/drm/i915/intel_atomic.c
> +++ b/drivers/gpu/drm/i915/intel_atomic.c
> @@ -110,6 +110,8 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
>   		to_intel_digital_connector_state(old_state);
>   	struct drm_crtc_state *crtc_state;
>   
> +	intel_hdcp_atomic_check(conn, old_state, new_state);
> +
>   	if (!new_state->crtc)
>   		return 0;
>   
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 601c23be8264..f45c468abf98 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -12319,6 +12319,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
>   	struct drm_i915_private *dev_priv = to_i915(dev);
>   	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
>   	struct drm_crtc *crtc;
> +	struct drm_connector_state *old_conn_state, *new_conn_state;
> +	struct drm_connector *connector;
>   	struct intel_crtc_state *intel_cstate;
>   	u64 put_domains[I915_MAX_PIPES] = {};
>   	int i;
> @@ -12408,9 +12410,17 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
>   		}
>   	}
>   
> +	for_each_oldnew_connector_in_state(state, connector, old_conn_state,
> +					   new_conn_state, i)
> +		intel_hdcp_atomic_pre_commit(connector, old_conn_state,
> +					     new_conn_state);
> +
>   	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
>   	dev_priv->display.update_crtcs(state);
>   
> +	for_each_new_connector_in_state(state, connector, new_conn_state, i)
> +		intel_hdcp_atomic_commit(connector, new_conn_state);
> +
>   	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
>   	 * already, but still need the state for the delayed optimization. To
>   	 * fix this:
> @@ -15322,6 +15332,10 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
>   	for_each_intel_connector_iter(connector, &conn_iter) {
>   		if (connector->modeset_retry_work.func)
>   			cancel_work_sync(&connector->modeset_retry_work);
> +		if (connector->hdcp_shim) {
> +			cancel_delayed_work_sync(&connector->hdcp_check_work);
> +			cancel_work_sync(&connector->hdcp_prop_work);
> +		}
>   	}
>   	drm_connector_list_iter_end(&conn_iter);
>   }
> diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> index 852b3d161754..6f47a4227f5f 100644
> --- a/drivers/gpu/drm/i915/intel_drv.h
> +++ b/drivers/gpu/drm/i915/intel_drv.h
> @@ -301,6 +301,76 @@ struct intel_panel {
>   	} backlight;
>   };
>   
> +/*
> + * This structure serves as a translation layer between the generic HDCP code
> + * and the bus-specific code. What that means is that HDCP over HDMI differs
> + * from HDCP over DP, so to account for these differences, we need to
> + * communicate with the receiver through this shim.
> + *
> + * For completeness, the 2 buses differ in the following ways:
> + *	- DP AUX vs. DDC
> + *		HDCP registers on the receiver are set via DP AUX for DP, and
> + *		they are set via DDC for HDMI.
> + *	- Receiver register offsets
> + *		The offsets of the registers are different for DP vs. HDMI
> + *	- Receiver register masks/offsets
> + *		For instance, the ready bit for the KSV fifo is in a different
> + *		place on DP vs HDMI
> + *	- Receiver register names
> + *		Seriously. In the DP spec, the 16-bit register containing
> + *		downstream information is called BINFO, on HDMI it's called
> + *		BSTATUS. To confuse matters further, DP has a BSTATUS register
> + *		with a completely different definition.
> + *	- KSV FIFO
> + *		On HDMI, the ksv fifo is read all at once, whereas on DP it must
> + *		be read 3 keys at a time
> + *	- Aksv output
> + *		Since Aksv is hidden in hardware, there's different procedures
> + *		to send it over DP AUX vs DDC
> + */
> +struct intel_hdcp_shim {
> +	/* Outputs the transmitter's An and Aksv values to the receiver. */
> +	int (*write_an_aksv)(struct intel_digital_port *intel_dig_port, u8 *an);
> +
> +	/* Reads the receiver's key selection vector */
> +	int (*read_bksv)(struct intel_digital_port *intel_dig_port, u8 *bksv);
> +
> +	/*
> +	 * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The
> +	 * definitions are the same in the respective specs, but the names are
> +	 * different. Call it BSTATUS since that's the name the HDMI spec
> +	 * uses and it was there first.
> +	 */
> +	int (*read_bstatus)(struct intel_digital_port *intel_dig_port,
> +			    u8 *bstatus);
> +
> +	/* Determines whether a repeater is present downstream */
> +	int (*repeater_present)(struct intel_digital_port *intel_dig_port,
> +				bool *repeater_present);
> +
> +	/* Reads the receiver's Ri' value */
> +	int (*read_ri_prime)(struct intel_digital_port *intel_dig_port, u8 *ri);
> +
> +	/* Determines if the receiver's KSV FIFO is ready for consumption */
> +	int (*read_ksv_ready)(struct intel_digital_port *intel_dig_port,
> +			      bool *ksv_ready);
> +
> +	/* Reads the ksv fifo for num_downstream devices */
> +	int (*read_ksv_fifo)(struct intel_digital_port *intel_dig_port,
> +			     int num_downstream, u8 *ksv_fifo);
> +
> +	/* Reads a 32-bit part of V' from the receiver */
> +	int (*read_v_prime_part)(struct intel_digital_port *intel_dig_port,
> +				 int i, u32 *part);
> +
> +	/* Enables HDCP signalling on the port */
> +	int (*toggle_signalling)(struct intel_digital_port *intel_dig_port,
> +				 bool enable);
> +
> +	/* Ensures the link is still protected */
> +	bool (*check_link)(struct intel_digital_port *intel_dig_port);
> +};
> +
>   struct intel_connector {
>   	struct drm_connector base;
>   	/*
> @@ -332,6 +402,12 @@ struct intel_connector {
>   
>   	/* Work struct to schedule a uevent on link train failure */
>   	struct work_struct modeset_retry_work;
> +
> +	const struct intel_hdcp_shim *hdcp_shim;
> +	struct mutex hdcp_mutex;
> +	uint64_t hdcp_value; /* protected by hdcp_mutex */
> +	struct delayed_work hdcp_check_work;
> +	struct work_struct hdcp_prop_work;
>   };
>   
>   struct intel_digital_connector_state {
> @@ -1763,6 +1839,18 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con
>   }
>   #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
>   
> +/* intel_hdcp.c */
> +void intel_hdcp_atomic_check(struct drm_connector *connector,
> +			     struct drm_connector_state *old_state,
> +			     struct drm_connector_state *new_state);
> +void intel_hdcp_atomic_pre_commit(struct drm_connector *connector,
> +			          struct drm_connector_state *old_state,
> +			          struct drm_connector_state *new_state);
> +void intel_hdcp_atomic_commit(struct drm_connector *connector,
> +			      struct drm_connector_state *new_state);
> +int intel_hdcp_check_link(struct intel_connector *connector);
> +void intel_hdcp_check_work(struct work_struct *work);
> +void intel_hdcp_prop_work(struct work_struct *work);
>   
>   /* intel_psr.c */
>   void intel_psr_enable(struct intel_dp *intel_dp,
> diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
> new file mode 100644
> index 000000000000..4eef9505410f
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/intel_hdcp.c
> @@ -0,0 +1,731 @@
> +/*
> + * Copyright (C) 2017 Google, Inc.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + * Authors:
> + * Sean Paul <seanpaul@chromium.org>
> + */
> +
> +#include <drm/drmP.h>
> +#include <drm/drm_hdcp.h>
> +#include <linux/i2c.h>
> +#include <linux/random.h>
> +
> +#include "intel_drv.h"
> +#include "i915_reg.h"
> +
> +#define KEY_LOAD_TRIES	5
> +
> +static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
> +				    const struct intel_hdcp_shim *shim)
> +{
> +	int ret, read_ret;
> +	bool ksv_ready;
> +
> +	ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
> +							 &ksv_ready),
> +			 read_ret || ksv_ready, 1500 * 1000, 1000, 100 * 1000);
> +	if (ret)
> +		return ret;
> +	if (read_ret)
> +		return read_ret;
> +	if (!ksv_ready)
> +		return -ETIMEDOUT;
> +
> +	return 0;
> +}
> +
> +static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
> +{
> +	I915_WRITE(SKL_HDCP_KEY_CONF, SKL_HDCP_CLEAR_KEYS_TRIGGER);
> +	I915_WRITE(SKL_HDCP_KEY_STATUS,
> +		   SKL_HDCP_KEY_LOAD_DONE | SKL_HDCP_KEY_LOAD_STATUS |
> +		   SKL_HDCP_FUSE_IN_PROGRESS | SKL_HDCP_FUSE_ERROR |
> +		   SKL_HDCP_FUSE_DONE);
> +}
> +
> +static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
> +{
> +	int ret;
> +	u32 val;
> +
> +	/* Initiate loading the HDCP key from fuses */
> +	mutex_lock(&dev_priv->pcu_lock);
> +	ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1);
> +	mutex_unlock(&dev_priv->pcu_lock);
> +	if (ret) {
> +		DRM_ERROR("Failed to initiate HDCP key load (%d)\n", ret);
> +		return ret;
> +	}
> +
> +	/* Wait for the keys to load (500us) */
> +	ret = __intel_wait_for_register(dev_priv, SKL_HDCP_KEY_STATUS,
> +					SKL_HDCP_KEY_LOAD_DONE,
> +					SKL_HDCP_KEY_LOAD_DONE,
> +					10, 1, &val);
> +	if (ret)
> +		return ret;
> +	else if (!(val & SKL_HDCP_KEY_LOAD_STATUS))
> +		return -ENXIO;
> +
> +	/* Send Aksv over to PCH display for use in authentication */
> +	I915_WRITE(SKL_HDCP_KEY_CONF, SKL_HDCP_AKSV_SEND_TRIGGER);
> +
> +	return 0;
> +}
> +
> +/* Returns updated SHA-1 index */
> +static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
> +{
> +	I915_WRITE(SKL_HDCP_SHA_TEXT, sha_text);
> +	if (intel_wait_for_register(dev_priv, SKL_HDCP_REP_CTL,
> +				    SKL_HDCP_SHA1_READY,
> +				    SKL_HDCP_SHA1_READY, 1)) {
> +		DRM_ERROR("Timed out waiting for SHA1 ready\n");
> +		return -ETIMEDOUT;
> +	}
> +	return 0;
> +}
> +
> +static
> +u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
> +{
> +	enum port port = intel_dig_port->base.port;
> +	switch(port) {
> +	case PORT_A:
> +		return SKL_HDCP_DDIA_REP_PRESENT | SKL_HDCP_DDIA_SHA1_M0;
> +	case PORT_B:
> +		return SKL_HDCP_DDIB_REP_PRESENT | SKL_HDCP_DDIB_SHA1_M0;
> +	case PORT_C:
> +		return SKL_HDCP_DDIC_REP_PRESENT | SKL_HDCP_DDIC_SHA1_M0;
> +	case PORT_D:
> +		return SKL_HDCP_DDID_REP_PRESENT | SKL_HDCP_DDID_SHA1_M0;
> +	case PORT_E:
> +		return SKL_HDCP_DDIE_REP_PRESENT | SKL_HDCP_DDIE_SHA1_M0;
> +	default:
> +		break;
> +	}
> +	DRM_ERROR("Unknown port %d\n", port);
> +	return -EINVAL;
> +}
> +
> +/* Implements Part 2 of the HDCP authorization procedure */
> +static
> +int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
> +			       const struct intel_hdcp_shim *shim)
> +{
> +	struct drm_i915_private *dev_priv;
> +	u32 vprime, sha_text, sha_leftovers, rep_ctl;
> +	u8 bstatus[2], num_downstream, *ksv_fifo;
> +	int ret, i, j, sha_idx;
> +
> +	dev_priv = intel_dig_port->base.base.dev->dev_private;
> +
> +	ret = shim->read_bstatus(intel_dig_port, bstatus);
> +	if (ret)
> +		return ret;
> +
> +	/* If there are no downstream devices, we're all done. */
> +	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
> +	if (num_downstream == 0) {
> +		DRM_INFO("HDCP is enabled (no downstream devices)\n");
> +		return 0;
> +	}
> +
> +	/* Poll for ksv list ready (spec says max time allowed is 5s) */
> +	ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
> +	if (ret) {
> +		DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
> +		return ret;
> +	}
> +
> +	ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL);
> +	if (!ksv_fifo)
> +		return -ENOMEM;
> +
> +	ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
> +	if (ret)
> +		return ret;
> +
> +	/* Process V' values from the receiver */
> +	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
> +		ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
> +		if (ret)
> +			return ret;
> +		I915_WRITE(SKL_HDCP_SHA_V_PRIME(i), vprime);
> +	}
> +
> +	/*
> +	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
> +	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
> +	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
> +	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
> +	 * index will keep track of our progress through the 64 bytes as well as
> +	 * helping us work the 40-bit KSVs through our 32-bit register.
> +	 *
> +	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
> +	 */
> +	sha_idx = 0;
> +	sha_text = 0;
> +	sha_leftovers = 0;
> +	rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
> +	I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_32);
> +	for (i = 0; i < num_downstream; i++) {
> +		unsigned sha_empty;
> +		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
> +
> +		/* Fill up the empty slots in sha_text and write it out */
> +		sha_empty = sizeof(sha_text) - sha_leftovers;
> +		for (j = 0; j < sha_empty; j++)
> +			sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
> +
> +		ret = intel_write_sha_text(dev_priv, sha_text);
> +		if (ret < 0)
> +			return ret;
> +
> +		/* Programming guide writes this every 64 bytes */
> +		sha_idx += sizeof(sha_text);
> +		if (!(sha_idx % 64))
> +			I915_WRITE(SKL_HDCP_REP_CTL,
> +				   rep_ctl | SKL_HDCP_SHA1_TEXT_32);
> +
> +		/* Store the leftover bytes from the ksv in sha_text */
> +		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
> +		sha_text = 0;
> +		for (j = 0; j < sha_leftovers; j++)
> +			sha_text |= ksv[sha_empty + j] <<
> +					((sizeof(sha_text) - j - 1) * 8);
> +
> +		/*
> +		 * If we still have room in sha_text for more data, continue.
> +		 * Otherwise, write it out immediately.
> +		 */
> +		if (sizeof(sha_text) > sha_leftovers)
> +			continue;
> +
> +		ret = intel_write_sha_text(dev_priv, sha_text);
> +		if (ret < 0)
> +			return ret;
> +		sha_leftovers = 0;
> +		sha_text = 0;
> +		sha_idx += sizeof(sha_text);
> +	}
> +
> +	/*
> +	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
> +	 * bytes are leftover from the last ksv, we might be able to fit them
> +	 * all in sha_text (first 2 cases), or we might need to split them up
> +	 * into 2 writes (last 2 cases).
> +	 */
> +	if (sha_leftovers == 0) {
> +		/* Write 16 bits of text, 16 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_16);
> +		ret = intel_write_sha_text(dev_priv,
> +					   bstatus[0] << 8 | bstatus[1]);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 32 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_0);
> +		ret = intel_write_sha_text(dev_priv, 0);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 16 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_16);
> +		ret = intel_write_sha_text(dev_priv, 0);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +	} else if (sha_leftovers == 1) {
> +		/* Write 24 bits of text, 8 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_24);
> +		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
> +		/* Only 24-bits of data, must be in the LSB */
> +		sha_text = (sha_text & 0xffffff00) >> 8;
> +		ret = intel_write_sha_text(dev_priv, sha_text);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 32 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_0);
> +		ret = intel_write_sha_text(dev_priv, 0);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 24 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_8);
> +		ret = intel_write_sha_text(dev_priv, 0);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +	} else if (sha_leftovers == 2) {
> +		/* Write 32 bits of text */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_32);
> +		sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
> +		ret = intel_write_sha_text(dev_priv, sha_text);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 64 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_0);
> +		for (i = 0; i < 2; i++) {
> +			ret = intel_write_sha_text(dev_priv, 0);
> +			if (ret < 0)
> +				return ret;
> +			sha_idx += sizeof(sha_text);
> +		}
> +	} else if (sha_leftovers == 3) {
> +		/* Write 32 bits of text */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_32);
> +		sha_text |= bstatus[0] << 24;
> +		ret = intel_write_sha_text(dev_priv, sha_text);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 8 bits of text, 24 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_8);
> +		ret = intel_write_sha_text(dev_priv, bstatus[1]);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 32 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_0);
> +		ret = intel_write_sha_text(dev_priv, 0);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 8 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_24);
> +		ret = intel_write_sha_text(dev_priv, 0);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +	} else {
> +		DRM_ERROR("Invalid number of leftovers %d\n", sha_leftovers);
> +		return -EINVAL;
> +	}
> +
> +	I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_32);
> +	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
> +	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
> +		ret = intel_write_sha_text(dev_priv, 0);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +	}
> +
> +	/*
> +	 * Last write gets the length of the concatenation in bits. That is:
> +	 *  - 5 bytes per device
> +	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
> +	 */
> +	sha_text = (num_downstream * 5 + 10) * 8;
> +	ret = intel_write_sha_text(dev_priv, sha_text);
> +	if (ret < 0)
> +		return ret;
> +
> +	/* Tell the HW we're done with the hash and wait for it to ACK */
> +	I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_COMPLETE_HASH);
> +	if (intel_wait_for_register(dev_priv, SKL_HDCP_REP_CTL,
> +				    SKL_HDCP_SHA1_COMPLETE,
> +				    SKL_HDCP_SHA1_COMPLETE, 1)) {
> +		DRM_ERROR("Timed out waiting for SHA1 complete\n");
> +		return -ETIMEDOUT;
> +	}
> +	if (!(I915_READ(SKL_HDCP_REP_CTL) & SKL_HDCP_SHA1_V_MATCH)) {
> +		DRM_ERROR("SHA-1 mismatch, HDCP failed\n");
> +		return -ENXIO;
> +	}
> +
> +	DRM_INFO("HDCP is enabled (%d downstream devices)\n", num_downstream);
> +	return 0;
> +}
> +
> +/* Implements Part 1 of the HDCP authorization procedure */
> +static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
> +			   const struct intel_hdcp_shim *shim)
> +{
> +	struct drm_i915_private *dev_priv;
> +	enum port port;
> +	unsigned long r0_prime_gen_start;
> +	int ret, i;
> +	union {
> +		u32 reg[2];
> +		u8 shim[DRM_HDCP_AN_LEN];
> +	} an;
> +	union {
> +		u32 reg[2];
> +		u8 shim[DRM_HDCP_KSV_LEN];
> +	} bksv;
> +	union {
> +		u32 reg;
> +		u8 shim[DRM_HDCP_RI_LEN];
> +	} ri;
> +	bool repeater_present;
> +
> +	dev_priv = intel_dig_port->base.base.dev->dev_private;
> +
> +	port = intel_dig_port->base.port;
> +
> +	/* Initialize An with 2 random values and acquire it */
> +	for (i = 0; i < 2; i++)
> +		I915_WRITE(SKL_PORT_HDCP_ANINIT(port), get_random_long());
> +	I915_WRITE(SKL_PORT_HDCP_CONF(port), SKL_HDCP_CONF_CAPTURE_AN);
> +
> +	/* Wait for An to be acquired */
> +	if (intel_wait_for_register(dev_priv, SKL_PORT_HDCP_STATUS(port),
> +				    SKL_HDCP_STATUS_AN_READY,
> +				    SKL_HDCP_STATUS_AN_READY, 1)) {
> +		DRM_ERROR("Timed out waiting for An\n");
> +		return -ETIMEDOUT;
> +	}
> +
> +	an.reg[0] = I915_READ(SKL_PORT_HDCP_ANLO(port));
> +	an.reg[1] = I915_READ(SKL_PORT_HDCP_ANHI(port));
> +	ret = shim->write_an_aksv(intel_dig_port, an.shim);
> +	if (ret)
> +		return ret;
> +
> +	r0_prime_gen_start = jiffies;
> +
> +	memset(&bksv, 0, sizeof(bksv));
> +	ret = shim->read_bksv(intel_dig_port, bksv.shim);
> +	if (ret)
> +		return ret;
> +
> +	I915_WRITE(SKL_PORT_HDCP_BKSVLO(port), bksv.reg[0]);
> +	I915_WRITE(SKL_PORT_HDCP_BKSVHI(port), bksv.reg[1]);
> +
> +	ret = shim->repeater_present(intel_dig_port, &repeater_present);
> +	if (ret)
> +		return ret;
> +	if (repeater_present)
> +		I915_WRITE(SKL_HDCP_REP_CTL,
> +			   intel_hdcp_get_repeater_ctl(intel_dig_port));
> +
> +	ret = shim->toggle_signalling(intel_dig_port, true);
> +	if (ret)
> +		return ret;
> +
> +	I915_WRITE(SKL_PORT_HDCP_CONF(port), SKL_HDCP_CONF_AUTH_AND_ENC);
> +
> +	/* Wait for R0 ready */
> +	if (wait_for(I915_READ(SKL_PORT_HDCP_STATUS(port)) &
> +		     (SKL_HDCP_STATUS_R0_READY | SKL_HDCP_STATUS_ENC), 1)) {
> +		DRM_ERROR("Timed out waiting for R0 ready\n");
> +		return -ETIMEDOUT;
> +	}
> +
> +	/*
> +	 * Wait for R0' to become available, the spec says 100ms from Aksv
> +	 * write. On DP, there's an R0_READY bit available but no such bit
> +	 * exists on HDMI. Since the upper-bound is the same, we'll just do
> +	 * the stupid thing instead of polling on one and not the other.
> +	 */
> +	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 100);
> +
> +	ri.reg = 0;
> +	ret = shim->read_ri_prime(intel_dig_port, ri.shim);
> +	if (ret)
> +		return ret;
> +	I915_WRITE(SKL_PORT_HDCP_RPRIME(port), ri.reg);
> +
> +	/* Wait for Ri prime match */
> +	if (wait_for(I915_READ(SKL_PORT_HDCP_STATUS(port)) &
> +		     (SKL_HDCP_STATUS_RI_MATCH | SKL_HDCP_STATUS_ENC), 1)) {
> +		DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
> +			  I915_READ(SKL_PORT_HDCP_STATUS(port)));
> +		return -ETIMEDOUT;
> +	}
> +
> +	/* Wait for encryption confirmation */
> +	if (intel_wait_for_register(dev_priv, SKL_PORT_HDCP_STATUS(port),
> +				    SKL_HDCP_STATUS_ENC,
> +				    SKL_HDCP_STATUS_ENC, 20)) {
> +		DRM_ERROR("Timed out waiting for encryption\n");
> +		return -ETIMEDOUT;
> +	}
> +
> +	/*
> +	 * XXX: If we have MST-connected devices, we need to enable encryption
> +	 * on those as well.
> +	 */
> +
> +	return intel_hdcp_auth_downstream(intel_dig_port, shim);
> +}
> +
> +static
> +struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
> +{
> +	return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
> +}
> +
> +static int _intel_hdcp_disable(struct intel_connector *connector)
> +{
> +	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
> +	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
> +	enum port port = intel_dig_port->base.port;
> +	int ret;
> +
> +	I915_WRITE(SKL_PORT_HDCP_CONF(port), 0);
> +	if (intel_wait_for_register(dev_priv, SKL_PORT_HDCP_STATUS(port), ~0, 0,
> +				    20)) {
> +		DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
> +		return -ETIMEDOUT;
> +	}
> +
> +	intel_hdcp_clear_keys(dev_priv);
> +
> +	ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
> +	if (ret) {
> +		DRM_ERROR("Failed to disable HDCP signalling\n");
> +		return ret;
> +	}
> +
> +	DRM_INFO("HDCP is disabled\n");
> +	return 0;
> +}
> +
> +static int _intel_hdcp_enable(struct intel_connector *connector)
> +{
> +	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
> +	int i, ret;
> +
> +	if (!(I915_READ(SKL_FUSE_STATUS) & SKL_FUSE_PG_DIST_STATUS(1))) {
> +		DRM_ERROR("PG1 is disabled, cannot load keys\n");
> +		return -ENXIO;
> +	}
> +
> +	for (i = 0; i < KEY_LOAD_TRIES; i++) {
> +		ret = intel_hdcp_load_keys(dev_priv);
> +		if (!ret)
> +			break;
> +		intel_hdcp_clear_keys(dev_priv);
> +	}
> +	if (ret) {
> +		DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
> +		return ret;
> +	}
> +
> +	ret = intel_hdcp_auth(conn_to_dig_port(connector),
> +			      connector->hdcp_shim);
> +	if (ret) {
> +		DRM_ERROR("Failed to authenticate HDCP (%d)\n", ret);
> +		return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +void intel_hdcp_check_work(struct work_struct *work)
> +{
> +	struct intel_connector *connector = container_of(to_delayed_work(work),
> +							 struct intel_connector,
> +						         hdcp_check_work);
> +	if (!intel_hdcp_check_link(connector))
> +		schedule_delayed_work(&connector->hdcp_check_work,
> +				      DRM_HDCP_CHECK_PERIOD_MS);
> +}
> +
> +void intel_hdcp_prop_work(struct work_struct *work)
> +{
> +	struct intel_connector *connector = container_of(work,
> +							 struct intel_connector,
> +						         hdcp_prop_work);
> +	struct drm_device *dev = connector->base.dev;
> +	struct drm_connector_state *state;
> +
> +	mutex_lock(&dev->mode_config.mutex);
> +	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
> +	mutex_lock(&connector->hdcp_mutex);
> +
> +	/*
> +	 * This worker is only used to flip between ENABLED/DESIRED. Either of
> +	 * those to OFF is handled by core. If hdcp_value == OFF, we're running
> +	 * just after hdcp has been disabled, so just exit
> +	 */
> +	if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_OFF) {
> +		state = connector->base.state;
> +		state->content_protection = connector->hdcp_value;
> +	}
> +
> +	mutex_unlock(&connector->hdcp_mutex);
> +	drm_modeset_unlock(&dev->mode_config.connection_mutex);
> +	mutex_unlock(&dev->mode_config.mutex);
> +}
> +
> +static int intel_hdcp_enable(struct intel_connector *connector)
> +{
> +	int ret;
> +
> +	if (!connector->hdcp_shim)
> +		return -ENOENT;
> +
> +	mutex_lock(&connector->hdcp_mutex);
> +
> +	ret = _intel_hdcp_enable(connector);
> +	if (ret)
> +		goto out;
> +
> +	connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
> +	schedule_work(&connector->hdcp_prop_work);
> +	schedule_delayed_work(&connector->hdcp_check_work,
> +			      DRM_HDCP_CHECK_PERIOD_MS);
> +out:
> +	mutex_unlock(&connector->hdcp_mutex);
> +	return ret;
> +}
> +
> +static int intel_hdcp_disable(struct intel_connector *connector)
> +{
> +	int ret;
> +
> +	if (!connector->hdcp_shim)
> +		return -ENOENT;
> +
> +	mutex_lock(&connector->hdcp_mutex);
> +
> +	connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_OFF;
> +	ret = _intel_hdcp_disable(connector);
> +
> +	mutex_unlock(&connector->hdcp_mutex);
> +	cancel_delayed_work_sync(&connector->hdcp_check_work);
> +	return ret;
> +}
> +
> +void intel_hdcp_atomic_check(struct drm_connector *connector,
> +			     struct drm_connector_state *old_state,
> +			     struct drm_connector_state *new_state)
> +{
> +	uint64_t old_cp = old_state->content_protection;
> +	uint64_t new_cp = new_state->content_protection;
> +
> +	if (!new_state->crtc) {
> +		/*
> +		 * If the connector is being disabled with CP enabled, mark it
> +		 * desired so it's re-enabled when the connector is brought back
> +		 */
> +		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
> +			new_state->content_protection =
> +				DRM_MODE_CONTENT_PROTECTION_DESIRED;
> +		return;
> +	}
> +
> +	/* Only drivers can set content protection enabled */
> +	if (old_cp != DRM_MODE_CONTENT_PROTECTION_ENABLED &&
> +	    new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
> +		new_state->content_protection =
> +			DRM_MODE_CONTENT_PROTECTION_DESIRED;
Since drm_atomic_connector_set_property() rejects the userspace request
for DRM_MODE_CONTENT_PROTECTION_ENABLED, we don't need this check here.
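
For reference, the check in the core (from the content protection
property patch earlier in this series) is roughly:

	} else if (property == connector->content_protection_property) {
		if (val == DRM_MODE_CONTENT_PROTECTION_ENABLED) {
			DRM_DEBUG_KMS("only drivers can set CP Enabled\n");
			return -EINVAL;
		}
		state->content_protection = val;
	}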

--Ram
> +}
> +
> +void intel_hdcp_atomic_pre_commit(struct drm_connector *connector,
> +			          struct drm_connector_state *old_state,
> +			          struct drm_connector_state *new_state)
> +{
> +	uint64_t old_cp = old_state->content_protection;
> +	uint64_t new_cp = new_state->content_protection;
> +
> +	/*
> +	 * Disable HDCP if the connector is becoming disabled, or if requested
> +	 * via the property.
> +	 */
> +	if ((!new_state->crtc && old_cp != DRM_MODE_CONTENT_PROTECTION_OFF) ||
> +	    (new_state->crtc && old_cp != DRM_MODE_CONTENT_PROTECTION_OFF &&
> +	     new_cp == DRM_MODE_CONTENT_PROTECTION_OFF))
> +		intel_hdcp_disable(to_intel_connector(connector));
> +}
> +
> +void intel_hdcp_atomic_commit(struct drm_connector *connector,
> +			      struct drm_connector_state *new_state)
> +{
> +	uint64_t new_cp = new_state->content_protection;
> +
> +	/* Enable hdcp if it's desired */
> +	if (new_state->crtc && new_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED)
> +		intel_hdcp_enable(to_intel_connector(connector));
> +}
> +
> +/* Implements Part 3 of the HDCP authorization procedure */
> +int intel_hdcp_check_link(struct intel_connector *connector)
> +{
> +	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
> +	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
> +	enum port port = intel_dig_port->base.port;
> +	int ret = 0;
> +
> +	if (!connector->hdcp_shim)
> +		return -ENOENT;
> +
> +	mutex_lock(&connector->hdcp_mutex);
> +
> +	if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_OFF)
> +		goto out;
> +
> +	if (!(I915_READ(SKL_PORT_HDCP_STATUS(port)) & SKL_HDCP_STATUS_ENC)) {
> +		DRM_ERROR("HDCP check failed: link is not encrypted, %x\n",
> +			   I915_READ(SKL_PORT_HDCP_STATUS(port)));
> +		ret = -ENXIO;
> +		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
> +		schedule_work(&connector->hdcp_prop_work);
> +		goto out;
> +	}
> +
> +	if (connector->hdcp_shim->check_link(intel_dig_port)) {
> +		if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_OFF) {
> +			connector->hdcp_value =
> +				DRM_MODE_CONTENT_PROTECTION_ENABLED;
> +			schedule_work(&connector->hdcp_prop_work);
> +		}
> +		goto out;
> +	}
> +
> +	DRM_INFO("HDCP link failed, retrying authentication\n");
> +
> +	ret = _intel_hdcp_disable(connector);
> +	if (ret) {
> +		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
> +		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
> +		schedule_work(&connector->hdcp_prop_work);
> +		goto out;
> +	}
> +
> +	ret = _intel_hdcp_enable(connector);
> +	if (ret) {
> +		DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
> +		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
> +		schedule_work(&connector->hdcp_prop_work);
> +		goto out;
> +	}
> +
> +out:
> +	mutex_unlock(&connector->hdcp_mutex);
> +	return ret;
> +}
Daniel Vetter Dec. 5, 2017, 5 p.m. UTC | #2
On Tue, Dec 05, 2017 at 12:15:04AM -0500, Sean Paul wrote:
> This patch adds the framework required to add HDCP support to intel
> connectors. It implements Aksv loading from fuse, and parts 1/2/3
> of the HDCP authentication scheme.
> 
> Note that without shim implementations, this does not actually implement
> HDCP. That will come in subsequent patches.
> 
> Changes in v2:
> - Don't open code wait_fors (Chris)
> - drm_hdcp.c under MIT license (Daniel)
> - Move intel_hdcp_disable() call above ddi_disable (Ram)
> - Fix // comments (I wore a cone of shame for 12 hours to atone) (Daniel)
> - Justify intel_hdcp_shim with comments (Daniel)
> - Fixed async locking issues by adding hdcp_mutex (Daniel)
> - Don't alter connector_state in enable/disable (Daniel)
> Changes in v3:
> - Added hdcp_mutex/hdcp_value to make async reasonable
> - Added hdcp_prop_work to separate link checking & property setting
> - Added new helper for atomic_check state tracking (Daniel)
> - Moved enable/disable into atomic_commit with matching helpers
> - Moved intel_hdcp_check_link out of all locks when called from dp
> - Bumped up ksv_fifo timeout (noticed failure on one of my dongles)
> 
> Cc: Chris Wilson <chris@chris-wilson.co.uk>
> Cc: Daniel Vetter <daniel.vetter@intel.com>
> Cc: Ramalingam C <ramalingam.c@intel.com>
> Signed-off-by: Sean Paul <seanpaul@chromium.org>
> ---
>  drivers/gpu/drm/i915/Makefile        |   1 +
>  drivers/gpu/drm/i915/i915_reg.h      |  83 ++++
>  drivers/gpu/drm/i915/intel_atomic.c  |   2 +
>  drivers/gpu/drm/i915/intel_display.c |  14 +
>  drivers/gpu/drm/i915/intel_drv.h     |  88 +++++
>  drivers/gpu/drm/i915/intel_hdcp.c    | 731 +++++++++++++++++++++++++++++++++++
>  6 files changed, 919 insertions(+)
>  create mode 100644 drivers/gpu/drm/i915/intel_hdcp.c
> 
> diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
> index 42bc8bd4ff06..3facea4eefdb 100644
> --- a/drivers/gpu/drm/i915/Makefile
> +++ b/drivers/gpu/drm/i915/Makefile
> @@ -107,6 +107,7 @@ i915-y += intel_audio.o \
>  	  intel_fbc.o \
>  	  intel_fifo_underrun.o \
>  	  intel_frontbuffer.o \
> +	  intel_hdcp.o \
>  	  intel_hotplug.o \
>  	  intel_modes.o \
>  	  intel_overlay.o \
> diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
> index 09bf043c1c2e..2bd2cc8441d4 100644
> --- a/drivers/gpu/drm/i915/i915_reg.h
> +++ b/drivers/gpu/drm/i915/i915_reg.h
> @@ -8034,6 +8034,7 @@ enum {
>  #define     GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT	8
>  #define     GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT	16
>  #define     GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT	24
> +#define   SKL_PCODE_LOAD_HDCP_KEYS		0x5

SKL_ prefix feels right here, since this is for skl, kbl, ... only, and
doesn't apply to bxt. So not gen9 stuff.

>  #define   SKL_PCODE_CDCLK_CONTROL		0x7
>  #define     SKL_CDCLK_PREPARE_FOR_CHANGE	0x3
>  #define     SKL_CDCLK_READY_FOR_CHANGE		0x1
> @@ -8335,6 +8336,88 @@ enum skl_power_gate {
>  #define  SKL_PW_TO_PG(pw)			((pw) - SKL_DISP_PW_1 + SKL_PG1)
>  #define  SKL_FUSE_PG_DIST_STATUS(pg)		(1 << (27 - (pg)))
>  
> +
> +/* HDCP Key Registers */
> +#define SKL_HDCP_KEY_CONF		_MMIO(0x66c00)
> +#define	 SKL_HDCP_AKSV_SEND_TRIGGER	BIT(31)
> +#define  SKL_HDCP_CLEAR_KEYS_TRIGGER	BIT(30)
> +#define SKL_HDCP_KEY_STATUS		_MMIO(0x66c04)
> +#define  SKL_HDCP_FUSE_IN_PROGRESS	BIT(7)
> +#define  SKL_HDCP_FUSE_ERROR		BIT(6)
> +#define  SKL_HDCP_FUSE_DONE		BIT(5)
> +#define  SKL_HDCP_KEY_LOAD_STATUS	BIT(1)
> +#define  SKL_HDCP_KEY_LOAD_DONE		BIT(0)
> +#define SKL_HDCP_AKSV_LO		_MMIO(0x66c10)
> +#define SKL_HDCP_AKSV_HI		_MMIO(0x66c14)
> +
> +/* HDCP Repeater Registers */
> +#define SKL_HDCP_REP_CTL		_MMIO(0x66d00)
> +#define  SKL_HDCP_DDIB_REP_PRESENT	BIT(30)
> +#define  SKL_HDCP_DDIA_REP_PRESENT	BIT(29)
> +#define  SKL_HDCP_DDIC_REP_PRESENT	BIT(28)
> +#define  SKL_HDCP_DDID_REP_PRESENT	BIT(27)
> +#define  SKL_HDCP_DDIF_REP_PRESENT	BIT(26)
> +#define  SKL_HDCP_DDIE_REP_PRESENT	BIT(25)
> +#define  SKL_HDCP_DDIB_SHA1_M0		(1 << 20)
> +#define  SKL_HDCP_DDIA_SHA1_M0		(2 << 20)
> +#define  SKL_HDCP_DDIC_SHA1_M0		(3 << 20)
> +#define  SKL_HDCP_DDID_SHA1_M0		(4 << 20)
> +#define  SKL_HDCP_DDIF_SHA1_M0		(5 << 20)
> +#define  SKL_HDCP_DDIE_SHA1_M0		(6 << 20) /* Bspec says 5? */

Yeah that's one good wtf :-)

> +#define  SKL_HDCP_SHA1_BUSY		BIT(16)
> +#define  SKL_HDCP_SHA1_READY		BIT(17)
> +#define  SKL_HDCP_SHA1_COMPLETE		BIT(18)
> +#define  SKL_HDCP_SHA1_V_MATCH		BIT(19)
> +#define  SKL_HDCP_SHA1_TEXT_32		(1 << 1)
> +#define  SKL_HDCP_SHA1_COMPLETE_HASH	(2 << 1)
> +#define  SKL_HDCP_SHA1_TEXT_24		(4 << 1)
> +#define  SKL_HDCP_SHA1_TEXT_16		(5 << 1)
> +#define  SKL_HDCP_SHA1_TEXT_8		(6 << 1)
> +#define  SKL_HDCP_SHA1_TEXT_0		(7 << 1)
> +#define SKL_HDCP_SHA_V_PRIME_H0		_MMIO(0x66d04)
> +#define SKL_HDCP_SHA_V_PRIME_H1		_MMIO(0x66d08)
> +#define SKL_HDCP_SHA_V_PRIME_H2		_MMIO(0x66d0C)
> +#define SKL_HDCP_SHA_V_PRIME_H3		_MMIO(0x66d10)
> +#define SKL_HDCP_SHA_V_PRIME_H4		_MMIO(0x66d14)
> +#define SKL_HDCP_SHA_V_PRIME(h)		_MMIO((0x66d04 + h * 4))
> +#define SKL_HDCP_SHA_TEXT		_MMIO(0x66d18)
> +
> +/* HDCP Auth Registers */
> +#define _SKL_PORTA_HDCP_AUTHENC		0x66800
> +#define _SKL_PORTB_HDCP_AUTHENC		0x66500
> +#define _SKL_PORTC_HDCP_AUTHENC		0x66600
> +#define _SKL_PORTD_HDCP_AUTHENC		0x66700
> +#define _SKL_PORTE_HDCP_AUTHENC		0x66A00
> +#define _SKL_PORTF_HDCP_AUTHENC		0x66900
> +#define _SKL_PORT_HDCP_AUTHENC(port, x)	_MMIO(_PICK(port, \
> +					  _SKL_PORTA_HDCP_AUTHENC, \
> +					  _SKL_PORTB_HDCP_AUTHENC, \
> +					  _SKL_PORTC_HDCP_AUTHENC, \
> +					  _SKL_PORTD_HDCP_AUTHENC, \
> +					  _SKL_PORTE_HDCP_AUTHENC, \
> +					  _SKL_PORTF_HDCP_AUTHENC) + x)
> +#define SKL_PORT_HDCP_CONF(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x0)
> +#define  SKL_HDCP_CONF_CAPTURE_AN	BIT(0)
> +#define  SKL_HDCP_CONF_AUTH_AND_ENC	(BIT(1) | BIT(0))
> +#define SKL_PORT_HDCP_ANINIT(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x4)
> +#define SKL_PORT_HDCP_ANLO(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x8)
> +#define SKL_PORT_HDCP_ANHI(port)	_SKL_PORT_HDCP_AUTHENC(port, 0xC)
> +#define SKL_PORT_HDCP_BKSVLO(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x10)
> +#define SKL_PORT_HDCP_BKSVHI(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x14)
> +#define SKL_PORT_HDCP_RPRIME(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x18)
> +#define SKL_PORT_HDCP_STATUS(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x1C)
> +#define  SKL_HDCP_STATUS_STREAM_A_ENC	BIT(31)
> +#define  SKL_HDCP_STATUS_STREAM_B_ENC	BIT(30)
> +#define  SKL_HDCP_STATUS_STREAM_C_ENC	BIT(29)
> +#define  SKL_HDCP_STATUS_STREAM_D_ENC	BIT(28)
> +#define  SKL_HDCP_STATUS_AUTH		BIT(21)
> +#define  SKL_HDCP_STATUS_ENC		BIT(20)
> +#define  SKL_HDCP_STATUS_RI_MATCH	BIT(19)
> +#define  SKL_HDCP_STATUS_R0_READY	BIT(18)
> +#define  SKL_HDCP_STATUS_AN_READY	BIT(17)
> +#define  SKL_HDCP_STATUS_CIPHER		BIT(16)
> +#define  SKL_HDCP_STATUS_FRAME_CNT(x)	((x >> 8) & 0xff)

Except for the SKL+ specific GT mailbox bit, all the above registers are
generic (apart from the 3 bits in KEY_STATUS I highlighted). Can you pls sed
the entire patch and drop the SKL_ prefix from all of them?

I know it's a bit busywork, but given how the bits move around between
platforms in funny ways it's good prep for enabling hdcp on more platforms
I think.

If it's too messy feel free to apply the bikeshed in a follow-up patch.

Either way all the above looks correct.
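
i.e. just the naming, same offsets, something like:

#define HDCP_KEY_CONF			_MMIO(0x66c00)
#define  HDCP_AKSV_SEND_TRIGGER		BIT(31)
#define  HDCP_CLEAR_KEYS_TRIGGER	BIT(30)
#define HDCP_KEY_STATUS			_MMIO(0x66c04)
#define HDCP_REP_CTL			_MMIO(0x66d00)
#define PORT_HDCP_CONF(port)		_PORT_HDCP_AUTHENC(port, 0x0)

and so on for the rest of the block.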

> +
>  /* Per-pipe DDI Function Control */
>  #define _TRANS_DDI_FUNC_CTL_A		0x60400
>  #define _TRANS_DDI_FUNC_CTL_B		0x61400
> diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
> index 36d4e635e4ce..d452c327dc1d 100644
> --- a/drivers/gpu/drm/i915/intel_atomic.c
> +++ b/drivers/gpu/drm/i915/intel_atomic.c
> @@ -110,6 +110,8 @@ int intel_digital_connector_atomic_check(struct drm_connector *conn,
>  		to_intel_digital_connector_state(old_state);
>  	struct drm_crtc_state *crtc_state;
>  
> +	intel_hdcp_atomic_check(conn, old_state, new_state);
> +
>  	if (!new_state->crtc)
>  		return 0;
>  
> diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
> index 601c23be8264..f45c468abf98 100644
> --- a/drivers/gpu/drm/i915/intel_display.c
> +++ b/drivers/gpu/drm/i915/intel_display.c
> @@ -12319,6 +12319,8 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
>  	struct drm_i915_private *dev_priv = to_i915(dev);
>  	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
>  	struct drm_crtc *crtc;
> +	struct drm_connector_state *old_conn_state, *new_conn_state;
> +	struct drm_connector *connector;
>  	struct intel_crtc_state *intel_cstate;
>  	u64 put_domains[I915_MAX_PIPES] = {};
>  	int i;
> @@ -12408,9 +12410,17 @@ static void intel_atomic_commit_tail(struct drm_atomic_state *state)
>  		}
>  	}
>  
> +	for_each_oldnew_connector_in_state(state, connector, old_conn_state,
> +					   new_conn_state, i)
> +		intel_hdcp_atomic_pre_commit(connector, old_conn_state,
> +					     new_conn_state);
> +
>  	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
>  	dev_priv->display.update_crtcs(state);
>  
> +	for_each_new_connector_in_state(state, connector, new_conn_state, i)
> +		intel_hdcp_atomic_commit(connector, new_conn_state);

Hm, I liked the old place where this was called from ddi enable/disable
hooks much better. HDCP very much is a connector thing, leaking that
through the entire layer-cake that is atomic to the highest level feels a
bit wrong.

Why?

Filling in after an irc chat with Sean: it's leftover from an attempt to
implement HDCP state changes without a full modeset. Imo that should be
undone again, back to the old version, for a few reasons:

- Atomic is damn hard to understand already; leaking a connector thing
  through all layers makes it worse. And we're probably already over the
  limit of complexity that can be understood, so making abstractions more
  leaky needs some real good reasons.

- Ville is working on a slightly different approach to allow
  connectors/encoders to fix up stuff if no modeset happened (new separate
  callback). This here doesn't align with that plan.
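
Concretely that would mean exporting intel_hdcp_enable/disable instead of
the atomic commit helpers and calling them from the encoder hooks again,
roughly like this (sketch only, hook signatures from memory):

static void intel_enable_ddi(struct intel_encoder *encoder,
			     const struct intel_crtc_state *crtc_state,
			     const struct drm_connector_state *conn_state)
{
	/* ... existing enable sequence ... */

	if (conn_state->content_protection ==
	    DRM_MODE_CONTENT_PROTECTION_DESIRED)
		intel_hdcp_enable(to_intel_connector(conn_state->connector));
}

static void intel_disable_ddi(struct intel_encoder *encoder,
			      const struct intel_crtc_state *old_crtc_state,
			      const struct drm_connector_state *old_conn_state)
{
	intel_hdcp_disable(to_intel_connector(old_conn_state->connector));

	/* ... existing disable sequence ... */
}
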

> +
>  	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
>  	 * already, but still need the state for the delayed optimization. To
>  	 * fix this:
> @@ -15322,6 +15332,10 @@ static void intel_hpd_poll_fini(struct drm_device *dev)
>  	for_each_intel_connector_iter(connector, &conn_iter) {
>  		if (connector->modeset_retry_work.func)
>  			cancel_work_sync(&connector->modeset_retry_work);
> +		if (connector->hdcp_shim) {
> +			cancel_delayed_work_sync(&connector->hdcp_check_work);
> +			cancel_work_sync(&connector->hdcp_prop_work);
> +		}
>  	}
>  	drm_connector_list_iter_end(&conn_iter);
>  }
> diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
> index 852b3d161754..6f47a4227f5f 100644
> --- a/drivers/gpu/drm/i915/intel_drv.h
> +++ b/drivers/gpu/drm/i915/intel_drv.h
> @@ -301,6 +301,76 @@ struct intel_panel {
>  	} backlight;
>  };
>  
> +/*
> + * This structure serves as a translation layer between the generic HDCP code
> + * and the bus-specific code. What that means is that HDCP over HDMI differs
> + * from HDCP over DP, so to account for these differences, we need to
> + * communicate with the receiver through this shim.
> + *
> + * For completeness, the 2 buses differ in the following ways:
> + *	- DP AUX vs. DDC
> + *		HDCP registers on the receiver are set via DP AUX for DP, and
> + *		they are set via DDC for HDMI.
> + *	- Receiver register offsets
> + *		The offsets of the registers are different for DP vs. HDMI
> + *	- Receiver register masks/offsets
> + *		For instance, the ready bit for the KSV fifo is in a different
> + *		place on DP vs HDMI
> + *	- Receiver register names
> + *		Seriously. In the DP spec, the 16-bit register containing
> + *		downstream information is called BINFO, on HDMI it's called
> + *		BSTATUS. To confuse matters further, DP has a BSTATUS register
> + *		with a completely different definition.
> + *	- KSV FIFO
> + *		On HDMI, the ksv fifo is read all at once, whereas on DP it must
> + *		be read 3 keys at a time
> + *	- Aksv output
> + *		Since Aksv is hidden in hardware, there's different procedures
> + *		to send it over DP AUX vs DDC
> + */
> +struct intel_hdcp_shim {
> +	/* Outputs the transmitter's An and Aksv values to the receiver. */
> +	int (*write_an_aksv)(struct intel_digital_port *intel_dig_port, u8 *an);
> +
> +	/* Reads the receiver's key selection vector */
> +	int (*read_bksv)(struct intel_digital_port *intel_dig_port, u8 *bksv);
> +
> +	/*
> +	 * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The
> +	 * definitions are the same in the respective specs, but the names are
> +	 * different. Call it BSTATUS since that's the name the HDMI spec
> +	 * uses and it was there first.
> +	 */
> +	int (*read_bstatus)(struct intel_digital_port *intel_dig_port,
> +			    u8 *bstatus);
> +
> +	/* Determines whether a repeater is present downstream */
> +	int (*repeater_present)(struct intel_digital_port *intel_dig_port,
> +				bool *repeater_present);
> +
> +	/* Reads the receiver's Ri' value */
> +	int (*read_ri_prime)(struct intel_digital_port *intel_dig_port, u8 *ri);
> +
> +	/* Determines if the receiver's KSV FIFO is ready for consumption */
> +	int (*read_ksv_ready)(struct intel_digital_port *intel_dig_port,
> +			      bool *ksv_ready);
> +
> +	/* Reads the ksv fifo for num_downstream devices */
> +	int (*read_ksv_fifo)(struct intel_digital_port *intel_dig_port,
> +			     int num_downstream, u8 *ksv_fifo);
> +
> +	/* Reads a 32-bit part of V' from the receiver */
> +	int (*read_v_prime_part)(struct intel_digital_port *intel_dig_port,
> +				 int i, u32 *part);
> +
> +	/* Enables HDCP signalling on the port */
> +	int (*toggle_signalling)(struct intel_digital_port *intel_dig_port,
> +				 bool enable);
> +
> +	/* Ensures the link is still protected */
> +	bool (*check_link)(struct intel_digital_port *intel_dig_port);
> +};
> +
>  struct intel_connector {
>  	struct drm_connector base;
>  	/*
> @@ -332,6 +402,12 @@ struct intel_connector {
>  
>  	/* Work struct to schedule a uevent on link train failure */
>  	struct work_struct modeset_retry_work;
> +
> +	const struct intel_hdcp_shim *hdcp_shim;
> +	struct mutex hdcp_mutex;
> +	uint64_t hdcp_value; /* protected by hdcp_mutex */
> +	struct delayed_work hdcp_check_work;
> +	struct work_struct hdcp_prop_work;
>  };
>  
>  struct intel_digital_connector_state {
> @@ -1763,6 +1839,18 @@ static inline void intel_backlight_device_unregister(struct intel_connector *con
>  }
>  #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
>  
> +/* intel_hdcp.c */
> +void intel_hdcp_atomic_check(struct drm_connector *connector,
> +			     struct drm_connector_state *old_state,
> +			     struct drm_connector_state *new_state);
> +void intel_hdcp_atomic_pre_commit(struct drm_connector *connector,
> +			          struct drm_connector_state *old_state,
> +			          struct drm_connector_state *new_state);
> +void intel_hdcp_atomic_commit(struct drm_connector *connector,
> +			      struct drm_connector_state *new_state);
> +int intel_hdcp_check_link(struct intel_connector *connector);
> +void intel_hdcp_check_work(struct work_struct *work);
> +void intel_hdcp_prop_work(struct work_struct *work);
>  
>  /* intel_psr.c */
>  void intel_psr_enable(struct intel_dp *intel_dp,
> diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
> new file mode 100644
> index 000000000000..4eef9505410f
> --- /dev/null
> +++ b/drivers/gpu/drm/i915/intel_hdcp.c
> @@ -0,0 +1,731 @@
> +/*
> + * Copyright (C) 2017 Google, Inc.
> + *
> + * Permission is hereby granted, free of charge, to any person obtaining a
> + * copy of this software and associated documentation files (the "Software"),
> + * to deal in the Software without restriction, including without limitation
> + * the rights to use, copy, modify, merge, publish, distribute, sublicense,
> + * and/or sell copies of the Software, and to permit persons to whom the
> + * Software is furnished to do so, subject to the following conditions:
> + *
> + * The above copyright notice and this permission notice shall be included in
> + * all copies or substantial portions of the Software.
> + *
> + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
> + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
> + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
> + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> + * OTHER DEALINGS IN THE SOFTWARE.
> + *
> + * Authors:
> + * Sean Paul <seanpaul@chromium.org>
> + */
> +
> +#include <drm/drmP.h>
> +#include <drm/drm_hdcp.h>
> +#include <linux/i2c.h>
> +#include <linux/random.h>
> +
> +#include "intel_drv.h"
> +#include "i915_reg.h"
> +
> +#define KEY_LOAD_TRIES	5
> +
> +static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
> +				    const struct intel_hdcp_shim *shim)
> +{
> +	int ret, read_ret;
> +	bool ksv_ready;
> +
> +	ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
> +							 &ksv_ready),
> +			 read_ret || ksv_ready, 1500 * 1000, 1000, 100 * 1000);
> +	if (ret)
> +		return ret;
> +	if (read_ret)
> +		return read_ret;
> +	if (!ksv_ready)
> +		return -ETIMEDOUT;
> +
> +	return 0;
> +}
> +
> +static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
> +{
> +	I915_WRITE(SKL_HDCP_KEY_CONF, SKL_HDCP_CLEAR_KEYS_TRIGGER);
> +	I915_WRITE(SKL_HDCP_KEY_STATUS,
> +		   SKL_HDCP_KEY_LOAD_DONE | SKL_HDCP_KEY_LOAD_STATUS |
> +		   SKL_HDCP_FUSE_IN_PROGRESS | SKL_HDCP_FUSE_ERROR |
> +		   SKL_HDCP_FUSE_DONE);
> +}
> +
> +static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
> +{
> +	int ret;
> +	u32 val;
> +
> +	/* Initiate loading the HDCP key from fuses */
> +	mutex_lock(&dev_priv->pcu_lock);
> +	ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1);
> +	mutex_unlock(&dev_priv->pcu_lock);
> +	if (ret) {
> +		DRM_ERROR("Failed to initiate HDCP key load (%d)\n", ret);
> +		return ret;
> +	}
> +
> +	/* Wait for the keys to load (500us) */
> +	ret = __intel_wait_for_register(dev_priv, SKL_HDCP_KEY_STATUS,
> +					SKL_HDCP_KEY_LOAD_DONE,
> +					SKL_HDCP_KEY_LOAD_DONE,
> +					10, 1, &val);
> +	if (ret)
> +		return ret;
> +	else if (!(val & SKL_HDCP_KEY_LOAD_STATUS))
> +		return -ENXIO;
> +
> +	/* Send Aksv over to PCH display for use in authentication */
> +	I915_WRITE(SKL_HDCP_KEY_CONF, SKL_HDCP_AKSV_SEND_TRIGGER);
> +
> +	return 0;
> +}
> +
> +/* Returns updated SHA-1 index */
> +static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
> +{
> +	I915_WRITE(SKL_HDCP_SHA_TEXT, sha_text);
> +	if (intel_wait_for_register(dev_priv, SKL_HDCP_REP_CTL,
> +				    SKL_HDCP_SHA1_READY,
> +				    SKL_HDCP_SHA1_READY, 1)) {
> +		DRM_ERROR("Timed out waiting for SHA1 ready\n");
> +		return -ETIMEDOUT;
> +	}
> +	return 0;
> +}
> +
> +static
> +u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
> +{
> +	enum port port = intel_dig_port->base.port;
> +	switch(port) {
> +	case PORT_A:
> +		return SKL_HDCP_DDIA_REP_PRESENT | SKL_HDCP_DDIA_SHA1_M0;
> +	case PORT_B:
> +		return SKL_HDCP_DDIB_REP_PRESENT | SKL_HDCP_DDIB_SHA1_M0;
> +	case PORT_C:
> +		return SKL_HDCP_DDIC_REP_PRESENT | SKL_HDCP_DDIC_SHA1_M0;
> +	case PORT_D:
> +		return SKL_HDCP_DDID_REP_PRESENT | SKL_HDCP_DDID_SHA1_M0;
> +	case PORT_E:
> +		return SKL_HDCP_DDIE_REP_PRESENT | SKL_HDCP_DDIE_SHA1_M0;
> +	default:
> +		break;
> +	}
> +	DRM_ERROR("Unknown port %d\n", port);
> +	return -EINVAL;
> +}
> +
> +/* Implements Part 2 of the HDCP authorization procedure */
> +static
> +int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
> +			       const struct intel_hdcp_shim *shim)
> +{
> +	struct drm_i915_private *dev_priv;
> +	u32 vprime, sha_text, sha_leftovers, rep_ctl;
> +	u8 bstatus[2], num_downstream, *ksv_fifo;
> +	int ret, i, j, sha_idx;
> +
> +	dev_priv = intel_dig_port->base.base.dev->dev_private;
> +
> +	ret = shim->read_bstatus(intel_dig_port, bstatus);
> +	if (ret)
> +		return ret;
> +
> +	/* If there are no downstream devices, we're all done. */
> +	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
> +	if (num_downstream == 0) {
> +		DRM_INFO("HDCP is enabled (no downstream devices)\n");
> +		return 0;
> +	}
> +
> +	/* Poll for ksv list ready (spec says max time allowed is 5s) */
> +	ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
> +	if (ret) {
> +		DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
> +		return ret;
> +	}
> +
> +	ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL);
> +	if (!ksv_fifo)
> +		return -ENOMEM;
> +
> +	ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
> +	if (ret)
> +		return ret;
> +
> +	/* Process V' values from the receiver */
> +	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
> +		ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
> +		if (ret)
> +			return ret;
> +		I915_WRITE(SKL_HDCP_SHA_V_PRIME(i), vprime);
> +	}
> +
> +	/*
> +	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
> +	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
> +	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
> +	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
> +	 * index will keep track of our progress through the 64 bytes as well as
> +	 * helping us work the 40-bit KSVs through our 32-bit register.
> +	 *
> +	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
> +	 */
> +	sha_idx = 0;
> +	sha_text = 0;
> +	sha_leftovers = 0;
> +	rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
> +	I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_32);
> +	for (i = 0; i < num_downstream; i++) {
> +		unsigned sha_empty;
> +		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
> +
> +		/* Fill up the empty slots in sha_text and write it out */
> +		sha_empty = sizeof(sha_text) - sha_leftovers;
> +		for (j = 0; j < sha_empty; j++)
> +			sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
> +
> +		ret = intel_write_sha_text(dev_priv, sha_text);
> +		if (ret < 0)
> +			return ret;
> +
> +		/* Programming guide writes this every 64 bytes */
> +		sha_idx += sizeof(sha_text);
> +		if (!(sha_idx % 64))
> +			I915_WRITE(SKL_HDCP_REP_CTL,
> +				   rep_ctl | SKL_HDCP_SHA1_TEXT_32);
> +
> +		/* Store the leftover bytes from the ksv in sha_text */
> +		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
> +		sha_text = 0;
> +		for (j = 0; j < sha_leftovers; j++)
> +			sha_text |= ksv[sha_empty + j] <<
> +					((sizeof(sha_text) - j - 1) * 8);
> +
> +		/*
> +		 * If we still have room in sha_text for more data, continue.
> +		 * Otherwise, write it out immediately.
> +		 */
> +		if (sizeof(sha_text) > sha_leftovers)
> +			continue;
> +
> +		ret = intel_write_sha_text(dev_priv, sha_text);
> +		if (ret < 0)
> +			return ret;
> +		sha_leftovers = 0;
> +		sha_text = 0;
> +		sha_idx += sizeof(sha_text);
> +	}
> +
> +	/*
> +	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
> +	 * bytes are leftover from the last ksv, we might be able to fit them
> +	 * all in sha_text (first 2 cases), or we might need to split them up
> +	 * into 2 writes (last 2 cases).
> +	 */
> +	if (sha_leftovers == 0) {
> +		/* Write 16 bits of text, 16 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_16);
> +		ret = intel_write_sha_text(dev_priv,
> +					   bstatus[0] << 8 | bstatus[1]);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 32 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_0);
> +		ret = intel_write_sha_text(dev_priv, 0);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 16 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_16);
> +		ret = intel_write_sha_text(dev_priv, 0);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +	} else if (sha_leftovers == 1) {
> +		/* Write 24 bits of text, 8 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_24);
> +		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
> +		/* Only 24-bits of data, must be in the LSB */
> +		sha_text = (sha_text & 0xffffff00) >> 8;
> +		ret = intel_write_sha_text(dev_priv, sha_text);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 32 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_0);
> +		ret = intel_write_sha_text(dev_priv, 0);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 24 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_8);
> +		ret = intel_write_sha_text(dev_priv, 0);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +	} else if (sha_leftovers == 2) {
> +		/* Write 32 bits of text */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_32);
> +		sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
> +		ret = intel_write_sha_text(dev_priv, sha_text);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 64 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_0);
> +		for (i = 0; i < 2; i++) {
> +			ret = intel_write_sha_text(dev_priv, 0);
> +			if (ret < 0)
> +				return ret;
> +			sha_idx += sizeof(sha_text);
> +		}
> +	} else if (sha_leftovers == 3) {
> +		/* Write 32 bits of text */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_32);
> +		sha_text |= bstatus[0] << 24;
> +		ret = intel_write_sha_text(dev_priv, sha_text);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 8 bits of text, 24 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_8);
> +		ret = intel_write_sha_text(dev_priv, bstatus[1]);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 32 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_0);
> +		ret = intel_write_sha_text(dev_priv, 0);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +
> +		/* Write 8 bits of M0 */
> +		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_24);
> +		ret = intel_write_sha_text(dev_priv, 0);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +	} else {
> +		DRM_ERROR("Invalid number of leftovers %d\n", sha_leftovers);
> +		return -EINVAL;
> +	}

I'd have extracted a little helper to write the sha_text byte-wise which
internally accumulates it and pushes it out every 32 bits. That would neatly
separate what we write from how the SHA machinery works.

But this somewhat more verbose style works too, I guess, so it's just an idea
in case we ever spot a bug in here (I didn't).
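
Something along these lines is what I had in mind. Purely an untested
sketch, and the intel_sha_ctx/intel_sha_append_byte names are made up
here, they're not anything in the patch:

/*
 * Sketch only: buffer the SHA-1 text byte stream and flush it to
 * HDCP_SHA_TEXT in 32-bit chunks, re-arming HDCP_REP_CTL every
 * 64 bytes as the programming guide asks.
 */
struct intel_sha_ctx {
	u32 text;	/* partially filled word, big-endian order */
	int filled;	/* bytes currently buffered in text */
	int idx;	/* total bytes pushed to the hw so far */
	u32 rep_ctl;
};

static int intel_sha_append_byte(struct drm_i915_private *dev_priv,
				 struct intel_sha_ctx *ctx, u8 byte)
{
	int ret;

	ctx->text |= (u32)byte <<
		     ((sizeof(ctx->text) - ctx->filled - 1) * 8);
	if (++ctx->filled < sizeof(ctx->text))
		return 0;

	ret = intel_write_sha_text(dev_priv, ctx->text);
	if (ret < 0)
		return ret;

	ctx->idx += sizeof(ctx->text);
	ctx->text = 0;
	ctx->filled = 0;

	/* Programming guide wants REP_CTL rewritten every 64 bytes */
	if (!(ctx->idx % 64))
		I915_WRITE(SKL_HDCP_REP_CTL,
			   ctx->rep_ctl | SKL_HDCP_SHA1_TEXT_32);
	return 0;
}

The trailing partial word would still need the SHA1_TEXT_24/16/8
handling before COMPLETE_HASH, so this only covers the accumulation
part.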

> +
> +	I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_32);
> +	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
> +	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
> +		ret = intel_write_sha_text(dev_priv, 0);
> +		if (ret < 0)
> +			return ret;
> +		sha_idx += sizeof(sha_text);
> +	}
> +
> +	/*
> +	 * Last write gets the length of the concatenation in bits. That is:
> +	 *  - 5 bytes per device
> +	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
> +	 */
> +	sha_text = (num_downstream * 5 + 10) * 8;
> +	ret = intel_write_sha_text(dev_priv, sha_text);
> +	if (ret < 0)
> +		return ret;
> +
> +	/* Tell the HW we're done with the hash and wait for it to ACK */
> +	I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_COMPLETE_HASH);
> +	if (intel_wait_for_register(dev_priv, SKL_HDCP_REP_CTL,
> +				    SKL_HDCP_SHA1_COMPLETE,
> +				    SKL_HDCP_SHA1_COMPLETE, 1)) {
> +		DRM_ERROR("Timed out waiting for SHA1 complete\n");
> +		return -ETIMEDOUT;
> +	}
> +	if (!(I915_READ(SKL_HDCP_REP_CTL) & SKL_HDCP_SHA1_V_MATCH)) {
> +		DRM_ERROR("SHA-1 mismatch, HDCP failed\n");
> +		return -ENXIO;
> +	}
> +
> +	DRM_INFO("HDCP is enabled (%d downstream devices)\n", num_downstream);
> +	return 0;
> +}
> +
> +/* Implements Part 1 of the HDCP authorization procedure */
> +static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
> +			   const struct intel_hdcp_shim *shim)
> +{
> +	struct drm_i915_private *dev_priv;
> +	enum port port;
> +	unsigned long r0_prime_gen_start;
> +	int ret, i;
> +	union {
> +		u32 reg[2];
> +		u8 shim[DRM_HDCP_AN_LEN];
> +	} an;
> +	union {
> +		u32 reg[2];
> +		u8 shim[DRM_HDCP_KSV_LEN];
> +	} bksv;
> +	union {
> +		u32 reg;
> +		u8 shim[DRM_HDCP_RI_LEN];
> +	} ri;
> +	bool repeater_present;
> +
> +	dev_priv = intel_dig_port->base.base.dev->dev_private;
> +
> +	port = intel_dig_port->base.port;
> +
> +	/* Initialize An with 2 random values and acquire it */
> +	for (i = 0; i < 2; i++)
> +		I915_WRITE(SKL_PORT_HDCP_ANINIT(port), get_random_long());

nit: get_random_u32() instead of get_random_long()?
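
i.e. just the obvious substitution (untested):

	for (i = 0; i < 2; i++)
		I915_WRITE(SKL_PORT_HDCP_ANINIT(port), get_random_u32());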

> +	I915_WRITE(SKL_PORT_HDCP_CONF(port), SKL_HDCP_CONF_CAPTURE_AN);
> +
> +	/* Wait for An to be acquired */
> +	if (intel_wait_for_register(dev_priv, SKL_PORT_HDCP_STATUS(port),
> +				    SKL_HDCP_STATUS_AN_READY,
> +				    SKL_HDCP_STATUS_AN_READY, 1)) {
> +		DRM_ERROR("Timed out waiting for An\n");
> +		return -ETIMEDOUT;
> +	}
> +
> +	an.reg[0] = I915_READ(SKL_PORT_HDCP_ANLO(port));
> +	an.reg[1] = I915_READ(SKL_PORT_HDCP_ANHI(port));
> +	ret = shim->write_an_aksv(intel_dig_port, an.shim);
> +	if (ret)
> +		return ret;
> +
> +	r0_prime_gen_start = jiffies;
> +
> +	memset(&bksv, 0, sizeof(bksv));
> +	ret = shim->read_bksv(intel_dig_port, bksv.shim);
> +	if (ret)
> +		return ret;
> +
> +	I915_WRITE(SKL_PORT_HDCP_BKSVLO(port), bksv.reg[0]);
> +	I915_WRITE(SKL_PORT_HDCP_BKSVHI(port), bksv.reg[1]);
> +
> +	ret = shim->repeater_present(intel_dig_port, &repeater_present);
> +	if (ret)
> +		return ret;
> +	if (repeater_present)
> +		I915_WRITE(SKL_HDCP_REP_CTL,
> +			   intel_hdcp_get_repeater_ctl(intel_dig_port));
> +
> +	ret = shim->toggle_signalling(intel_dig_port, true);
> +	if (ret)
> +		return ret;
> +
> +	I915_WRITE(SKL_PORT_HDCP_CONF(port), SKL_HDCP_CONF_AUTH_AND_ENC);
> +
> +	/* Wait for R0 ready */
> +	if (wait_for(I915_READ(SKL_PORT_HDCP_STATUS(port)) &
> +		     (SKL_HDCP_STATUS_R0_READY | SKL_HDCP_STATUS_ENC), 1)) {
> +		DRM_ERROR("Timed out waiting for R0 ready\n");
> +		return -ETIMEDOUT;
> +	}
> +
> +	/*
> +	 * Wait for R0' to become available, the spec says 100ms from Aksv
> +	 * write. On DP, there's an R0_READY bit available but no such bit
> +	 * exists on HDMI. Since the upper-bound is the same, we'll just do
> +	 * the stupid thing instead of polling on one and not the other.
> +	 */
> +	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 100);
> +
> +	ri.reg = 0;
> +	ret = shim->read_ri_prime(intel_dig_port, ri.shim);
> +	if (ret)
> +		return ret;
> +	I915_WRITE(SKL_PORT_HDCP_RPRIME(port), ri.reg);
> +
> +	/* Wait for Ri prime match */
> +	if (wait_for(I915_READ(SKL_PORT_HDCP_STATUS(port)) &
> +		     (SKL_HDCP_STATUS_RI_MATCH | SKL_HDCP_STATUS_ENC), 1)) {
> +		DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
> +			  I915_READ(SKL_PORT_HDCP_STATUS(port)));
> +		return -ETIMEDOUT;
> +	}
> +
> +	/* Wait for encryption confirmation */
> +	if (intel_wait_for_register(dev_priv, SKL_PORT_HDCP_STATUS(port),
> +				    SKL_HDCP_STATUS_ENC,
> +				    SKL_HDCP_STATUS_ENC, 20)) {
> +		DRM_ERROR("Timed out waiting for encryption\n");
> +		return -ETIMEDOUT;
> +	}
> +
> +	/*
> +	 * XXX: If we have MST-connected devices, we need to enable encryption
> +	 * on those as well.
> +	 */

Just an aside: I think we can move the DDI stream-specific handling into
the toggle_signalling callback. But I'm not entirely sure.

> +
> +	return intel_hdcp_auth_downstream(intel_dig_port, shim);
> +}
> +
> +static
> +struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
> +{
> +	return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
> +}
> +
> +static int _intel_hdcp_disable(struct intel_connector *connector)
> +{
> +	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
> +	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
> +	enum port port = intel_dig_port->base.port;
> +	int ret;
> +
> +	I915_WRITE(SKL_PORT_HDCP_CONF(port), 0);
> +	if (intel_wait_for_register(dev_priv, SKL_PORT_HDCP_STATUS(port), ~0, 0,
> +				    20)) {
> +		DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
> +		return -ETIMEDOUT;
> +	}
> +
> +	intel_hdcp_clear_keys(dev_priv);
> +
> +	ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
> +	if (ret) {
> +		DRM_ERROR("Failed to disable HDCP signalling\n");
> +		return ret;
> +	}
> +
> +	DRM_INFO("HDCP is disabled\n");
> +	return 0;
> +}
> +
> +static int _intel_hdcp_enable(struct intel_connector *connector)
> +{
> +	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
> +	int i, ret;
> +
> +	if (!(I915_READ(SKL_FUSE_STATUS) & SKL_FUSE_PG_DIST_STATUS(1))) {
> +		DRM_ERROR("PG1 is disabled, cannot load keys\n");
> +		return -ENXIO;
> +	}
> +
> +	for (i = 0; i < KEY_LOAD_TRIES; i++) {
> +		ret = intel_hdcp_load_keys(dev_priv);
> +		if (!ret)
> +			break;
> +		intel_hdcp_clear_keys(dev_priv);
> +	}
> +	if (ret) {
> +		DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
> +		return ret;
> +	}
> +
> +	ret = intel_hdcp_auth(conn_to_dig_port(connector),
> +			      connector->hdcp_shim);
> +	if (ret) {
> +		DRM_ERROR("Failed to authenticate HDCP (%d)\n", ret);
> +		return ret;
> +	}
> +
> +	return 0;
> +}
> +
> +void intel_hdcp_check_work(struct work_struct *work)
> +{
> +	struct intel_connector *connector = container_of(to_delayed_work(work),
> +							 struct intel_connector,
> +						         hdcp_check_work);
> +	if (!intel_hdcp_check_link(connector))
> +		schedule_delayed_work(&connector->hdcp_check_work,
> +				      DRM_HDCP_CHECK_PERIOD_MS);
> +}
> +
> +void intel_hdcp_prop_work(struct work_struct *work)
> +{
> +	struct intel_connector *connector = container_of(work,
> +							 struct intel_connector,
> +						         hdcp_prop_work);
> +	struct drm_device *dev = connector->base.dev;
> +	struct drm_connector_state *state;
> +
> +	mutex_lock(&dev->mode_config.mutex);

You don't need this lock here. Only connection_mutex is needed to protect
connector->state.
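
Roughly this (untested, everything else unchanged):

	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
	mutex_lock(&connector->hdcp_mutex);

	if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_OFF)
		connector->base.state->content_protection =
						connector->hdcp_value;

	mutex_unlock(&connector->hdcp_mutex);
	drm_modeset_unlock(&dev->mode_config.connection_mutex);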

> +	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
> +	mutex_lock(&connector->hdcp_mutex);
> +
> +	/*
> +	 * This worker is only used to flip between ENABLED/DESIRED. Either of
> +	 * those to OFF is handled by core. If hdcp_value == OFF, we're running
> +	 * just after hdcp has been disabled, so just exit
> +	 */
> +	if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_OFF) {
> +		state = connector->base.state;
> +		state->content_protection = connector->hdcp_value;
> +	}
> +
> +	mutex_unlock(&connector->hdcp_mutex);
> +	drm_modeset_unlock(&dev->mode_config.connection_mutex);
> +	mutex_unlock(&dev->mode_config.mutex);
> +}
> +
> +static int intel_hdcp_enable(struct intel_connector *connector)
> +{
> +	int ret;
> +
> +	if (!connector->hdcp_shim)
> +		return -ENOENT;
> +
> +	mutex_lock(&connector->hdcp_mutex);
> +
> +	ret = _intel_hdcp_enable(connector);
> +	if (ret)
> +		goto out;
> +
> +	connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
> +	schedule_work(&connector->hdcp_prop_work);
> +	schedule_delayed_work(&connector->hdcp_check_work,
> +			      DRM_HDCP_CHECK_PERIOD_MS);
> +out:
> +	mutex_unlock(&connector->hdcp_mutex);
> +	return ret;
> +}
> +
> +static int intel_hdcp_disable(struct intel_connector *connector)
> +{
> +	int ret;
> +
> +	if (!connector->hdcp_shim)
> +		return -ENOENT;
> +
> +	mutex_lock(&connector->hdcp_mutex);
> +
> +	connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_OFF;
> +	ret = _intel_hdcp_disable(connector);
> +
> +	mutex_unlock(&connector->hdcp_mutex);
> +	cancel_delayed_work_sync(&connector->hdcp_check_work);
> +	return ret;
> +}
> +
> +void intel_hdcp_atomic_check(struct drm_connector *connector,
> +			     struct drm_connector_state *old_state,
> +			     struct drm_connector_state *new_state)
> +{
> +	uint64_t old_cp = old_state->content_protection;
> +	uint64_t new_cp = new_state->content_protection;
> +
> +	if (!new_state->crtc) {
> +		/*
> +		 * If the connector is being disabled with CP enabled, mark it
> +		 * desired so it's re-enabled when the connector is brought back
> +		 */
> +		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
> +			new_state->content_protection =
> +				DRM_MODE_CONTENT_PROTECTION_DESIRED;
> +		return;
> +	}
> +
> +	/* Only drivers can set content protection enabled */
> +	if (old_cp != DRM_MODE_CONTENT_PROTECTION_ENABLED &&
> +	    new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
> +		new_state->content_protection =
> +			DRM_MODE_CONTENT_PROTECTION_DESIRED;
> +}
> +
> +void intel_hdcp_atomic_pre_commit(struct drm_connector *connector,
> +			          struct drm_connector_state *old_state,
> +			          struct drm_connector_state *new_state)
> +{
> +	uint64_t old_cp = old_state->content_protection;
> +	uint64_t new_cp = new_state->content_protection;
> +
> +	/*
> +	 * Disable HDCP if the connector is becoming disabled, or if requested
> +	 * via the property.
> +	 */
> +	if ((!new_state->crtc && old_cp != DRM_MODE_CONTENT_PROTECTION_OFF) ||
> +	    (new_state->crtc && old_cp != DRM_MODE_CONTENT_PROTECTION_OFF &&
> +	     new_cp == DRM_MODE_CONTENT_PROTECTION_OFF))
> +		intel_hdcp_disable(to_intel_connector(connector));
> +}
> +
> +void intel_hdcp_atomic_commit(struct drm_connector *connector,
> +			      struct drm_connector_state *new_state)
> +{
> +	uint64_t new_cp = new_state->content_protection;
> +
> +	/* Enable hdcp if it's desired */
> +	if (new_state->crtc && new_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED)
> +		intel_hdcp_enable(to_intel_connector(connector));
> +}
> +
> +/* Implements Part 3 of the HDCP authorization procedure */
> +int intel_hdcp_check_link(struct intel_connector *connector)
> +{
> +	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
> +	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
> +	enum port port = intel_dig_port->base.port;
> +	int ret = 0;
> +
> +	if (!connector->hdcp_shim)
> +		return -ENOENT;
> +
> +	mutex_lock(&connector->hdcp_mutex);
> +
> +	if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_OFF)
> +		goto out;
> +
> +	if (!(I915_READ(SKL_PORT_HDCP_STATUS(port)) & SKL_HDCP_STATUS_ENC)) {
> +		DRM_ERROR("HDCP check failed: link is not encrypted, %x\n",
> +			   I915_READ(SKL_PORT_HDCP_STATUS(port)));
> +		ret = -ENXIO;
> +		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
> +		schedule_work(&connector->hdcp_prop_work);
> +		goto out;
> +	}
> +
> +	if (connector->hdcp_shim->check_link(intel_dig_port)) {
> +		if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_OFF) {
> +			connector->hdcp_value =
> +				DRM_MODE_CONTENT_PROTECTION_ENABLED;
> +			schedule_work(&connector->hdcp_prop_work);
> +		}
> +		goto out;
> +	}
> +
> +	DRM_INFO("HDCP link failed, retrying authentication\n");
> +
> +	ret = _intel_hdcp_disable(connector);
> +	if (ret) {
> +		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
> +		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
> +		schedule_work(&connector->hdcp_prop_work);
> +		goto out;
> +	}
> +
> +	ret = _intel_hdcp_enable(connector);
> +	if (ret) {
> +		DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
> +		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
> +		schedule_work(&connector->hdcp_prop_work);
> +		goto out;
> +	}
> +
> +out:
> +	mutex_unlock(&connector->hdcp_mutex);
> +	return ret;
> +}

With the comments addressed:

Reviewed-by: Daniel Vetter <daniel.vetter@ffwll.ch>

The r-b also counts for the reg renaming patch, if you want to do that as a
follow-up in this series :-)
-Daniel

> -- 
> 2.15.0.531.g2ccb3012c9-goog
> 
> _______________________________________________
> dri-devel mailing list
> dri-devel@lists.freedesktop.org
> https://lists.freedesktop.org/mailman/listinfo/dri-devel
diff mbox

Patch

diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index 42bc8bd4ff06..3facea4eefdb 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -107,6 +107,7 @@  i915-y += intel_audio.o \
 	  intel_fbc.o \
 	  intel_fifo_underrun.o \
 	  intel_frontbuffer.o \
+	  intel_hdcp.o \
 	  intel_hotplug.o \
 	  intel_modes.o \
 	  intel_overlay.o \
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 09bf043c1c2e..2bd2cc8441d4 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -8034,6 +8034,7 @@  enum {
 #define     GEN9_MEM_LATENCY_LEVEL_1_5_SHIFT	8
 #define     GEN9_MEM_LATENCY_LEVEL_2_6_SHIFT	16
 #define     GEN9_MEM_LATENCY_LEVEL_3_7_SHIFT	24
+#define   SKL_PCODE_LOAD_HDCP_KEYS		0x5
 #define   SKL_PCODE_CDCLK_CONTROL		0x7
 #define     SKL_CDCLK_PREPARE_FOR_CHANGE	0x3
 #define     SKL_CDCLK_READY_FOR_CHANGE		0x1
@@ -8335,6 +8336,88 @@  enum skl_power_gate {
 #define  SKL_PW_TO_PG(pw)			((pw) - SKL_DISP_PW_1 + SKL_PG1)
 #define  SKL_FUSE_PG_DIST_STATUS(pg)		(1 << (27 - (pg)))
 
+
+/* HDCP Key Registers */
+#define SKL_HDCP_KEY_CONF		_MMIO(0x66c00)
+#define	 SKL_HDCP_AKSV_SEND_TRIGGER	BIT(31)
+#define  SKL_HDCP_CLEAR_KEYS_TRIGGER	BIT(30)
+#define SKL_HDCP_KEY_STATUS		_MMIO(0x66c04)
+#define  SKL_HDCP_FUSE_IN_PROGRESS	BIT(7)
+#define  SKL_HDCP_FUSE_ERROR		BIT(6)
+#define  SKL_HDCP_FUSE_DONE		BIT(5)
+#define  SKL_HDCP_KEY_LOAD_STATUS	BIT(1)
+#define  SKL_HDCP_KEY_LOAD_DONE		BIT(0)
+#define SKL_HDCP_AKSV_LO		_MMIO(0x66c10)
+#define SKL_HDCP_AKSV_HI		_MMIO(0x66c14)
+
+/* HDCP Repeater Registers */
+#define SKL_HDCP_REP_CTL		_MMIO(0x66d00)
+#define  SKL_HDCP_DDIB_REP_PRESENT	BIT(30)
+#define  SKL_HDCP_DDIA_REP_PRESENT	BIT(29)
+#define  SKL_HDCP_DDIC_REP_PRESENT	BIT(28)
+#define  SKL_HDCP_DDID_REP_PRESENT	BIT(27)
+#define  SKL_HDCP_DDIF_REP_PRESENT	BIT(26)
+#define  SKL_HDCP_DDIE_REP_PRESENT	BIT(25)
+#define  SKL_HDCP_DDIB_SHA1_M0		(1 << 20)
+#define  SKL_HDCP_DDIA_SHA1_M0		(2 << 20)
+#define  SKL_HDCP_DDIC_SHA1_M0		(3 << 20)
+#define  SKL_HDCP_DDID_SHA1_M0		(4 << 20)
+#define  SKL_HDCP_DDIF_SHA1_M0		(5 << 20)
+#define  SKL_HDCP_DDIE_SHA1_M0		(6 << 20) /* Bspec says 5? */
+#define  SKL_HDCP_SHA1_BUSY		BIT(16)
+#define  SKL_HDCP_SHA1_READY		BIT(17)
+#define  SKL_HDCP_SHA1_COMPLETE		BIT(18)
+#define  SKL_HDCP_SHA1_V_MATCH		BIT(19)
+#define  SKL_HDCP_SHA1_TEXT_32		(1 << 1)
+#define  SKL_HDCP_SHA1_COMPLETE_HASH	(2 << 1)
+#define  SKL_HDCP_SHA1_TEXT_24		(4 << 1)
+#define  SKL_HDCP_SHA1_TEXT_16		(5 << 1)
+#define  SKL_HDCP_SHA1_TEXT_8		(6 << 1)
+#define  SKL_HDCP_SHA1_TEXT_0		(7 << 1)
+#define SKL_HDCP_SHA_V_PRIME_H0		_MMIO(0x66d04)
+#define SKL_HDCP_SHA_V_PRIME_H1		_MMIO(0x66d08)
+#define SKL_HDCP_SHA_V_PRIME_H2		_MMIO(0x66d0C)
+#define SKL_HDCP_SHA_V_PRIME_H3		_MMIO(0x66d10)
+#define SKL_HDCP_SHA_V_PRIME_H4		_MMIO(0x66d14)
+#define SKL_HDCP_SHA_V_PRIME(h)		_MMIO((0x66d04 + h * 4))
+#define SKL_HDCP_SHA_TEXT		_MMIO(0x66d18)
+
+/* HDCP Auth Registers */
+#define _SKL_PORTA_HDCP_AUTHENC		0x66800
+#define _SKL_PORTB_HDCP_AUTHENC		0x66500
+#define _SKL_PORTC_HDCP_AUTHENC		0x66600
+#define _SKL_PORTD_HDCP_AUTHENC		0x66700
+#define _SKL_PORTE_HDCP_AUTHENC		0x66A00
+#define _SKL_PORTF_HDCP_AUTHENC		0x66900
+#define _SKL_PORT_HDCP_AUTHENC(port, x)	_MMIO(_PICK(port, \
+					  _SKL_PORTA_HDCP_AUTHENC, \
+					  _SKL_PORTB_HDCP_AUTHENC, \
+					  _SKL_PORTC_HDCP_AUTHENC, \
+					  _SKL_PORTD_HDCP_AUTHENC, \
+					  _SKL_PORTE_HDCP_AUTHENC, \
+					  _SKL_PORTF_HDCP_AUTHENC) + x)
+#define SKL_PORT_HDCP_CONF(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x0)
+#define  SKL_HDCP_CONF_CAPTURE_AN	BIT(0)
+#define  SKL_HDCP_CONF_AUTH_AND_ENC	(BIT(1) | BIT(0))
+#define SKL_PORT_HDCP_ANINIT(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x4)
+#define SKL_PORT_HDCP_ANLO(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x8)
+#define SKL_PORT_HDCP_ANHI(port)	_SKL_PORT_HDCP_AUTHENC(port, 0xC)
+#define SKL_PORT_HDCP_BKSVLO(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x10)
+#define SKL_PORT_HDCP_BKSVHI(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x14)
+#define SKL_PORT_HDCP_RPRIME(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x18)
+#define SKL_PORT_HDCP_STATUS(port)	_SKL_PORT_HDCP_AUTHENC(port, 0x1C)
+#define  SKL_HDCP_STATUS_STREAM_A_ENC	BIT(31)
+#define  SKL_HDCP_STATUS_STREAM_B_ENC	BIT(30)
+#define  SKL_HDCP_STATUS_STREAM_C_ENC	BIT(29)
+#define  SKL_HDCP_STATUS_STREAM_D_ENC	BIT(28)
+#define  SKL_HDCP_STATUS_AUTH		BIT(21)
+#define  SKL_HDCP_STATUS_ENC		BIT(20)
+#define  SKL_HDCP_STATUS_RI_MATCH	BIT(19)
+#define  SKL_HDCP_STATUS_R0_READY	BIT(18)
+#define  SKL_HDCP_STATUS_AN_READY	BIT(17)
+#define  SKL_HDCP_STATUS_CIPHER		BIT(16)
+#define  SKL_HDCP_STATUS_FRAME_CNT(x)	((x >> 8) & 0xff)
+
 /* Per-pipe DDI Function Control */
 #define _TRANS_DDI_FUNC_CTL_A		0x60400
 #define _TRANS_DDI_FUNC_CTL_B		0x61400
diff --git a/drivers/gpu/drm/i915/intel_atomic.c b/drivers/gpu/drm/i915/intel_atomic.c
index 36d4e635e4ce..d452c327dc1d 100644
--- a/drivers/gpu/drm/i915/intel_atomic.c
+++ b/drivers/gpu/drm/i915/intel_atomic.c
@@ -110,6 +110,8 @@  int intel_digital_connector_atomic_check(struct drm_connector *conn,
 		to_intel_digital_connector_state(old_state);
 	struct drm_crtc_state *crtc_state;
 
+	intel_hdcp_atomic_check(conn, old_state, new_state);
+
 	if (!new_state->crtc)
 		return 0;
 
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 601c23be8264..f45c468abf98 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12319,6 +12319,8 @@  static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 	struct drm_i915_private *dev_priv = to_i915(dev);
 	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
 	struct drm_crtc *crtc;
+	struct drm_connector_state *old_conn_state, *new_conn_state;
+	struct drm_connector *connector;
 	struct intel_crtc_state *intel_cstate;
 	u64 put_domains[I915_MAX_PIPES] = {};
 	int i;
@@ -12408,9 +12410,17 @@  static void intel_atomic_commit_tail(struct drm_atomic_state *state)
 		}
 	}
 
+	for_each_oldnew_connector_in_state(state, connector, old_conn_state,
+					   new_conn_state, i)
+		intel_hdcp_atomic_pre_commit(connector, old_conn_state,
+					     new_conn_state);
+
 	/* Now enable the clocks, plane, pipe, and connectors that we set up. */
 	dev_priv->display.update_crtcs(state);
 
+	for_each_new_connector_in_state(state, connector, new_conn_state, i)
+		intel_hdcp_atomic_commit(connector, new_conn_state);
+
 	/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
 	 * already, but still need the state for the delayed optimization. To
 	 * fix this:
@@ -15322,6 +15332,10 @@  static void intel_hpd_poll_fini(struct drm_device *dev)
 	for_each_intel_connector_iter(connector, &conn_iter) {
 		if (connector->modeset_retry_work.func)
 			cancel_work_sync(&connector->modeset_retry_work);
+		if (connector->hdcp_shim) {
+			cancel_delayed_work_sync(&connector->hdcp_check_work);
+			cancel_work_sync(&connector->hdcp_prop_work);
+		}
 	}
 	drm_connector_list_iter_end(&conn_iter);
 }
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 852b3d161754..6f47a4227f5f 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -301,6 +301,76 @@  struct intel_panel {
 	} backlight;
 };
 
+/*
+ * This structure serves as a translation layer between the generic HDCP code
+ * and the bus-specific code. What that means is that HDCP over HDMI differs
+ * from HDCP over DP, so to account for these differences, we need to
+ * communicate with the receiver through this shim.
+ *
+ * For completeness, the 2 buses differ in the following ways:
+ *	- DP AUX vs. DDC
+ *		HDCP registers on the receiver are set via DP AUX for DP, and
+ *		they are set via DDC for HDMI.
+ *	- Receiver register offsets
+ *		The offsets of the registers are different for DP vs. HDMI
+ *	- Receiver register masks/offsets
+ *		For instance, the ready bit for the KSV fifo is in a different
+ *		place on DP vs HDMI
+ *	- Receiver register names
+ *		Seriously. In the DP spec, the 16-bit register containing
+ *		downstream information is called BINFO, on HDMI it's called
+ *		BSTATUS. To confuse matters further, DP has a BSTATUS register
+ *		with a completely different definition.
+ *	- KSV FIFO
+ *		On HDMI, the ksv fifo is read all at once, whereas on DP it must
+ *		be read 3 keys at a time
+ *	- Aksv output
+ *		Since Aksv is hidden in hardware, there's different procedures
+ *		to send it over DP AUX vs DDC
+ */
+struct intel_hdcp_shim {
+	/* Outputs the transmitter's An and Aksv values to the receiver. */
+	int (*write_an_aksv)(struct intel_digital_port *intel_dig_port, u8 *an);
+
+	/* Reads the receiver's key selection vector */
+	int (*read_bksv)(struct intel_digital_port *intel_dig_port, u8 *bksv);
+
+	/*
+	 * Reads BINFO from DP receivers and BSTATUS from HDMI receivers. The
+	 * definitions are the same in the respective specs, but the names are
+	 * different. Call it BSTATUS since that's the name the HDMI spec
+	 * uses and it was there first.
+	 */
+	int (*read_bstatus)(struct intel_digital_port *intel_dig_port,
+			    u8 *bstatus);
+
+	/* Determines whether a repeater is present downstream */
+	int (*repeater_present)(struct intel_digital_port *intel_dig_port,
+				bool *repeater_present);
+
+	/* Reads the receiver's Ri' value */
+	int (*read_ri_prime)(struct intel_digital_port *intel_dig_port, u8 *ri);
+
+	/* Determines if the receiver's KSV FIFO is ready for consumption */
+	int (*read_ksv_ready)(struct intel_digital_port *intel_dig_port,
+			      bool *ksv_ready);
+
+	/* Reads the ksv fifo for num_downstream devices */
+	int (*read_ksv_fifo)(struct intel_digital_port *intel_dig_port,
+			     int num_downstream, u8 *ksv_fifo);
+
+	/* Reads a 32-bit part of V' from the receiver */
+	int (*read_v_prime_part)(struct intel_digital_port *intel_dig_port,
+				 int i, u32 *part);
+
+	/* Enables HDCP signalling on the port */
+	int (*toggle_signalling)(struct intel_digital_port *intel_dig_port,
+				 bool enable);
+
+	/* Ensures the link is still protected */
+	bool (*check_link)(struct intel_digital_port *intel_dig_port);
+};
+
 struct intel_connector {
 	struct drm_connector base;
 	/*
@@ -332,6 +402,12 @@  struct intel_connector {
 
 	/* Work struct to schedule a uevent on link train failure */
 	struct work_struct modeset_retry_work;
+
+	const struct intel_hdcp_shim *hdcp_shim;
+	struct mutex hdcp_mutex;
+	uint64_t hdcp_value; /* protected by hdcp_mutex */
+	struct delayed_work hdcp_check_work;
+	struct work_struct hdcp_prop_work;
 };
 
 struct intel_digital_connector_state {
@@ -1763,6 +1839,18 @@  static inline void intel_backlight_device_unregister(struct intel_connector *con
 }
 #endif /* CONFIG_BACKLIGHT_CLASS_DEVICE */
 
+/* intel_hdcp.c */
+void intel_hdcp_atomic_check(struct drm_connector *connector,
+			     struct drm_connector_state *old_state,
+			     struct drm_connector_state *new_state);
+void intel_hdcp_atomic_pre_commit(struct drm_connector *connector,
+			          struct drm_connector_state *old_state,
+			          struct drm_connector_state *new_state);
+void intel_hdcp_atomic_commit(struct drm_connector *connector,
+			      struct drm_connector_state *new_state);
+int intel_hdcp_check_link(struct intel_connector *connector);
+void intel_hdcp_check_work(struct work_struct *work);
+void intel_hdcp_prop_work(struct work_struct *work);
 
 /* intel_psr.c */
 void intel_psr_enable(struct intel_dp *intel_dp,
diff --git a/drivers/gpu/drm/i915/intel_hdcp.c b/drivers/gpu/drm/i915/intel_hdcp.c
new file mode 100644
index 000000000000..4eef9505410f
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_hdcp.c
@@ -0,0 +1,731 @@ 
+/*
+ * Copyright (C) 2017 Google, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Sean Paul <seanpaul@chromium.org>
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_hdcp.h>
+#include <linux/i2c.h>
+#include <linux/random.h>
+
+#include "intel_drv.h"
+#include "i915_reg.h"
+
+#define KEY_LOAD_TRIES	5
+
+static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *intel_dig_port,
+				    const struct intel_hdcp_shim *shim)
+{
+	int ret, read_ret;
+	bool ksv_ready;
+
+	ret = __wait_for(read_ret = shim->read_ksv_ready(intel_dig_port,
+							 &ksv_ready),
+			 read_ret || ksv_ready, 1500 * 1000, 1000, 100 * 1000);
+	if (ret)
+		return ret;
+	if (read_ret)
+		return read_ret;
+	if (!ksv_ready)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
+{
+	I915_WRITE(SKL_HDCP_KEY_CONF, SKL_HDCP_CLEAR_KEYS_TRIGGER);
+	I915_WRITE(SKL_HDCP_KEY_STATUS,
+		   SKL_HDCP_KEY_LOAD_DONE | SKL_HDCP_KEY_LOAD_STATUS |
+		   SKL_HDCP_FUSE_IN_PROGRESS | SKL_HDCP_FUSE_ERROR |
+		   SKL_HDCP_FUSE_DONE);
+}
+
+static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
+{
+	int ret;
+	u32 val;
+
+	/* Initiate loading the HDCP key from fuses */
+	mutex_lock(&dev_priv->pcu_lock);
+	ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1);
+	mutex_unlock(&dev_priv->pcu_lock);
+	if (ret) {
+		DRM_ERROR("Failed to initiate HDCP key load (%d)\n", ret);
+		return ret;
+	}
+
+	/* Wait for the keys to load (500us) */
+	ret = __intel_wait_for_register(dev_priv, SKL_HDCP_KEY_STATUS,
+					SKL_HDCP_KEY_LOAD_DONE,
+					SKL_HDCP_KEY_LOAD_DONE,
+					10, 1, &val);
+	if (ret)
+		return ret;
+	else if (!(val & SKL_HDCP_KEY_LOAD_STATUS))
+		return -ENXIO;
+
+	/* Send Aksv over to PCH display for use in authentication */
+	I915_WRITE(SKL_HDCP_KEY_CONF, SKL_HDCP_AKSV_SEND_TRIGGER);
+
+	return 0;
+}
+
+/* Returns updated SHA-1 index */
+static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
+{
+	I915_WRITE(SKL_HDCP_SHA_TEXT, sha_text);
+	if (intel_wait_for_register(dev_priv, SKL_HDCP_REP_CTL,
+				    SKL_HDCP_SHA1_READY,
+				    SKL_HDCP_SHA1_READY, 1)) {
+		DRM_ERROR("Timed out waiting for SHA1 ready\n");
+		return -ETIMEDOUT;
+	}
+	return 0;
+}
+
+static
+u32 intel_hdcp_get_repeater_ctl(struct intel_digital_port *intel_dig_port)
+{
+	enum port port = intel_dig_port->base.port;
+	switch(port) {
+	case PORT_A:
+		return SKL_HDCP_DDIA_REP_PRESENT | SKL_HDCP_DDIA_SHA1_M0;
+	case PORT_B:
+		return SKL_HDCP_DDIB_REP_PRESENT | SKL_HDCP_DDIB_SHA1_M0;
+	case PORT_C:
+		return SKL_HDCP_DDIC_REP_PRESENT | SKL_HDCP_DDIC_SHA1_M0;
+	case PORT_D:
+		return SKL_HDCP_DDID_REP_PRESENT | SKL_HDCP_DDID_SHA1_M0;
+	case PORT_E:
+		return SKL_HDCP_DDIE_REP_PRESENT | SKL_HDCP_DDIE_SHA1_M0;
+	default:
+		break;
+	}
+	DRM_ERROR("Unknown port %d\n", port);
+	return -EINVAL;
+}
+
+/* Implements Part 2 of the HDCP authorization procedure */
+static
+int intel_hdcp_auth_downstream(struct intel_digital_port *intel_dig_port,
+			       const struct intel_hdcp_shim *shim)
+{
+	struct drm_i915_private *dev_priv;
+	u32 vprime, sha_text, sha_leftovers, rep_ctl;
+	u8 bstatus[2], num_downstream, *ksv_fifo;
+	int ret, i, j, sha_idx;
+
+	dev_priv = intel_dig_port->base.base.dev->dev_private;
+
+	ret = shim->read_bstatus(intel_dig_port, bstatus);
+	if (ret)
+		return ret;
+
+	/* If there are no downstream devices, we're all done. */
+	num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
+	if (num_downstream == 0) {
+		DRM_INFO("HDCP is enabled (no downstream devices)\n");
+		return 0;
+	}
+
+	/* Poll for ksv list ready (spec says max time allowed is 5s) */
+	ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
+	if (ret) {
+		DRM_ERROR("KSV list failed to become ready (%d)\n", ret);
+		return ret;
+	}
+
+	ksv_fifo = kzalloc(num_downstream * DRM_HDCP_KSV_LEN, GFP_KERNEL);
+	if (!ksv_fifo)
+		return -ENOMEM;
+
+	ret = shim->read_ksv_fifo(intel_dig_port, num_downstream, ksv_fifo);
+	if (ret)
+		return ret;
+
+	/* Process V' values from the receiver */
+	for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
+		ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
+		if (ret)
+			return ret;
+		I915_WRITE(SKL_HDCP_SHA_V_PRIME(i), vprime);
+	}
+
+	/*
+	 * We need to write the concatenation of all device KSVs, BINFO (DP) ||
+	 * BSTATUS (HDMI), and M0 (which is added via HDCP_REP_CTL). This byte
+	 * stream is written via the HDCP_SHA_TEXT register in 32-bit
+	 * increments. Every 64 bytes, we need to write HDCP_REP_CTL again. This
+	 * index will keep track of our progress through the 64 bytes as well as
+	 * helping us work the 40-bit KSVs through our 32-bit register.
+	 *
+	 * NOTE: data passed via HDCP_SHA_TEXT should be big-endian
+	 */
+	sha_idx = 0;
+	sha_text = 0;
+	sha_leftovers = 0;
+	rep_ctl = intel_hdcp_get_repeater_ctl(intel_dig_port);
+	I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_32);
+	for (i = 0; i < num_downstream; i++) {
+		unsigned sha_empty;
+		u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
+
+		/* Fill up the empty slots in sha_text and write it out */
+		sha_empty = sizeof(sha_text) - sha_leftovers;
+		for (j = 0; j < sha_empty; j++)
+			sha_text |= ksv[j] << ((sizeof(sha_text) - j - 1) * 8);
+
+		ret = intel_write_sha_text(dev_priv, sha_text);
+		if (ret < 0)
+			return ret;
+
+		/* Programming guide writes this every 64 bytes */
+		sha_idx += sizeof(sha_text);
+		if (!(sha_idx % 64))
+			I915_WRITE(SKL_HDCP_REP_CTL,
+				   rep_ctl | SKL_HDCP_SHA1_TEXT_32);
+
+		/* Store the leftover bytes from the ksv in sha_text */
+		sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
+		sha_text = 0;
+		for (j = 0; j < sha_leftovers; j++)
+			sha_text |= ksv[sha_empty + j] <<
+					((sizeof(sha_text) - j - 1) * 8);
+
+		/*
+		 * If we still have room in sha_text for more data, continue.
+		 * Otherwise, write it out immediately.
+		 */
+		if (sizeof(sha_text) > sha_leftovers)
+			continue;
+
+		ret = intel_write_sha_text(dev_priv, sha_text);
+		if (ret < 0)
+			return ret;
+		sha_leftovers = 0;
+		sha_text = 0;
+		sha_idx += sizeof(sha_text);
+	}
+
+	/*
+	 * We need to write BINFO/BSTATUS, and M0 now. Depending on how many
+	 * bytes are leftover from the last ksv, we might be able to fit them
+	 * all in sha_text (first 2 cases), or we might need to split them up
+	 * into 2 writes (last 2 cases).
+	 */
+	if (sha_leftovers == 0) {
+		/* Write 16 bits of text, 16 bits of M0 */
+		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_16);
+		ret = intel_write_sha_text(dev_priv,
+					   bstatus[0] << 8 | bstatus[1]);
+		if (ret < 0)
+			return ret;
+		sha_idx += sizeof(sha_text);
+
+		/* Write 32 bits of M0 */
+		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_0);
+		ret = intel_write_sha_text(dev_priv, 0);
+		if (ret < 0)
+			return ret;
+		sha_idx += sizeof(sha_text);
+
+		/* Write 16 bits of M0 */
+		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_16);
+		ret = intel_write_sha_text(dev_priv, 0);
+		if (ret < 0)
+			return ret;
+		sha_idx += sizeof(sha_text);
+
+	} else if (sha_leftovers == 1) {
+		/* Write 24 bits of text, 8 bits of M0 */
+		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_24);
+		sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
+		/* Only 24-bits of data, must be in the LSB */
+		sha_text = (sha_text & 0xffffff00) >> 8;
+		ret = intel_write_sha_text(dev_priv, sha_text);
+		if (ret < 0)
+			return ret;
+		sha_idx += sizeof(sha_text);
+
+		/* Write 32 bits of M0 */
+		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_0);
+		ret = intel_write_sha_text(dev_priv, 0);
+		if (ret < 0)
+			return ret;
+		sha_idx += sizeof(sha_text);
+
+		/* Write 24 bits of M0 */
+		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_8);
+		ret = intel_write_sha_text(dev_priv, 0);
+		if (ret < 0)
+			return ret;
+		sha_idx += sizeof(sha_text);
+
+	} else if (sha_leftovers == 2) {
+		/* Write 32 bits of text */
+		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_32);
+		sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
+		ret = intel_write_sha_text(dev_priv, sha_text);
+		if (ret < 0)
+			return ret;
+		sha_idx += sizeof(sha_text);
+
+		/* Write 64 bits of M0 */
+		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_0);
+		for (i = 0; i < 2; i++) {
+			ret = intel_write_sha_text(dev_priv, 0);
+			if (ret < 0)
+				return ret;
+			sha_idx += sizeof(sha_text);
+		}
+	} else if (sha_leftovers == 3) {
+		/* Write 32 bits of text */
+		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_32);
+		sha_text |= bstatus[0] << 24;
+		ret = intel_write_sha_text(dev_priv, sha_text);
+		if (ret < 0)
+			return ret;
+		sha_idx += sizeof(sha_text);
+
+		/* Write 8 bits of text, 24 bits of M0 */
+		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_8);
+		ret = intel_write_sha_text(dev_priv, bstatus[1]);
+		if (ret < 0)
+			return ret;
+		sha_idx += sizeof(sha_text);
+
+		/* Write 32 bits of M0 */
+		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_0);
+		ret = intel_write_sha_text(dev_priv, 0);
+		if (ret < 0)
+			return ret;
+		sha_idx += sizeof(sha_text);
+
+		/* Write 8 bits of M0 */
+		I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_24);
+		ret = intel_write_sha_text(dev_priv, 0);
+		if (ret < 0)
+			return ret;
+		sha_idx += sizeof(sha_text);
+	} else {
+		DRM_ERROR("Invalid number of leftovers %d\n", sha_leftovers);
+		return -EINVAL;
+	}
+
+	I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_TEXT_32);
+	/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
+	while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
+		ret = intel_write_sha_text(dev_priv, 0);
+		if (ret < 0)
+			return ret;
+		sha_idx += sizeof(sha_text);
+	}
+
+	/*
+	 * Last write gets the length of the concatenation in bits. That is:
+	 *  - 5 bytes per device
+	 *  - 10 bytes for BINFO/BSTATUS(2), M0(8)
+	 */
+	sha_text = (num_downstream * 5 + 10) * 8;
+	ret = intel_write_sha_text(dev_priv, sha_text);
+	if (ret < 0)
+		return ret;
+
+	/* Tell the HW we're done with the hash and wait for it to ACK */
+	I915_WRITE(SKL_HDCP_REP_CTL, rep_ctl | SKL_HDCP_SHA1_COMPLETE_HASH);
+	if (intel_wait_for_register(dev_priv, SKL_HDCP_REP_CTL,
+				    SKL_HDCP_SHA1_COMPLETE,
+				    SKL_HDCP_SHA1_COMPLETE, 1)) {
+		DRM_ERROR("Timed out waiting for SHA1 complete\n");
+		return -ETIMEDOUT;
+	}
+	if (!(I915_READ(SKL_HDCP_REP_CTL) & SKL_HDCP_SHA1_V_MATCH)) {
+		DRM_ERROR("SHA-1 mismatch, HDCP failed\n");
+		return -ENXIO;
+	}
+
+	DRM_INFO("HDCP is enabled (%d downstream devices)\n", num_downstream);
+	return 0;
+}
+
+/* Implements Part 1 of the HDCP authorization procedure */
+static int intel_hdcp_auth(struct intel_digital_port *intel_dig_port,
+			   const struct intel_hdcp_shim *shim)
+{
+	struct drm_i915_private *dev_priv;
+	enum port port;
+	unsigned long r0_prime_gen_start;
+	int ret, i;
+	union {
+		u32 reg[2];
+		u8 shim[DRM_HDCP_AN_LEN];
+	} an;
+	union {
+		u32 reg[2];
+		u8 shim[DRM_HDCP_KSV_LEN];
+	} bksv;
+	union {
+		u32 reg;
+		u8 shim[DRM_HDCP_RI_LEN];
+	} ri;
+	bool repeater_present;
+
+	dev_priv = intel_dig_port->base.base.dev->dev_private;
+
+	port = intel_dig_port->base.port;
+
+	/* Initialize An with 2 random values and acquire it */
+	for (i = 0; i < 2; i++)
+		I915_WRITE(SKL_PORT_HDCP_ANINIT(port), get_random_long());
+	I915_WRITE(SKL_PORT_HDCP_CONF(port), SKL_HDCP_CONF_CAPTURE_AN);
+
+	/* Wait for An to be acquired */
+	if (intel_wait_for_register(dev_priv, SKL_PORT_HDCP_STATUS(port),
+				    SKL_HDCP_STATUS_AN_READY,
+				    SKL_HDCP_STATUS_AN_READY, 1)) {
+		DRM_ERROR("Timed out waiting for An\n");
+		return -ETIMEDOUT;
+	}
+
+	an.reg[0] = I915_READ(SKL_PORT_HDCP_ANLO(port));
+	an.reg[1] = I915_READ(SKL_PORT_HDCP_ANHI(port));
+	ret = shim->write_an_aksv(intel_dig_port, an.shim);
+	if (ret)
+		return ret;
+
+	r0_prime_gen_start = jiffies;
+
+	memset(&bksv, 0, sizeof(bksv));
+	ret = shim->read_bksv(intel_dig_port, bksv.shim);
+	if (ret)
+		return ret;
+
+	I915_WRITE(SKL_PORT_HDCP_BKSVLO(port), bksv.reg[0]);
+	I915_WRITE(SKL_PORT_HDCP_BKSVHI(port), bksv.reg[1]);
+
+	ret = shim->repeater_present(intel_dig_port, &repeater_present);
+	if (ret)
+		return ret;
+	if (repeater_present)
+		I915_WRITE(SKL_HDCP_REP_CTL,
+			   intel_hdcp_get_repeater_ctl(intel_dig_port));
+
+	ret = shim->toggle_signalling(intel_dig_port, true);
+	if (ret)
+		return ret;
+
+	I915_WRITE(SKL_PORT_HDCP_CONF(port), SKL_HDCP_CONF_AUTH_AND_ENC);
+
+	/* Wait for R0 ready */
+	if (wait_for(I915_READ(SKL_PORT_HDCP_STATUS(port)) &
+		     (SKL_HDCP_STATUS_R0_READY | SKL_HDCP_STATUS_ENC), 1)) {
+		DRM_ERROR("Timed out waiting for R0 ready\n");
+		return -ETIMEDOUT;
+	}
+
+	/*
+	 * Wait for R0' to become available, the spec says 100ms from Aksv
+	 * write. On DP, there's an R0_READY bit available but no such bit
+	 * exists on HDMI. Since the upper-bound is the same, we'll just do
+	 * the stupid thing instead of polling on one and not the other.
+	 */
+	wait_remaining_ms_from_jiffies(r0_prime_gen_start, 100);
+
+	ri.reg = 0;
+	ret = shim->read_ri_prime(intel_dig_port, ri.shim);
+	if (ret)
+		return ret;
+	I915_WRITE(SKL_PORT_HDCP_RPRIME(port), ri.reg);
+
+	/* Wait for Ri prime match */
+	if (wait_for(I915_READ(SKL_PORT_HDCP_STATUS(port)) &
+		     (SKL_HDCP_STATUS_RI_MATCH | SKL_HDCP_STATUS_ENC), 1)) {
+		DRM_ERROR("Timed out waiting for Ri prime match (%x)\n",
+			  I915_READ(SKL_PORT_HDCP_STATUS(port)));
+		return -ETIMEDOUT;
+	}
+
+	/* Wait for encryption confirmation */
+	if (intel_wait_for_register(dev_priv, SKL_PORT_HDCP_STATUS(port),
+				    SKL_HDCP_STATUS_ENC,
+				    SKL_HDCP_STATUS_ENC, 20)) {
+		DRM_ERROR("Timed out waiting for encryption\n");
+		return -ETIMEDOUT;
+	}
+
+	/*
+	 * XXX: If we have MST-connected devices, we need to enable encryption
+	 * on those as well.
+	 */
+
+	return intel_hdcp_auth_downstream(intel_dig_port, shim);
+}
+
+static
+struct intel_digital_port *conn_to_dig_port(struct intel_connector *connector)
+{
+	return enc_to_dig_port(&intel_attached_encoder(&connector->base)->base);
+}
+
+static int _intel_hdcp_disable(struct intel_connector *connector)
+{
+	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	enum port port = intel_dig_port->base.port;
+	int ret;
+
+	I915_WRITE(SKL_PORT_HDCP_CONF(port), 0);
+	if (intel_wait_for_register(dev_priv, SKL_PORT_HDCP_STATUS(port), ~0, 0,
+				    20)) {
+		DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
+		return -ETIMEDOUT;
+	}
+
+	intel_hdcp_clear_keys(dev_priv);
+
+	ret = connector->hdcp_shim->toggle_signalling(intel_dig_port, false);
+	if (ret) {
+		DRM_ERROR("Failed to disable HDCP signalling\n");
+		return ret;
+	}
+
+	DRM_INFO("HDCP is disabled\n");
+	return 0;
+}
+
+static int _intel_hdcp_enable(struct intel_connector *connector)
+{
+	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+	int i, ret;
+
+	if (!(I915_READ(SKL_FUSE_STATUS) & SKL_FUSE_PG_DIST_STATUS(1))) {
+		DRM_ERROR("PG1 is disabled, cannot load keys\n");
+		return -ENXIO;
+	}
+
+	for (i = 0; i < KEY_LOAD_TRIES; i++) {
+		ret = intel_hdcp_load_keys(dev_priv);
+		if (!ret)
+			break;
+		intel_hdcp_clear_keys(dev_priv);
+	}
+	if (ret) {
+		DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
+		return ret;
+	}
+
+	ret = intel_hdcp_auth(conn_to_dig_port(connector),
+			      connector->hdcp_shim);
+	if (ret) {
+		DRM_ERROR("Failed to authenticate HDCP (%d)\n", ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+void intel_hdcp_check_work(struct work_struct *work)
+{
+	struct intel_connector *connector = container_of(to_delayed_work(work),
+							 struct intel_connector,
+						         hdcp_check_work);
+	if (!intel_hdcp_check_link(connector))
+		schedule_delayed_work(&connector->hdcp_check_work,
+				      DRM_HDCP_CHECK_PERIOD_MS);
+}
+
+void intel_hdcp_prop_work(struct work_struct *work)
+{
+	struct intel_connector *connector = container_of(work,
+							 struct intel_connector,
+						         hdcp_prop_work);
+	struct drm_device *dev = connector->base.dev;
+	struct drm_connector_state *state;
+
+	mutex_lock(&dev->mode_config.mutex);
+	drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+	mutex_lock(&connector->hdcp_mutex);
+
+	/*
+	 * This worker is only used to flip between ENABLED/DESIRED. Either of
+	 * those to OFF is handled by core. If hdcp_value == OFF, we're running
+	 * just after hdcp has been disabled, so just exit
+	 */
+	if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_OFF) {
+		state = connector->base.state;
+		state->content_protection = connector->hdcp_value;
+	}
+
+	mutex_unlock(&connector->hdcp_mutex);
+	drm_modeset_unlock(&dev->mode_config.connection_mutex);
+	mutex_unlock(&dev->mode_config.mutex);
+}
+
+static int intel_hdcp_enable(struct intel_connector *connector)
+{
+	int ret;
+
+	if (!connector->hdcp_shim)
+		return -ENOENT;
+
+	mutex_lock(&connector->hdcp_mutex);
+
+	ret = _intel_hdcp_enable(connector);
+	if (ret)
+		goto out;
+
+	connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
+	schedule_work(&connector->hdcp_prop_work);
+	schedule_delayed_work(&connector->hdcp_check_work,
+			      DRM_HDCP_CHECK_PERIOD_MS);
+out:
+	mutex_unlock(&connector->hdcp_mutex);
+	return ret;
+}
+
+static int intel_hdcp_disable(struct intel_connector *connector)
+{
+	int ret;
+
+	if (!connector->hdcp_shim)
+		return -ENOENT;
+
+	mutex_lock(&connector->hdcp_mutex);
+
+	connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_OFF;
+	ret = _intel_hdcp_disable(connector);
+
+	mutex_unlock(&connector->hdcp_mutex);
+	cancel_delayed_work_sync(&connector->hdcp_check_work);
+	return ret;
+}
+
+void intel_hdcp_atomic_check(struct drm_connector *connector,
+			     struct drm_connector_state *old_state,
+			     struct drm_connector_state *new_state)
+{
+	uint64_t old_cp = old_state->content_protection;
+	uint64_t new_cp = new_state->content_protection;
+
+	if (!new_state->crtc) {
+		/*
+		 * If the connector is being disabled with CP enabled, mark it
+		 * desired so it's re-enabled when the connector is brought back
+		 */
+		if (old_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+			new_state->content_protection =
+				DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		return;
+	}
+
+	/* Only drivers can set content protection enabled */
+	if (old_cp != DRM_MODE_CONTENT_PROTECTION_ENABLED &&
+	    new_cp == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+		new_state->content_protection =
+			DRM_MODE_CONTENT_PROTECTION_DESIRED;
+}
+
+void intel_hdcp_atomic_pre_commit(struct drm_connector *connector,
+			          struct drm_connector_state *old_state,
+			          struct drm_connector_state *new_state)
+{
+	uint64_t old_cp = old_state->content_protection;
+	uint64_t new_cp = new_state->content_protection;
+
+	/*
+	 * Disable HDCP if the connector is becoming disabled, or if requested
+	 * via the property.
+	 */
+	if ((!new_state->crtc && old_cp != DRM_MODE_CONTENT_PROTECTION_OFF) ||
+	    (new_state->crtc && old_cp != DRM_MODE_CONTENT_PROTECTION_OFF &&
+	     new_cp == DRM_MODE_CONTENT_PROTECTION_OFF))
+		intel_hdcp_disable(to_intel_connector(connector));
+}
+
+void intel_hdcp_atomic_commit(struct drm_connector *connector,
+			      struct drm_connector_state *new_state)
+{
+	uint64_t new_cp = new_state->content_protection;
+
+	/* Enable hdcp if it's desired */
+	if (new_state->crtc && new_cp == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+		intel_hdcp_enable(to_intel_connector(connector));
+}
+
+/* Implements Part 3 of the HDCP authorization procedure */
+int intel_hdcp_check_link(struct intel_connector *connector)
+{
+	struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
+	struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+	enum port port = intel_dig_port->base.port;
+	int ret = 0;
+
+	if (!connector->hdcp_shim)
+		return -ENOENT;
+
+	mutex_lock(&connector->hdcp_mutex);
+
+	if (connector->hdcp_value == DRM_MODE_CONTENT_PROTECTION_OFF)
+		goto out;
+
+	if (!(I915_READ(SKL_PORT_HDCP_STATUS(port)) & SKL_HDCP_STATUS_ENC)) {
+		DRM_ERROR("HDCP check failed: link is not encrypted, %x\n",
+			   I915_READ(SKL_PORT_HDCP_STATUS(port)));
+		ret = -ENXIO;
+		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		schedule_work(&connector->hdcp_prop_work);
+		goto out;
+	}
+
+	if (connector->hdcp_shim->check_link(intel_dig_port)) {
+		if (connector->hdcp_value != DRM_MODE_CONTENT_PROTECTION_OFF) {
+			connector->hdcp_value =
+				DRM_MODE_CONTENT_PROTECTION_ENABLED;
+			schedule_work(&connector->hdcp_prop_work);
+		}
+		goto out;
+	}
+
+	DRM_INFO("HDCP link failed, retrying authentication\n");
+
+	ret = _intel_hdcp_disable(connector);
+	if (ret) {
+		DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
+		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		schedule_work(&connector->hdcp_prop_work);
+		goto out;
+	}
+
+	ret = _intel_hdcp_enable(connector);
+	if (ret) {
+		DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
+		connector->hdcp_value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+		schedule_work(&connector->hdcp_prop_work);
+		goto out;
+	}
+
+out:
+	mutex_unlock(&connector->hdcp_mutex);
+	return ret;
+}