diff mbox

[v2] drm/dp/mst: Sideband message transaction to power up/down nodes

Message ID 20170907001458.9399-1-dhinakaran.pandiyan@intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

Dhinakaran Pandiyan Sept. 7, 2017, 12:14 a.m. UTC
The POWER_DOWN_PHY and POWER_UP_PHY sideband message transactions allow
the source to request any node in an MST path or a whole path to be
powered down or up. This allows drivers to target a specific sink in the
MST topology, an improvement over just power managing the immediate
downstream device. Secondly, since the request-reply protocol waits for an
ACK, we can be sure that a downstream sink has enough time to respond to a
power up/down request.

v2: Fix memory leak (Lyude)
Cc: Lyude <lyude@redhat.com>
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Cc: Harry Wentland <harry.wentland@amd.com>
Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
---
 drivers/gpu/drm/drm_dp_mst_topology.c | 75 +++++++++++++++++++++++++++++++++++
 include/drm/drm_dp_mst_helper.h       |  2 +
 2 files changed, 77 insertions(+)

Comments

Lyude Paul Sept. 7, 2017, 6:04 p.m. UTC | #1
Looks good to me.

Reviewed-by: Lyude Paul <lyude@redhat.com>

On Wed, 2017-09-06 at 17:14 -0700, Dhinakaran Pandiyan wrote:
> The POWER_DOWN_PHY and POWER_UP_PHY sideband message transactions
> allow
> the source to reqest any node in a mst path or a whole path to be
> powered down or up. This allows drivers to target a specific sink in
> the
> MST topology, an improvement over just power managing the imediate
> downstream device. Secondly, since the request-reply protocol waits
> for an
> ACK, we can be sure that a downstream sink has enough time to respond
> to a
> power up/down request.
> 
> v2: Fix memory leak (Lyude)
> Cc: Lyude <lyude@redhat.com>
> Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
> Cc: Harry Wentland <harry.wentland@amd.com>
> Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>
> ---
>  drivers/gpu/drm/drm_dp_mst_topology.c | 75
> +++++++++++++++++++++++++++++++++++
>  include/drm/drm_dp_mst_helper.h       |  2 +
>  2 files changed, 77 insertions(+)
> 
> diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c
> b/drivers/gpu/drm/drm_dp_mst_topology.c
> index 41b492f99955..9bc5049e7e59 100644
> --- a/drivers/gpu/drm/drm_dp_mst_topology.c
> +++ b/drivers/gpu/drm/drm_dp_mst_topology.c
> @@ -294,6 +294,12 @@ static void drm_dp_encode_sideband_req(struct
> drm_dp_sideband_msg_req_body *req,
>  		memcpy(&buf[idx], req->u.i2c_write.bytes, req-
> >u.i2c_write.num_bytes);
>  		idx += req->u.i2c_write.num_bytes;
>  		break;
> +
> +	case DP_POWER_DOWN_PHY:
> +	case DP_POWER_UP_PHY:
> +		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
> +		idx++;
> +		break;
>  	}
>  	raw->cur_len = idx;
>  }
> @@ -538,6 +544,22 @@ static bool
> drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_r
>  	return false;
>  }
>  
> +
> +static bool drm_dp_sideband_parse_power_updown_phy_ack(struct
> drm_dp_sideband_msg_rx *raw,
> +						       struct
> drm_dp_sideband_msg_reply_body *repmsg)
> +{
> +	int idx = 1;
> +
> +	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) &
> 0xf;
> +	idx++;
> +	if (idx > raw->curlen) {
> +		DRM_DEBUG_KMS("power up/down phy parse length fail
> %d %d\n",
> +			      idx, raw->curlen);
> +		return false;
> +	}
> +	return true;
> +}
> +
>  static bool drm_dp_sideband_parse_reply(struct
> drm_dp_sideband_msg_rx *raw,
>  					struct
> drm_dp_sideband_msg_reply_body *msg)
>  {
> @@ -567,6 +589,9 @@ static bool drm_dp_sideband_parse_reply(struct
> drm_dp_sideband_msg_rx *raw,
>  		return
> drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
>  	case DP_ALLOCATE_PAYLOAD:
>  		return
> drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
> +	case DP_POWER_DOWN_PHY:
> +	case DP_POWER_UP_PHY:
> +		return
> drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
>  	default:
>  		DRM_ERROR("Got unknown reply 0x%02x\n", msg-
> >req_type);
>  		return false;
> @@ -693,6 +718,22 @@ static int build_allocate_payload(struct
> drm_dp_sideband_msg_tx *msg, int port_n
>  	return 0;
>  }
>  
> +static int build_power_updown_phy(struct drm_dp_sideband_msg_tx
> *msg,
> +				  int port_num, bool power_up)
> +{
> +	struct drm_dp_sideband_msg_req_body req;
> +
> +	if (power_up)
> +		req.req_type = DP_POWER_UP_PHY;
> +	else
> +		req.req_type = DP_POWER_DOWN_PHY;
> +
> +	req.u.port_num.port_number = port_num;
> +	drm_dp_encode_sideband_req(&req, msg);
> +	msg->path_msg = true;
> +	return 0;
> +}
> +
>  static int drm_dp_mst_assign_payload_id(struct
> drm_dp_mst_topology_mgr *mgr,
>  					struct drm_dp_vcpi *vcpi)
>  {
> @@ -1724,6 +1765,40 @@ static int drm_dp_payload_send_msg(struct
> drm_dp_mst_topology_mgr *mgr,
>  	return ret;
>  }
>  
> +int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr
> *mgr,
> +				 struct drm_dp_mst_port *port, bool
> power_up)
> +{
> +	struct drm_dp_sideband_msg_tx *txmsg;
> +	int len, ret;
> +
> +	port = drm_dp_get_validated_port_ref(mgr, port);
> +	if (!port)
> +		return -EINVAL;
> +
> +	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
> +	if (!txmsg) {
> +		drm_dp_put_port(port);
> +		return -ENOMEM;
> +	}
> +
> +	txmsg->dst = port->parent;
> +	len = build_power_updown_phy(txmsg, port->port_num,
> power_up);
> +	drm_dp_queue_down_tx(mgr, txmsg);
> +
> +	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
> +	if (ret > 0) {
> +		if (txmsg->reply.reply_type == 1)
> +			ret = -EINVAL;
> +		else
> +			ret = 0;
> +	}
> +	kfree(txmsg);
> +	drm_dp_put_port(port);
> +
> +	return ret;
> +}
> +EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
> +
>  static int drm_dp_create_payload_step1(struct
> drm_dp_mst_topology_mgr *mgr,
>  				       int id,
>  				       struct drm_dp_payload
> *payload)
> diff --git a/include/drm/drm_dp_mst_helper.h
> b/include/drm/drm_dp_mst_helper.h
> index d55abb75f29a..7f78d26a0766 100644
> --- a/include/drm/drm_dp_mst_helper.h
> +++ b/include/drm/drm_dp_mst_helper.h
> @@ -631,5 +631,7 @@ int drm_dp_atomic_find_vcpi_slots(struct
> drm_atomic_state *state,
>  int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
>  				     struct drm_dp_mst_topology_mgr
> *mgr,
>  				     int slots);
> +int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr
> *mgr,
> +				 struct drm_dp_mst_port *port, bool
> power_up);
>  
>  #endif
Dhinakaran Pandiyan Sept. 7, 2017, 6:54 p.m. UTC | #2
On Thu, 2017-09-07 at 14:04 -0400, Lyude Paul wrote:
> Looks good to me.

> 

> Reviewed-by: Lyude Paul <lyude@redhat.com>

> 



Thanks for the review.
-DK


> On Wed, 2017-09-06 at 17:14 -0700, Dhinakaran Pandiyan wrote:

> > The POWER_DOWN_PHY and POWER_UP_PHY sideband message transactions

> > allow

> > the source to reqest any node in a mst path or a whole path to be

> > powered down or up. This allows drivers to target a specific sink in

> > the

> > MST topology, an improvement over just power managing the imediate

> > downstream device. Secondly, since the request-reply protocol waits

> > for an

> > ACK, we can be sure that a downstream sink has enough time to respond

> > to a

> > power up/down request.

> > 

> > v2: Fix memory leak (Lyude)

> > Cc: Lyude <lyude@redhat.com>

> > Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>

> > Cc: Harry Wentland <harry.wentland@amd.com>

> > Signed-off-by: Dhinakaran Pandiyan <dhinakaran.pandiyan@intel.com>

> > ---

> >  drivers/gpu/drm/drm_dp_mst_topology.c | 75

> > +++++++++++++++++++++++++++++++++++

> >  include/drm/drm_dp_mst_helper.h       |  2 +

> >  2 files changed, 77 insertions(+)

> > 

> > diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c

> > b/drivers/gpu/drm/drm_dp_mst_topology.c

> > index 41b492f99955..9bc5049e7e59 100644

> > --- a/drivers/gpu/drm/drm_dp_mst_topology.c

> > +++ b/drivers/gpu/drm/drm_dp_mst_topology.c

> > @@ -294,6 +294,12 @@ static void drm_dp_encode_sideband_req(struct

> > drm_dp_sideband_msg_req_body *req,

> >  		memcpy(&buf[idx], req->u.i2c_write.bytes, req-

> > >u.i2c_write.num_bytes);

> >  		idx += req->u.i2c_write.num_bytes;

> >  		break;

> > +

> > +	case DP_POWER_DOWN_PHY:

> > +	case DP_POWER_UP_PHY:

> > +		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;

> > +		idx++;

> > +		break;

> >  	}

> >  	raw->cur_len = idx;

> >  }

> > @@ -538,6 +544,22 @@ static bool

> > drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_r

> >  	return false;

> >  }

> >  

> > +

> > +static bool drm_dp_sideband_parse_power_updown_phy_ack(struct

> > drm_dp_sideband_msg_rx *raw,

> > +						       struct

> > drm_dp_sideband_msg_reply_body *repmsg)

> > +{

> > +	int idx = 1;

> > +

> > +	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) &

> > 0xf;

> > +	idx++;

> > +	if (idx > raw->curlen) {

> > +		DRM_DEBUG_KMS("power up/down phy parse length fail

> > %d %d\n",

> > +			      idx, raw->curlen);

> > +		return false;

> > +	}

> > +	return true;

> > +}

> > +

> >  static bool drm_dp_sideband_parse_reply(struct

> > drm_dp_sideband_msg_rx *raw,

> >  					struct

> > drm_dp_sideband_msg_reply_body *msg)

> >  {

> > @@ -567,6 +589,9 @@ static bool drm_dp_sideband_parse_reply(struct

> > drm_dp_sideband_msg_rx *raw,

> >  		return

> > drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);

> >  	case DP_ALLOCATE_PAYLOAD:

> >  		return

> > drm_dp_sideband_parse_allocate_payload_ack(raw, msg);

> > +	case DP_POWER_DOWN_PHY:

> > +	case DP_POWER_UP_PHY:

> > +		return

> > drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);

> >  	default:

> >  		DRM_ERROR("Got unknown reply 0x%02x\n", msg-

> > >req_type);

> >  		return false;

> > @@ -693,6 +718,22 @@ static int build_allocate_payload(struct

> > drm_dp_sideband_msg_tx *msg, int port_n

> >  	return 0;

> >  }

> >  

> > +static int build_power_updown_phy(struct drm_dp_sideband_msg_tx

> > *msg,

> > +				  int port_num, bool power_up)

> > +{

> > +	struct drm_dp_sideband_msg_req_body req;

> > +

> > +	if (power_up)

> > +		req.req_type = DP_POWER_UP_PHY;

> > +	else

> > +		req.req_type = DP_POWER_DOWN_PHY;

> > +

> > +	req.u.port_num.port_number = port_num;

> > +	drm_dp_encode_sideband_req(&req, msg);

> > +	msg->path_msg = true;

> > +	return 0;

> > +}

> > +

> >  static int drm_dp_mst_assign_payload_id(struct

> > drm_dp_mst_topology_mgr *mgr,

> >  					struct drm_dp_vcpi *vcpi)

> >  {

> > @@ -1724,6 +1765,40 @@ static int drm_dp_payload_send_msg(struct

> > drm_dp_mst_topology_mgr *mgr,

> >  	return ret;

> >  }

> >  

> > +int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr

> > *mgr,

> > +				 struct drm_dp_mst_port *port, bool

> > power_up)

> > +{

> > +	struct drm_dp_sideband_msg_tx *txmsg;

> > +	int len, ret;

> > +

> > +	port = drm_dp_get_validated_port_ref(mgr, port);

> > +	if (!port)

> > +		return -EINVAL;

> > +

> > +	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);

> > +	if (!txmsg) {

> > +		drm_dp_put_port(port);

> > +		return -ENOMEM;

> > +	}

> > +

> > +	txmsg->dst = port->parent;

> > +	len = build_power_updown_phy(txmsg, port->port_num,

> > power_up);

> > +	drm_dp_queue_down_tx(mgr, txmsg);

> > +

> > +	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);

> > +	if (ret > 0) {

> > +		if (txmsg->reply.reply_type == 1)

> > +			ret = -EINVAL;

> > +		else

> > +			ret = 0;

> > +	}

> > +	kfree(txmsg);

> > +	drm_dp_put_port(port);

> > +

> > +	return ret;

> > +}

> > +EXPORT_SYMBOL(drm_dp_send_power_updown_phy);

> > +

> >  static int drm_dp_create_payload_step1(struct

> > drm_dp_mst_topology_mgr *mgr,

> >  				       int id,

> >  				       struct drm_dp_payload

> > *payload)

> > diff --git a/include/drm/drm_dp_mst_helper.h

> > b/include/drm/drm_dp_mst_helper.h

> > index d55abb75f29a..7f78d26a0766 100644

> > --- a/include/drm/drm_dp_mst_helper.h

> > +++ b/include/drm/drm_dp_mst_helper.h

> > @@ -631,5 +631,7 @@ int drm_dp_atomic_find_vcpi_slots(struct

> > drm_atomic_state *state,

> >  int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,

> >  				     struct drm_dp_mst_topology_mgr

> > *mgr,

> >  				     int slots);

> > +int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr

> > *mgr,

> > +				 struct drm_dp_mst_port *port, bool

> > power_up);

> >  

> >  #endif

> _______________________________________________

> Intel-gfx mailing list

> Intel-gfx@lists.freedesktop.org

> https://lists.freedesktop.org/mailman/listinfo/intel-gfx
Ville Syrjälä Sept. 11, 2017, 1:33 p.m. UTC | #3
On Wed, Sep 06, 2017 at 05:14:58PM -0700, Dhinakaran Pandiyan wrote:
> The POWER_DOWN_PHY and POWER_UP_PHY sideband message transactions allow
> the source to reqest any node in a mst path or a whole path to be
> powered down or up. This allows drivers to target a specific sink in the
> MST topology, an improvement over just power managing the imediate
> downstream device. Secondly, since the request-reply protocol waits for an
> ACK, we can be sure that a downstream sink has enough time to respond to a
> power up/down request.

I was a bit worried how this handles multiple streams going through
the same MST device, but looks like the spec has accounted for this
by having the device skip the D3 if any active streams are present.

I guess that also means we have to do this after we've turned off the
stream, assuming we want things actually reach D3. Looks like that does
match our current order of things.

Pushed this one to drm-misc-next. Thanks for the patch and review.
Dhinakaran Pandiyan Sept. 11, 2017, 7:10 p.m. UTC | #4
On Mon, 2017-09-11 at 16:33 +0300, Ville Syrjälä wrote:
> On Wed, Sep 06, 2017 at 05:14:58PM -0700, Dhinakaran Pandiyan wrote:

> > The POWER_DOWN_PHY and POWER_UP_PHY sideband message transactions allow

> > the source to reqest any node in a mst path or a whole path to be

> > powered down or up. This allows drivers to target a specific sink in the

> > MST topology, an improvement over just power managing the imediate

> > downstream device. Secondly, since the request-reply protocol waits for an

> > ACK, we can be sure that a downstream sink has enough time to respond to a

> > power up/down request.

> 

> I was a bit worried how this handles multiple streams going through

> the same MST device, but looks like the spec has accounted for this

> by having the device skip the D3 if any active streams are present.

> 

> I guess that also means we have to do this after we've turned off the

> stream, assuming we want things actually reach D3. Looks like that does

> match our current order of things.

> 

> Pushed this one to drm-misc-next. Thanks for the patch and review.

> 

Thanks. Do you see any potential problems with Patch 2/2?
diff mbox

Patch

diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 41b492f99955..9bc5049e7e59 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -294,6 +294,12 @@  static void drm_dp_encode_sideband_req(struct drm_dp_sideband_msg_req_body *req,
 		memcpy(&buf[idx], req->u.i2c_write.bytes, req->u.i2c_write.num_bytes);
 		idx += req->u.i2c_write.num_bytes;
 		break;
+
+	case DP_POWER_DOWN_PHY:
+	case DP_POWER_UP_PHY:
+		buf[idx] = (req->u.port_num.port_number & 0xf) << 4;
+		idx++;
+		break;
 	}
 	raw->cur_len = idx;
 }
@@ -538,6 +544,22 @@  static bool drm_dp_sideband_parse_query_payload_ack(struct drm_dp_sideband_msg_r
 	return false;
 }
 
+
+static bool drm_dp_sideband_parse_power_updown_phy_ack(struct drm_dp_sideband_msg_rx *raw,
+						       struct drm_dp_sideband_msg_reply_body *repmsg)
+{
+	int idx = 1;
+
+	repmsg->u.port_number.port_number = (raw->msg[idx] >> 4) & 0xf;
+	idx++;
+	if (idx > raw->curlen) {
+		DRM_DEBUG_KMS("power up/down phy parse length fail %d %d\n",
+			      idx, raw->curlen);
+		return false;
+	}
+	return true;
+}
+
 static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
 					struct drm_dp_sideband_msg_reply_body *msg)
 {
@@ -567,6 +589,9 @@  static bool drm_dp_sideband_parse_reply(struct drm_dp_sideband_msg_rx *raw,
 		return drm_dp_sideband_parse_enum_path_resources_ack(raw, msg);
 	case DP_ALLOCATE_PAYLOAD:
 		return drm_dp_sideband_parse_allocate_payload_ack(raw, msg);
+	case DP_POWER_DOWN_PHY:
+	case DP_POWER_UP_PHY:
+		return drm_dp_sideband_parse_power_updown_phy_ack(raw, msg);
 	default:
 		DRM_ERROR("Got unknown reply 0x%02x\n", msg->req_type);
 		return false;
@@ -693,6 +718,22 @@  static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
 	return 0;
 }
 
+static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
+				  int port_num, bool power_up)
+{
+	struct drm_dp_sideband_msg_req_body req;
+
+	if (power_up)
+		req.req_type = DP_POWER_UP_PHY;
+	else
+		req.req_type = DP_POWER_DOWN_PHY;
+
+	req.u.port_num.port_number = port_num;
+	drm_dp_encode_sideband_req(&req, msg);
+	msg->path_msg = true;
+	return 0;
+}
+
 static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
 					struct drm_dp_vcpi *vcpi)
 {
@@ -1724,6 +1765,40 @@  static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
 	return ret;
 }
 
+int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
+				 struct drm_dp_mst_port *port, bool power_up)
+{
+	struct drm_dp_sideband_msg_tx *txmsg;
+	int len, ret;
+
+	port = drm_dp_get_validated_port_ref(mgr, port);
+	if (!port)
+		return -EINVAL;
+
+	txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+	if (!txmsg) {
+		drm_dp_put_port(port);
+		return -ENOMEM;
+	}
+
+	txmsg->dst = port->parent;
+	len = build_power_updown_phy(txmsg, port->port_num, power_up);
+	drm_dp_queue_down_tx(mgr, txmsg);
+
+	ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
+	if (ret > 0) {
+		if (txmsg->reply.reply_type == 1)
+			ret = -EINVAL;
+		else
+			ret = 0;
+	}
+	kfree(txmsg);
+	drm_dp_put_port(port);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_dp_send_power_updown_phy);
+
 static int drm_dp_create_payload_step1(struct drm_dp_mst_topology_mgr *mgr,
 				       int id,
 				       struct drm_dp_payload *payload)
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index d55abb75f29a..7f78d26a0766 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -631,5 +631,7 @@  int drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
 int drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
 				     struct drm_dp_mst_topology_mgr *mgr,
 				     int slots);
+int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
+				 struct drm_dp_mst_port *port, bool power_up);
 
 #endif