From patchwork Tue Oct 1 11:38:09 2019
X-Patchwork-Submitter: Mika Westerberg
X-Patchwork-Id: 11168513
From: Mika Westerberg
To: linux-usb@vger.kernel.org
Subject: [RFC PATCH 01/22] thunderbolt: Introduce tb_switch_is_icm()
Date: Tue, 1 Oct 2019 14:38:09 +0300
Message-Id: <20191001113830.13028-2-mika.westerberg@linux.intel.com>
In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com>
References: <20191001113830.13028-1-mika.westerberg@linux.intel.com>

We currently differentiate between SW CM and ICM by looking directly at
the sw->config.enabled field, which may be rather hard to understand for
the casual reader. For this reason, introduce a wrapper function with
documentation that should make the intention clearer.
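
For illustration, a minimal sketch of how a call site reads with the new
helper (tb_demo_configure() is a hypothetical caller, not part of the patch;
it assumes the driver's existing struct tb_switch):

	/* The wrapper added in the hunk below: config.enabled is set only in
	 * the SW CM case (by tb_switch_configure()), so a cleared field means
	 * the ICM firmware is in charge of this switch.
	 */
	static inline bool tb_switch_is_icm(const struct tb_switch *sw)
	{
		return !sw->config.enabled;
	}

	/* Hypothetical caller: reads as "firmware owns this switch, nothing to do" */
	static int tb_demo_configure(struct tb_switch *sw)
	{
		if (tb_switch_is_icm(sw))
			return 0;

		/* ... SW CM specific configuration ... */
		return 0;
	}
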
Signed-off-by: Mika Westerberg --- drivers/thunderbolt/lc.c | 4 ++-- drivers/thunderbolt/switch.c | 4 ++-- drivers/thunderbolt/tb.h | 14 ++++++++++++++ 3 files changed, 18 insertions(+), 4 deletions(-) diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c index ae1e92611c3e..af38076088f6 100644 --- a/drivers/thunderbolt/lc.c +++ b/drivers/thunderbolt/lc.c @@ -94,7 +94,7 @@ int tb_lc_configure_link(struct tb_switch *sw) struct tb_port *up, *down; int ret; - if (!sw->config.enabled || !tb_route(sw)) + if (!tb_route(sw) || tb_switch_is_icm(sw)) return 0; up = tb_upstream_port(sw); @@ -124,7 +124,7 @@ void tb_lc_unconfigure_link(struct tb_switch *sw) { struct tb_port *up, *down; - if (sw->is_unplugged || !sw->config.enabled || !tb_route(sw)) + if (sw->is_unplugged || !tb_route(sw) || tb_switch_is_icm(sw)) return; up = tb_upstream_port(sw); diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 5ea8db667e83..f9efd670d032 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -986,7 +986,7 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active) u32 data; int res; - if (!sw->config.enabled) + if (tb_switch_is_icm(sw)) return 0; sw->config.plug_events_delay = 0xff; @@ -1710,7 +1710,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) } /* Root switch DMA port requires running firmware */ - if (!tb_route(sw) && sw->config.enabled) + if (!tb_route(sw) && !tb_switch_is_icm(sw)) return 0; sw->dma_port = dma_port_alloc(sw); diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 6407d529871d..1565af2e48cb 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -591,6 +591,20 @@ static inline bool tb_switch_is_fr(const struct tb_switch *sw) } } +/** + * tb_switch_is_icm() - Is the switch handled by ICM firmware + * @sw: Switch to check + * + * In case there is a need to differentiate whether ICM firmware or SW CM + * is handling @sw this function can be called. It is valid to call this + * after tb_switch_alloc() and tb_switch_configure() has been called + * (latter only for SW CM case). 
+ */ +static inline bool tb_switch_is_icm(const struct tb_switch *sw) +{ + return !sw->config.enabled; +} + int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); int tb_port_add_nfc_credits(struct tb_port *port, int credits); int tb_port_set_initial_credits(struct tb_port *port, u32 credits); From patchwork Tue Oct 1 11:38:10 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168553 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 2A3221747 for ; Tue, 1 Oct 2019 11:40:16 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 08F8F21A4A for ; Tue, 1 Oct 2019 11:40:16 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1732456AbfJALif (ORCPT ); Tue, 1 Oct 2019 07:38:35 -0400 Received: from mga04.intel.com ([192.55.52.120]:46305 "EHLO mga04.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1725947AbfJALif (ORCPT ); Tue, 1 Oct 2019 07:38:35 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga005.fm.intel.com ([10.253.24.32]) by fmsmga104.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:34 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="391165497" Received: from black.fi.intel.com ([10.237.72.28]) by fmsmga005.fm.intel.com with ESMTP; 01 Oct 2019 04:38:31 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id EB73D1DF; Tue, 1 Oct 2019 14:38:30 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 02/22] thunderbolt: Log switch route string on config read/write timeout Date: Tue, 1 Oct 2019 14:38:10 +0300 Message-Id: <20191001113830.13028-3-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org This helps to point out which switch config read/write triggered the timeout. 
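
To make the new prefix concrete, a stand-alone sketch of the same formatting
with plain printf() (the route, space and offset values are made up):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* Hypothetical route string of a switch a couple of hops away */
		uint64_t route = 0x103;
		unsigned int space = 2, offset = 0x5;

		/* Same format string as the tb_ctl_warn() calls below */
		printf("%llx: timeout reading config space %u from %#x\n",
		       (unsigned long long)route, space, offset);
		return 0;
	}

This prints "103: timeout reading config space 2 from 0x5", so the route
string in the warning identifies which switch timed out.
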
Signed-off-by: Mika Westerberg --- drivers/thunderbolt/ctl.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c index 2ec1af8f7968..d97813e80e5f 100644 --- a/drivers/thunderbolt/ctl.c +++ b/drivers/thunderbolt/ctl.c @@ -962,8 +962,8 @@ int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, return tb_cfg_get_error(ctl, space, &res); case -ETIMEDOUT: - tb_ctl_warn(ctl, "timeout reading config space %u from %#x\n", - space, offset); + tb_ctl_warn(ctl, "%llx: timeout reading config space %u from %#x\n", + route, space, offset); break; default: @@ -988,8 +988,8 @@ int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port, return tb_cfg_get_error(ctl, space, &res); case -ETIMEDOUT: - tb_ctl_warn(ctl, "timeout writing config space %u to %#x\n", - space, offset); + tb_ctl_warn(ctl, "%llx: timeout writing config space %u to %#x\n", + route, space, offset); break; default: From patchwork Tue Oct 1 11:38:11 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168551 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id D5BC5112B for ; Tue, 1 Oct 2019 11:40:15 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id BF45D21A4A for ; Tue, 1 Oct 2019 11:40:15 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1732471AbfJALig (ORCPT ); Tue, 1 Oct 2019 07:38:36 -0400 Received: from mga03.intel.com ([134.134.136.65]:58885 "EHLO mga03.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1725839AbfJALif (ORCPT ); Tue, 1 Oct 2019 07:38:35 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga004.jf.intel.com ([10.7.209.38]) by orsmga103.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:35 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="342949799" Received: from black.fi.intel.com ([10.237.72.28]) by orsmga004.jf.intel.com with ESMTP; 01 Oct 2019 04:38:31 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id F3D811F6; Tue, 1 Oct 2019 14:38:30 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 03/22] thunderbolt: Log warning if adding switch fails Date: Tue, 1 Oct 2019 14:38:11 +0300 Message-Id: <20191001113830.13028-4-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org If we fail to add a switch for some reason log a warning with the error code. This is useful for debugging. 
Signed-off-by: Mika Westerberg --- drivers/thunderbolt/tb.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 1f7a9e1cc09c..541295be9bfc 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -143,6 +143,7 @@ static void tb_scan_port(struct tb_port *port) struct tb_cm *tcm = tb_priv(port->sw->tb); struct tb_port *upstream_port; struct tb_switch *sw; + int ret; if (tb_is_upstream_port(port)) return; @@ -203,7 +204,9 @@ static void tb_scan_port(struct tb_port *port) if (!tcm->hotplug_active) dev_set_uevent_suppress(&sw->dev, true); - if (tb_switch_add(sw)) { + ret = tb_switch_add(sw); + if (ret) { + dev_warn(&sw->dev, "failed to register switch: %d\n", ret); tb_switch_put(sw); return; } From patchwork Tue Oct 1 11:38:12 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168555 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 29F7D112B for ; Tue, 1 Oct 2019 11:40:20 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 126E521D79 for ; Tue, 1 Oct 2019 11:40:20 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1732657AbfJALkP (ORCPT ); Tue, 1 Oct 2019 07:40:15 -0400 Received: from mga06.intel.com ([134.134.136.31]:8631 "EHLO mga06.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1732424AbfJALif (ORCPT ); Tue, 1 Oct 2019 07:38:35 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga001.jf.intel.com ([10.7.209.18]) by orsmga104.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:35 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="274967135" Received: from black.fi.intel.com ([10.237.72.28]) by orsmga001.jf.intel.com with ESMTP; 01 Oct 2019 04:38:31 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id 118E2234; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 04/22] thunderbolt: Make tb_sw_write() take const parameter Date: Tue, 1 Oct 2019 14:38:12 +0300 Message-Id: <20191001113830.13028-5-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org The function does not modify the argument in any way so make it const. 
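
A short sketch of what the const qualifier buys callers (demo_settings and the
offset are made up; it assumes the driver's u32, struct tb_switch and
TB_CFG_SWITCH definitions):

	/* Read-only data can now be handed to tb_sw_write() directly,
	 * without casting away const.
	 */
	static const u32 demo_settings[2] = { 0x1, 0x2 };

	static int demo_write_settings(struct tb_switch *sw)
	{
		return tb_sw_write(sw, demo_settings, TB_CFG_SWITCH, 0x10, 2);
	}
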
Signed-off-by: Mika Westerberg
---
 drivers/thunderbolt/tb.h | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index 1565af2e48cb..455ca490ea87 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -399,7 +399,7 @@ static inline int tb_sw_read(struct tb_switch *sw, void *buffer,
 			   length);
 }
 
-static inline int tb_sw_write(struct tb_switch *sw, void *buffer,
+static inline int tb_sw_write(struct tb_switch *sw, const void *buffer,
 			      enum tb_cfg_space space, u32 offset, u32 length)
 {
 	if (sw->is_unplugged)

From patchwork Tue Oct 1 11:38:13 2019
X-Patchwork-Submitter: Mika Westerberg
X-Patchwork-Id: 11168549
From: Mika Westerberg
To: linux-usb@vger.kernel.org
Subject: [RFC PATCH 05/22] thunderbolt: Add helper macros to iterate over switch ports
Date: Tue, 1 Oct 2019 14:38:13 +0300
Message-Id: <20191001113830.13028-6-mika.westerberg@linux.intel.com>
In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com>
References: <20191001113830.13028-1-mika.westerberg@linux.intel.com>

There are quite many places in the driver where we iterate over each
port in the switch, skipping the control port. To make this a bit more
consistent, provide a set of helper macros that can be used to do this
and convert the existing call sites to use them. No functional changes.
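
For reference, a sketch of the conversion pattern at a call site, given a
struct tb_switch *sw and an int i (tb_demo_handle_port() is a hypothetical
helper; the macros themselves are added to tb.h in the diff below):

	/* Before: open-coded bounds, easy to get the start index wrong */
	for (i = 1; i <= sw->config.max_port_number; i++)
		tb_demo_handle_port(&sw->ports[i]);

	/* After: the intent is explicit and port 0 (the switch itself) is
	 * skipped automatically.
	 */
	tb_switch_for_each_port(sw, i)
		tb_demo_handle_port(&sw->ports[i]);
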
Signed-off-by: Mika Westerberg --- drivers/thunderbolt/icm.c | 4 ++-- drivers/thunderbolt/switch.c | 17 ++++++----------- drivers/thunderbolt/tb.c | 26 +++++++++++++------------- drivers/thunderbolt/tb.h | 35 +++++++++++++++++++++++++++++++++++ drivers/thunderbolt/xdomain.c | 2 +- 5 files changed, 57 insertions(+), 27 deletions(-) diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 245588f691e7..6550f68f92ce 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -1898,7 +1898,7 @@ static void icm_unplug_children(struct tb_switch *sw) if (tb_route(sw)) sw->is_unplugged = true; - for (i = 1; i <= sw->config.max_port_number; i++) { + tb_switch_for_each_port(sw, i) { struct tb_port *port = &sw->ports[i]; if (port->xdomain) @@ -1938,7 +1938,7 @@ static void icm_free_unplugged_children(struct tb_switch *sw) { unsigned int i; - for (i = 1; i <= sw->config.max_port_number; i++) { + tb_switch_for_each_port(sw, i) { struct tb_port *port = &sw->ports[i]; if (port->xdomain && port->xdomain->is_unplugged) { diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index f9efd670d032..cc2670dd2698 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -1386,7 +1386,7 @@ static void tb_switch_release(struct device *dev) dma_port_free(sw->dma_port); - for (i = 1; i <= sw->config.max_port_number; i++) { + tb_switch_for_each_port(sw, i) { if (!sw->ports[i].disabled) { ida_destroy(&sw->ports[i].in_hopids); ida_destroy(&sw->ports[i].out_hopids); @@ -1850,7 +1850,7 @@ void tb_switch_remove(struct tb_switch *sw) } /* port 0 is the switch itself and never has a remote */ - for (i = 1; i <= sw->config.max_port_number; i++) { + tb_switch_for_each_port(sw, i) { if (tb_port_has_remote(&sw->ports[i])) { tb_switch_remove(sw->ports[i].remote->sw); sw->ports[i].remote = NULL; @@ -1886,7 +1886,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw) return; } sw->is_unplugged = true; - for (i = 0; i <= sw->config.max_port_number; i++) { + tb_switch_for_each_port(sw, i) { if (tb_port_has_remote(&sw->ports[i])) tb_sw_set_unplugged(sw->ports[i].remote->sw); else if (sw->ports[i].xdomain) @@ -1944,12 +1944,9 @@ int tb_switch_resume(struct tb_switch *sw) return err; /* check for surviving downstream switches */ - for (i = 1; i <= sw->config.max_port_number; i++) { + tb_switch_for_each_connected_port(sw, i) { struct tb_port *port = &sw->ports[i]; - if (!tb_port_has_remote(port) && !port->xdomain) - continue; - if (tb_wait_for_port(port, true) <= 0) { tb_port_warn(port, "lost during suspend, disconnecting\n"); @@ -1975,10 +1972,8 @@ void tb_switch_suspend(struct tb_switch *sw) if (err) return; - for (i = 1; i <= sw->config.max_port_number; i++) { - if (tb_port_has_remote(&sw->ports[i])) - tb_switch_suspend(sw->ports[i].remote->sw); - } + tb_switch_for_each_remote_port(sw, i) + tb_switch_suspend(sw->ports[i].remote->sw); tb_lc_set_sleep(sw); } diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 541295be9bfc..ab42f0fea787 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -63,7 +63,7 @@ static void tb_discover_tunnels(struct tb_switch *sw) struct tb_port *port; int i; - for (i = 1; i <= sw->config.max_port_number; i++) { + tb_switch_for_each_port(sw, i) { struct tb_tunnel *tunnel = NULL; port = &sw->ports[i]; @@ -95,10 +95,8 @@ static void tb_discover_tunnels(struct tb_switch *sw) list_add_tail(&tunnel->list, &tcm->tunnel_list); } - for (i = 1; i <= sw->config.max_port_number; i++) { - if 
(tb_port_has_remote(&sw->ports[i])) - tb_discover_tunnels(sw->ports[i].remote->sw); - } + tb_switch_for_each_remote_port(sw, i) + tb_discover_tunnels(sw->ports[i].remote->sw); } static void tb_scan_xdomain(struct tb_port *port) @@ -131,7 +129,8 @@ static void tb_scan_port(struct tb_port *port); static void tb_scan_switch(struct tb_switch *sw) { int i; - for (i = 1; i <= sw->config.max_port_number; i++) + + tb_switch_for_each_port(sw, i) tb_scan_port(&sw->ports[i]); } @@ -267,11 +266,9 @@ static void tb_free_invalid_tunnels(struct tb *tb) static void tb_free_unplugged_children(struct tb_switch *sw) { int i; - for (i = 1; i <= sw->config.max_port_number; i++) { - struct tb_port *port = &sw->ports[i]; - if (!tb_port_has_remote(port)) - continue; + tb_switch_for_each_remote_port(sw, i) { + struct tb_port *port = &sw->ports[i]; if (port->remote->sw->is_unplugged) { tb_switch_remove(port->remote->sw); @@ -293,9 +290,12 @@ static struct tb_port *tb_find_port(struct tb_switch *sw, enum tb_port_type type) { int i; - for (i = 1; i <= sw->config.max_port_number; i++) + + tb_switch_for_each_port(sw, i) { if (sw->ports[i].config.type == type) return &sw->ports[i]; + } + return NULL; } @@ -309,7 +309,7 @@ static struct tb_port *tb_find_unused_port(struct tb_switch *sw, { int i; - for (i = 1; i <= sw->config.max_port_number; i++) { + tb_switch_for_each_port(sw, i) { if (tb_is_upstream_port(&sw->ports[i])) continue; if (sw->ports[i].config.type != type) @@ -739,7 +739,7 @@ static int tb_free_unplugged_xdomains(struct tb_switch *sw) { int i, ret = 0; - for (i = 1; i <= sw->config.max_port_number; i++) { + tb_switch_for_each_port(sw, i) { struct tb_port *port = &sw->ports[i]; if (tb_is_upstream_port(port)) diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 455ca490ea87..b723b86f4e72 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -530,6 +530,41 @@ struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid); struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route); +/** + * tb_switch_for_each_port() - Iterate over each switch port + * @sw: Switch whose ports to iterate + * @i: Port index inside @sw->ports[] array + * + * Iterates over each switch port except control port. + */ +#define tb_switch_for_each_port(sw, i) \ + for (i = 1; i <= sw->config.max_port_number; i++) + +/** + * tb_switch_for_each_remote_port() - Iterate over each port that has remote + * @sw: Switch whose ports to iterate + * @i: Port index inside @sw->ports[] array + * + * Iterates over all ports that have another switch connected + * downstream. + */ +#define tb_switch_for_each_remote_port(sw, i) \ + tb_switch_for_each_port(sw, i) \ + if (!tb_port_has_remote(&sw->ports[i])) {} else + +/** + * tb_switch_for_each_connected_port() - Iterate over each connected port + * @sw: Switch whose ports to iterate + * @i: Port index inside @sw->ports[] array + * + * Iterates over each connected port. This means each port that has + * either a switch or remote host (XDomain) connected. 
+ */
+#define tb_switch_for_each_connected_port(sw, i)		\
+	tb_switch_for_each_port(sw, i)				\
+		if (!tb_port_has_remote(&sw->ports[i]) &&	\
+		    !sw->ports[i].xdomain) {} else
+
 static inline struct tb_switch *tb_switch_get(struct tb_switch *sw)
 {
 	if (sw)
diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c
index 4e17a7c7bf0a..37ef0b4da1cf 100644
--- a/drivers/thunderbolt/xdomain.c
+++ b/drivers/thunderbolt/xdomain.c
@@ -1406,7 +1406,7 @@ static struct tb_xdomain *switch_find_xdomain(struct tb_switch *sw,
 {
 	int i;
 
-	for (i = 1; i <= sw->config.max_port_number; i++) {
+	tb_switch_for_each_port(sw, i) {
 		struct tb_port *port = &sw->ports[i];
 		struct tb_xdomain *xd;
 

From patchwork Tue Oct 1 11:38:14 2019
X-Patchwork-Submitter: Mika Westerberg
X-Patchwork-Id: 11168537
From: Mika Westerberg
To: linux-usb@vger.kernel.org
Subject: [RFC PATCH 06/22] thunderbolt: Add support for lane bonding
Date: Tue, 1 Oct 2019 14:38:14 +0300
Message-Id: <20191001113830.13028-7-mika.westerberg@linux.intel.com>
In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com>
References: <20191001113830.13028-1-mika.westerberg@linux.intel.com>

Lane bonding allows aggregating the two 10/20 Gb/s lanes (depending on
the generation) into a single 20/40 Gb/s bonded link. This allows
sharing the full bandwidth more efficiently. In order to establish lane
bonding we need to check through the LC that lane bonding is possible
and that both ends of the link actually support 2x width. This also
means that all paths should be established through the primary port, so
update tb_path_alloc() to handle this as well. Lane bonding is supported
starting from Falcon Ridge (2nd generation) controllers.
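
As a quick illustration of what the new link attributes mean for bandwidth, a
minimal sketch (tb_demo_link_bandwidth() is illustrative only; link_speed and
link_width are the struct tb_switch fields added in the diff below):

	/* link_speed is 10 or 20 Gb/s per lane depending on the generation and
	 * link_width is 1, or 2 once bonding succeeds, so a bonded 20 Gb/s link
	 * gives 20 Gb/s x 2 = 40 Gb/s. The tunnel code in this patch uses the
	 * same product to choose initial buffer credits.
	 */
	static unsigned int tb_demo_link_bandwidth(const struct tb_switch *sw)
	{
		return sw->link_speed * sw->link_width;
	}
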
Signed-off-by: Mika Westerberg --- .../ABI/testing/sysfs-bus-thunderbolt | 17 ++ drivers/thunderbolt/icm.c | 18 +- drivers/thunderbolt/lc.c | 28 ++ drivers/thunderbolt/path.c | 30 +- drivers/thunderbolt/switch.c | 274 ++++++++++++++++++ drivers/thunderbolt/tb.c | 21 ++ drivers/thunderbolt/tb.h | 10 + drivers/thunderbolt/tb_msgs.h | 2 + drivers/thunderbolt/tb_regs.h | 20 ++ drivers/thunderbolt/tunnel.c | 19 +- 10 files changed, 429 insertions(+), 10 deletions(-) diff --git a/Documentation/ABI/testing/sysfs-bus-thunderbolt b/Documentation/ABI/testing/sysfs-bus-thunderbolt index b21fba14689b..2c9166f6fa97 100644 --- a/Documentation/ABI/testing/sysfs-bus-thunderbolt +++ b/Documentation/ABI/testing/sysfs-bus-thunderbolt @@ -104,6 +104,23 @@ Contact: thunderbolt-software@lists.01.org Description: This attribute contains name of this device extracted from the device DROM. +What: /sys/bus/thunderbolt/devices/.../link_speed +Date: Apr 2020 +KernelVersion: 5.6 +Contact: Mika Westerberg +Description: This attribute reports the current upstream link speed + in Gb/s per lane. If there are two lanes they both are + running at the same speed. Use link_width to determine + whether the two lanes are bonded or not. + +What: /sys/bus/thunderbolt/devices/.../link_width +Date: Apr 2020 +KernelVersion: 5.6 +Contact: Mika Westerberg +Description: This attribute reports the current upstream link width. + It is 1 for single lane link (or two single lane links) + and 2 for bonded dual lane link. + What: /sys/bus/thunderbolt/devices/.../vendor Date: Sep 2017 KernelVersion: 4.13 diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 6550f68f92ce..9c9c6ea2b790 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -567,7 +567,8 @@ static struct tb_switch *add_switch(struct tb_switch *parent_sw, u64 route, size_t ep_name_size, u8 connection_id, u8 connection_key, u8 link, u8 depth, enum tb_security_level security_level, - bool authorized, bool boot) + bool authorized, bool boot, bool dual_lane, + bool speed_gen3) { const struct intel_vss *vss; struct tb_switch *sw; @@ -592,6 +593,8 @@ static struct tb_switch *add_switch(struct tb_switch *parent_sw, u64 route, sw->authorized = authorized; sw->security_level = security_level; sw->boot = boot; + sw->link_speed = speed_gen3 ? 20 : 10; + sw->link_width = dual_lane ? 
2 : 1; init_completion(&sw->rpm_complete); vss = parse_intel_vss(ep_name, ep_name_size); @@ -697,11 +700,11 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) (const struct icm_fr_event_device_connected *)hdr; enum tb_security_level security_level; struct tb_switch *sw, *parent_sw; + bool boot, dual_lane, speed_gen3; struct icm *icm = tb_priv(tb); bool authorized = false; struct tb_xdomain *xd; u8 link, depth; - bool boot; u64 route; int ret; @@ -714,6 +717,8 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> ICM_FLAGS_SLEVEL_SHIFT; boot = pkg->link_info & ICM_LINK_INFO_BOOT; + dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE; + speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3; if (pkg->link_info & ICM_LINK_INFO_REJECTED) { tb_info(tb, "switch at %u.%u was rejected by ICM firmware because topology limit exceeded\n", @@ -814,7 +819,7 @@ icm_fr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr) add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, sizeof(pkg->ep_name), pkg->connection_id, pkg->connection_key, link, depth, security_level, - authorized, boot); + authorized, boot, dual_lane, speed_gen3); tb_switch_put(parent_sw); } @@ -1142,10 +1147,10 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr, { const struct icm_tr_event_device_connected *pkg = (const struct icm_tr_event_device_connected *)hdr; + bool authorized, boot, dual_lane, speed_gen3; enum tb_security_level security_level; struct tb_switch *sw, *parent_sw; struct tb_xdomain *xd; - bool authorized, boot; u64 route; icm_postpone_rescan(tb); @@ -1163,6 +1168,8 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr, security_level = (pkg->hdr.flags & ICM_FLAGS_SLEVEL_MASK) >> ICM_FLAGS_SLEVEL_SHIFT; boot = pkg->link_info & ICM_LINK_INFO_BOOT; + dual_lane = pkg->hdr.flags & ICM_FLAGS_DUAL_LANE; + speed_gen3 = pkg->hdr.flags & ICM_FLAGS_SPEED_GEN3; if (pkg->link_info & ICM_LINK_INFO_REJECTED) { tb_info(tb, "switch at %llx was rejected by ICM firmware because topology limit exceeded\n", @@ -1207,7 +1214,8 @@ __icm_tr_device_connected(struct tb *tb, const struct icm_pkg_header *hdr, sw = add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, sizeof(pkg->ep_name), pkg->connection_id, 0, 0, 0, - security_level, authorized, boot); + security_level, authorized, boot, dual_lane, + speed_gen3); if (!IS_ERR(sw) && force_rtd3) sw->rpm = true; diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c index af38076088f6..df56523eb822 100644 --- a/drivers/thunderbolt/lc.c +++ b/drivers/thunderbolt/lc.c @@ -177,3 +177,31 @@ int tb_lc_set_sleep(struct tb_switch *sw) return 0; } + +/** + * tb_lc_lane_bonding_possible() - Is lane bonding possible towards switch + * @sw: Switch to check + * + * Checks whether conditions for lane bonding from parent to @sw are + * possible. 
+ */ +bool tb_lc_lane_bonding_possible(struct tb_switch *sw) +{ + struct tb_port *up; + int cap, ret; + u32 val; + + if (sw->generation < 2) + return false; + + up = tb_upstream_port(sw); + cap = find_port_lc_cap(up); + if (cap < 0) + return false; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, cap + TB_LC_PORT_ATTR, 1); + if (ret) + return false; + + return !!(val & TB_LC_PORT_ATTR_BE); +} diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c index afe5f8391ebf..6cf66597d5d8 100644 --- a/drivers/thunderbolt/path.c +++ b/drivers/thunderbolt/path.c @@ -220,7 +220,8 @@ struct tb_path *tb_path_discover(struct tb_port *src, int src_hopid, * Creates path between two ports starting with given @src_hopid. Reserves * HopIDs for each port (they can be different from @src_hopid depending on * how many HopIDs each port already have reserved). If there are dual - * links on the path, prioritizes using @link_nr. + * links on the path, prioritizes using @link_nr but takes into account + * that the lanes may be bonded. * * Return: Returns a tb_path on success or NULL on failure. */ @@ -259,7 +260,9 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, if (!in_port) goto err; - if (in_port->dual_link_port && in_port->link_nr != link_nr) + /* When lanes are bonded primary link must be used */ + if (!in_port->bonded && in_port->dual_link_port && + in_port->link_nr != link_nr) in_port = in_port->dual_link_port; ret = tb_port_alloc_in_hopid(in_port, in_hopid, in_hopid); @@ -271,8 +274,27 @@ struct tb_path *tb_path_alloc(struct tb *tb, struct tb_port *src, int src_hopid, if (!out_port) goto err; - if (out_port->dual_link_port && out_port->link_nr != link_nr) - out_port = out_port->dual_link_port; + /* + * Pick up right port when going from non-bonded to + * bonded or from bonded to non-bonded. + */ + if (out_port->dual_link_port) { + if (!in_port->bonded && out_port->bonded && + out_port->link_nr) { + /* + * Use primary link when going from + * non-bonded to bonded. + */ + out_port = out_port->dual_link_port; + } else if (!out_port->bonded && + out_port->link_nr != link_nr) { + /* + * If out port is not bonded follow + * link_nr. + */ + out_port = out_port->dual_link_port; + } + } if (i == num_hops - 1) ret = tb_port_alloc_out_hopid(out_port, dst_hopid, diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index cc2670dd2698..2b00ea7a979a 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -775,6 +775,132 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, return next; } +static int tb_port_get_link_speed(struct tb_port *port) +{ + u32 val, speed; + int ret; + + if (!port->cap_phy) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + speed = (val & LANE_ADP_CS_1_CURRENT_SPEED_MASK) >> + LANE_ADP_CS_1_CURRENT_SPEED_SHIFT; + return speed == LANE_ADP_CS_1_CURRENT_SPEED_GEN3 ? 
20 : 10; +} + +static int tb_port_get_link_width(struct tb_port *port) +{ + u32 val; + int ret; + + if (!port->cap_phy) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + return (val & LANE_ADP_CS_1_CURRENT_WIDTH_MASK) >> + LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT; +} + +static bool tb_port_is_width_supported(struct tb_port *port, int width) +{ + u32 phy, widths; + int ret; + + if (!port->cap_phy) + return false; + + ret = tb_port_read(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_0, 1); + if (ret) + return ret; + + widths = (phy & LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK) >> + LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT; + + return !!(widths & width); +} + +static int tb_port_set_link_width(struct tb_port *port, unsigned int width) +{ + u32 val; + int ret; + + if (!port->cap_phy) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + val &= ~LANE_ADP_CS_1_TARGET_WIDTH_MASK; + switch (width) { + case 1: + val |= LANE_ADP_CS_1_TARGET_WIDTH_SINGLE << + LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; + break; + case 2: + val |= LANE_ADP_CS_1_TARGET_WIDTH_DUAL << + LANE_ADP_CS_1_TARGET_WIDTH_SHIFT; + break; + default: + return -EINVAL; + } + + val |= LANE_ADP_CS_1_LB; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); +} + +static int tb_port_lane_bonding_enable(struct tb_port *port) +{ + int ret; + + /* + * Enable lane bonding for both links if not already enabled by + * for example the boot firmware. + */ + ret = tb_port_get_link_width(port); + if (ret == 1) { + ret = tb_port_set_link_width(port, 2); + if (ret) + return ret; + } + + ret = tb_port_get_link_width(port->dual_link_port); + if (ret == 1) { + ret = tb_port_set_link_width(port->dual_link_port, 2); + if (ret) { + tb_port_set_link_width(port, 1); + return ret; + } + } + + port->bonded = true; + port->dual_link_port->bonded = true; + + return 0; +} + +static void tb_port_lane_bonding_disable(struct tb_port *port) +{ + port->dual_link_port->bonded = false; + port->bonded = false; + + tb_port_set_link_width(port->dual_link_port, 1); + tb_port_set_link_width(port, 1); +} + /** * tb_port_is_enabled() - Is the adapter port enabled * @port: Port to check @@ -1166,6 +1292,26 @@ static ssize_t key_store(struct device *dev, struct device_attribute *attr, } static DEVICE_ATTR(key, 0600, key_show, key_store); +static ssize_t link_speed_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + + return sprintf(buf, "%u.0 Gb/s\n", sw->link_speed); + +} +static DEVICE_ATTR_RO(link_speed); + +static ssize_t link_width_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct tb_switch *sw = tb_to_switch(dev); + + return sprintf(buf, "%u\n", sw->link_width); + +} +static DEVICE_ATTR_RO(link_width); + static void nvm_authenticate_start(struct tb_switch *sw) { struct pci_dev *root_port; @@ -1320,6 +1466,8 @@ static struct attribute *switch_attrs[] = { &dev_attr_device.attr, &dev_attr_device_name.attr, &dev_attr_key.attr, + &dev_attr_link_speed.attr, + &dev_attr_link_width.attr, &dev_attr_nvm_authenticate.attr, &dev_attr_nvm_version.attr, &dev_attr_vendor.attr, @@ -1352,6 +1500,11 @@ static umode_t switch_attr_is_visible(struct kobject *kobj, sw->security_level == TB_SECURITY_SECURE) return attr->mode; return 0; + } else if (attr == &dev_attr_link_speed.attr || + attr == &dev_attr_link_width.attr) { + if 
(tb_route(sw)) + return attr->mode; + return 0; } else if (attr == &dev_attr_nvm_authenticate.attr) { if (sw->dma_port && !sw->no_nvm_upgrade) return attr->mode; @@ -1751,6 +1904,123 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) return -ESHUTDOWN; } +static bool tb_switch_lane_bonding_possible(struct tb_switch *sw) +{ + const struct tb_port *up = tb_upstream_port(sw); + + if (!up->dual_link_port || !up->dual_link_port->remote) + return false; + + return tb_lc_lane_bonding_possible(sw); +} + +static int tb_switch_update_link_attributes(struct tb_switch *sw) +{ + struct tb_port *up; + bool change = false; + int ret; + + if (!tb_route(sw) || tb_switch_is_icm(sw)) + return 0; + + up = tb_upstream_port(sw); + + ret = tb_port_get_link_speed(up); + if (ret < 0) + return ret; + if (sw->link_speed != ret) + change = true; + sw->link_speed = ret; + + ret = tb_port_get_link_width(up); + if (ret < 0) + return ret; + if (sw->link_width != ret) + change = true; + sw->link_width = ret; + + /* Notify userspace that there is possible link attribute change */ + if (device_is_registered(&sw->dev) && change) + kobject_uevent(&sw->dev.kobj, KOBJ_CHANGE); + + return 0; +} + +/** + * tb_switch_lane_bonding_enable() - Enable lane bonding + * @sw: Switch to enable lane bonding + * + * Connection manager can call this function to enable lane bonding of a + * switch. If conditions are correct and both switches support the feature, + * lanes are bonded. It is safe to call this to any switch. + */ +int tb_switch_lane_bonding_enable(struct tb_switch *sw) +{ + struct tb_switch *parent = tb_to_switch(sw->dev.parent); + struct tb_port *up, *down; + u64 route = tb_route(sw); + int ret; + + if (!route) + return 0; + + if (!tb_switch_lane_bonding_possible(sw)) + return 0; + + up = tb_upstream_port(sw); + down = tb_port_at(route, parent); + + if (!tb_port_is_width_supported(up, 2) || + !tb_port_is_width_supported(down, 2)) + return 0; + + ret = tb_port_lane_bonding_enable(up); + if (ret) { + tb_port_warn(up, "failed to enable lane bonding\n"); + return ret; + } + + ret = tb_port_lane_bonding_enable(down); + if (ret) { + tb_port_warn(down, "failed to enable lane bonding\n"); + tb_port_lane_bonding_disable(up); + return ret; + } + + tb_switch_update_link_attributes(sw); + + tb_sw_dbg(sw, "lane bonding enabled\n"); + return ret; +} + +/** + * tb_switch_lane_bonding_disable() - Disable lane bonding + * @sw: Switch whose lane bonding to disable + * + * Disables lane bonding between @sw and parent. This can be called even + * if lanes were not bonded originally. 
+ */ +void tb_switch_lane_bonding_disable(struct tb_switch *sw) +{ + struct tb_switch *parent = tb_to_switch(sw->dev.parent); + struct tb_port *up, *down; + + if (!tb_route(sw)) + return; + + up = tb_upstream_port(sw); + if (!up->bonded) + return; + + down = tb_port_at(tb_route(sw), parent); + + tb_port_lane_bonding_disable(up); + tb_port_lane_bonding_disable(down); + + tb_switch_update_link_attributes(sw); + tb_sw_dbg(sw, "lane bonding disabled\n"); +} + /** * tb_switch_add() - Add a switch to the domain * @sw: Switch to add @@ -1800,6 +2070,10 @@ int tb_switch_add(struct tb_switch *sw) if (ret) return ret; } + + ret = tb_switch_update_link_attributes(sw); + if (ret) + return ret; } ret = device_add(&sw->dev); diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index ab42f0fea787..dbbe9afb9fb7 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -219,6 +219,10 @@ static void tb_scan_port(struct tb_port *port) upstream_port->dual_link_port->remote = port->dual_link_port; } + /* Enable lane bonding if supported */ + if (tb_switch_lane_bonding_enable(sw)) + tb_sw_warn(sw, "failed to enable lane bonding\n"); + tb_scan_switch(sw); } @@ -271,6 +275,7 @@ static void tb_free_unplugged_children(struct tb_switch *sw) struct tb_port *port = &sw->ports[i]; if (port->remote->sw->is_unplugged) { + tb_switch_lane_bonding_disable(port->remote->sw); tb_switch_remove(port->remote->sw); port->remote = NULL; if (port->dual_link_port) @@ -536,6 +541,7 @@ static void tb_handle_hotplug(struct work_struct *work) tb_port_dbg(port, "switch unplugged\n"); tb_sw_set_unplugged(port->remote->sw); tb_free_invalid_tunnels(tb); + tb_switch_lane_bonding_disable(port->remote->sw); tb_switch_remove(port->remote->sw); port->remote = NULL; if (port->dual_link_port) @@ -705,6 +711,20 @@ static int tb_suspend_noirq(struct tb *tb) return 0; } +static void tb_restore_children(struct tb_switch *sw) +{ + int i; + + tb_switch_for_each_remote_port(sw, i) { + struct tb_port *port = &sw->ports[i]; + + if (tb_switch_lane_bonding_enable(port->remote->sw)) + dev_warn(&sw->dev, "failed to restore lane bonding\n"); + + tb_restore_children(port->remote->sw); + } +} + static int tb_resume_noirq(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); @@ -718,6 +738,7 @@ static int tb_resume_noirq(struct tb *tb) tb_switch_resume(tb->root_switch); tb_free_invalid_tunnels(tb); tb_free_unplugged_children(tb->root_switch); + tb_restore_children(tb->root_switch); list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) tb_tunnel_restart(tunnel); if (!list_empty(&tcm->tunnel_list)) { diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index b723b86f4e72..e641dcebd50a 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -61,6 +61,8 @@ struct tb_switch_nvm { * @device: Device ID of the switch * @vendor_name: Name of the vendor (or %NULL if not known) * @device_name: Name of the device (or %NULL if not known) + * @link_speed: Speed of the link in Gb/s + * @link_width: Width of the link (1 or 2) * @generation: Switch Thunderbolt generation * @cap_plug_events: Offset to the plug events capability (%0 if not found) * @cap_lc: Offset to the link controller capability (%0 if not found) @@ -97,6 +99,8 @@ struct tb_switch { u16 device; const char *vendor_name; const char *device_name; + unsigned int link_speed; + unsigned int link_width; unsigned int generation; int cap_plug_events; int cap_lc; @@ -127,6 +131,7 @@ struct tb_switch { * @cap_adap: Offset of the adapter specific capability (%0 if not 
present) * @port: Port number on switch * @disabled: Disabled by eeprom + * @bonded: true if the port is bonded (two lanes combined as one) * @dual_link_port: If the switch is connected using two ports, points * to the other port. * @link_nr: Is this primary or secondary port on the dual_link. @@ -142,6 +147,7 @@ struct tb_port { int cap_adap; u8 port; bool disabled; + bool bonded; struct tb_port *dual_link_port; u8 link_nr:1; struct ida in_hopids; @@ -640,6 +646,9 @@ static inline bool tb_switch_is_icm(const struct tb_switch *sw) return !sw->config.enabled; } +int tb_switch_lane_bonding_enable(struct tb_switch *sw); +void tb_switch_lane_bonding_disable(struct tb_switch *sw); + int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); int tb_port_add_nfc_credits(struct tb_port *port, int credits); int tb_port_set_initial_credits(struct tb_port *port, u32 credits); @@ -683,6 +692,7 @@ int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid); int tb_lc_configure_link(struct tb_switch *sw); void tb_lc_unconfigure_link(struct tb_switch *sw); int tb_lc_set_sleep(struct tb_switch *sw); +bool tb_lc_lane_bonding_possible(struct tb_switch *sw); static inline int tb_route_length(u64 route) { diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h index 4b641e4ee0c5..3705057723b6 100644 --- a/drivers/thunderbolt/tb_msgs.h +++ b/drivers/thunderbolt/tb_msgs.h @@ -122,6 +122,8 @@ struct icm_pkg_header { #define ICM_FLAGS_NO_KEY BIT(1) #define ICM_FLAGS_SLEVEL_SHIFT 3 #define ICM_FLAGS_SLEVEL_MASK GENMASK(4, 3) +#define ICM_FLAGS_DUAL_LANE BIT(5) +#define ICM_FLAGS_SPEED_GEN3 BIT(7) #define ICM_FLAGS_WRITE BIT(7) struct icm_pkg_driver_ready { diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index deb9d4a977b9..6d4e072f1f63 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -219,6 +219,23 @@ struct tb_regs_port_header { #define TB_PORT_LCA_SHIFT 22 #define TB_PORT_LCA_MASK GENMASK(28, 22) +/* Lane adapter registers */ +#define LANE_ADP_CS_0 0x00 +#define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20) +#define LANE_ADP_CS_0_SUPPORTED_WIDTH_SHIFT 20 +#define LANE_ADP_CS_1 0x01 +#define LANE_ADP_CS_1_TARGET_WIDTH_MASK GENMASK(9, 4) +#define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4 +#define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1 +#define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3 +#define LANE_ADP_CS_1_LB BIT(15) +#define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16) +#define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16 +#define LANE_ADP_CS_1_CURRENT_SPEED_GEN2 0x8 +#define LANE_ADP_CS_1_CURRENT_SPEED_GEN3 0x4 +#define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20) +#define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20 + /* Display Port adapter registers */ /* DWORD 0 */ @@ -280,6 +297,9 @@ struct tb_regs_hop { #define TB_LC_FUSE 0x03 /* Link controller registers */ +#define TB_LC_PORT_ATTR 0x8d +#define TB_LC_PORT_ATTR_BE BIT(12) + #define TB_LC_SX_CTRL 0x96 #define TB_LC_SX_CTRL_L1C BIT(16) #define TB_LC_SX_CTRL_L2C BIT(20) diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 5a99234826e7..ff55a114825a 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -90,6 +90,22 @@ static int tb_pci_activate(struct tb_tunnel *tunnel, bool activate) return 0; } +static int tb_initial_credits(const struct tb_switch *sw) +{ + /* If the path is complete sw is not NULL */ + if (sw) { + /* More credits for faster link */ + switch (sw->link_speed * sw->link_width) { + case 40: + return 32; + case 20: + return 24; + 
}
+	}
+
+	return 16;
+}
+
 static void tb_pci_init_path(struct tb_path *path)
 {
 	path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL;
@@ -101,7 +117,8 @@ static void tb_pci_init_path(struct tb_path *path)
 	path->drop_packages = 0;
 	path->nfc_credits = 0;
 	path->hops[0].initial_credits = 7;
-	path->hops[1].initial_credits = 16;
+	path->hops[1].initial_credits =
+		tb_initial_credits(path->hops[1].in_port->sw);
 }
 
 /**

From patchwork Tue Oct 1 11:38:15 2019
X-Patchwork-Submitter: Mika Westerberg
X-Patchwork-Id: 11168535
From: Mika Westerberg
To: linux-usb@vger.kernel.org
Subject: [RFC PATCH 07/22] thunderbolt: Add default linking between ports if not provided by DROM
Date: Tue, 1 Oct 2019 14:38:15 +0300
Message-Id: <20191001113830.13028-8-mika.westerberg@linux.intel.com>
In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com>
References: <20191001113830.13028-1-mika.westerberg@linux.intel.com>

In some cases the DROM information is not correct or is simply missing.
This prevents establishing lane bonding even if it would otherwise be
possible. To make this work better, provide default linking between
ports if the DROM has not provided that information. This works both
with legacy devices, where ports 1 and 2, and 3 and 4, are linked
together, and with USB4.

Signed-off-by: Mika Westerberg
---
 drivers/thunderbolt/eeprom.c | 11 -----------
 drivers/thunderbolt/switch.c | 32 ++++++++++++++++++++++++++++++++
 2 files changed, 32 insertions(+), 11 deletions(-)

diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c
index ee5196479854..8dd7de0cc826 100644
--- a/drivers/thunderbolt/eeprom.c
+++ b/drivers/thunderbolt/eeprom.c
@@ -514,17 +514,6 @@ int tb_drom_read(struct tb_switch *sw)
 		 * no entries). Hardcode the configuration here.
*/ tb_drom_read_uid_only(sw, &sw->uid); - - sw->ports[1].link_nr = 0; - sw->ports[2].link_nr = 1; - sw->ports[1].dual_link_port = &sw->ports[2]; - sw->ports[2].dual_link_port = &sw->ports[1]; - - sw->ports[3].link_nr = 0; - sw->ports[4].link_nr = 1; - sw->ports[3].dual_link_port = &sw->ports[4]; - sw->ports[4].dual_link_port = &sw->ports[3]; - return 0; } diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 2b00ea7a979a..f7547287be68 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -1904,6 +1904,36 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) return -ESHUTDOWN; } +static void tb_switch_default_link_ports(struct tb_switch *sw) +{ + int i; + + for (i = 1; i <= sw->config.max_port_number; i += 2) { + struct tb_port *port = &sw->ports[i]; + struct tb_port *subordinate; + + if (!tb_port_is_null(port)) + continue; + + /* Check for the subordinate port */ + if (i == sw->config.max_port_number || + !tb_port_is_null(&sw->ports[i + 1])) + continue; + + /* Link them if not already done so (by DROM) */ + subordinate = &sw->ports[i + 1]; + if (!port->dual_link_port && !subordinate->dual_link_port) { + port->link_nr = 0; + port->dual_link_port = subordinate; + subordinate->link_nr = 1; + subordinate->dual_link_port = port; + + tb_sw_dbg(sw, "linked ports %d <-> %d\n", + port->port, subordinate->port); + } + } +} + static bool tb_switch_lane_bonding_possible(struct tb_switch *sw) { const struct tb_port *up = tb_upstream_port(sw); @@ -2071,6 +2101,8 @@ int tb_switch_add(struct tb_switch *sw) return ret; } + tb_switch_default_link_ports(sw); + ret = tb_switch_update_link_attributes(sw); if (ret) return ret; From patchwork Tue Oct 1 11:38:16 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168545 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 7DF4D112B for ; Tue, 1 Oct 2019 11:39:55 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 6635A21A4C for ; Tue, 1 Oct 2019 11:39:55 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1733252AbfJALjy (ORCPT ); Tue, 1 Oct 2019 07:39:54 -0400 Received: from mga17.intel.com ([192.55.52.151]:27909 "EHLO mga17.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1732568AbfJALik (ORCPT ); Tue, 1 Oct 2019 07:38:40 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga007.jf.intel.com ([10.7.209.58]) by fmsmga107.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:38 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="181663251" Received: from black.fi.intel.com ([10.237.72.28]) by orsmga007.jf.intel.com with ESMTP; 01 Oct 2019 04:38:35 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id 3C8B0440; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 08/22] thunderbolt: Add downstream PCIe port mappings for Alpine and Titan Ridge Date: Tue, 1 Oct 2019 14:38:16 +0300 
Message-Id: <20191001113830.13028-9-mika.westerberg@linux.intel.com>
In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com>
References: <20191001113830.13028-1-mika.westerberg@linux.intel.com>

In order to keep PCIe hierarchies consistent across hotplugs, add a
hard-coded PCIe downstream port to Thunderbolt port mapping for Alpine
Ridge and Titan Ridge as well.

Signed-off-by: Mika Westerberg
---
 drivers/thunderbolt/tb.c |  4 +++-
 drivers/thunderbolt/tb.h | 25 +++++++++++++++++++++++++
 2 files changed, 28 insertions(+), 1 deletion(-)

diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c
index dbbe9afb9fb7..704455a4f763 100644
--- a/drivers/thunderbolt/tb.c
+++ b/drivers/thunderbolt/tb.c
@@ -344,10 +344,12 @@ static struct tb_port *tb_find_pcie_down(struct tb_switch *sw,
 	 * Hard-coded Thunderbolt port to PCIe down port mapping
 	 * per controller.
 	 */
-	if (tb_switch_is_cr(sw))
+	if (tb_switch_is_cr(sw) || tb_switch_is_ar(sw))
 		index = !phy_port ? 6 : 7;
 	else if (tb_switch_is_fr(sw))
 		index = !phy_port ? 6 : 8;
+	else if (tb_switch_is_tr(sw))
+		index = !phy_port ? 8 : 9;
 	else
 		goto out;
 
diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h
index e641dcebd50a..dbab06551eaa 100644
--- a/drivers/thunderbolt/tb.h
+++ b/drivers/thunderbolt/tb.h
@@ -632,6 +632,31 @@ static inline bool tb_switch_is_fr(const struct tb_switch *sw)
 	}
 }
 
+static inline bool tb_switch_is_ar(const struct tb_switch *sw)
+{
+	switch (sw->config.device_id) {
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE:
+		return true;
+	default:
+		return false;
+	}
+}
+
+static inline bool tb_switch_is_tr(const struct tb_switch *sw)
+{
+	switch (sw->config.device_id) {
+	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE:
+	case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE:
+		return true;
+	default:
+		return false;
+	}
+}
+
 /**
  * tb_switch_is_icm() - Is the switch handled by ICM firmware
  * @sw: Switch to check

From patchwork Tue Oct 1 11:38:17 2019
X-Patchwork-Submitter: Mika Westerberg
X-Patchwork-Id: 11168541
orsmga007.jf.intel.com with ESMTP; 01 Oct 2019 04:38:35 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id 452B948B; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 09/22] thunderbolt: Convert basic adapter register names to follow the USB4 spec Date: Tue, 1 Oct 2019 14:38:17 +0300 Message-Id: <20191001113830.13028-10-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org Now that USB4 spec has names for these basic registers we can use them instead. This makes it easier to match certain register to the spec. No functional changes. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/switch.c | 18 +++++++++--------- drivers/thunderbolt/tb_regs.h | 15 ++++++++------- drivers/thunderbolt/tunnel.c | 10 +++++----- 3 files changed, 22 insertions(+), 21 deletions(-) diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index f7547287be68..8d17398e3349 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -553,17 +553,17 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits) if (credits == 0 || port->sw->is_unplugged) return 0; - nfc_credits = port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK; + nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK; nfc_credits += credits; - tb_port_dbg(port, "adding %d NFC credits to %lu", - credits, port->config.nfc_credits & TB_PORT_NFC_CREDITS_MASK); + tb_port_dbg(port, "adding %d NFC credits to %lu", credits, + port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK); - port->config.nfc_credits &= ~TB_PORT_NFC_CREDITS_MASK; + port->config.nfc_credits &= ~ADP_CS_4_NFC_BUFFERS_MASK; port->config.nfc_credits |= nfc_credits; return tb_port_write(port, &port->config.nfc_credits, - TB_CFG_PORT, 4, 1); + TB_CFG_PORT, ADP_CS_4, 1); } /** @@ -578,14 +578,14 @@ int tb_port_set_initial_credits(struct tb_port *port, u32 credits) u32 data; int ret; - ret = tb_port_read(port, &data, TB_CFG_PORT, 5, 1); + ret = tb_port_read(port, &data, TB_CFG_PORT, ADP_CS_5, 1); if (ret) return ret; - data &= ~TB_PORT_LCA_MASK; - data |= (credits << TB_PORT_LCA_SHIFT) & TB_PORT_LCA_MASK; + data &= ~ADP_CS_5_LCA_MASK; + data |= (credits << ADP_CS_5_LCA_SHIFT) & ADP_CS_5_LCA_MASK; - return tb_port_write(port, &data, TB_CFG_PORT, 5, 1); + return tb_port_write(port, &data, TB_CFG_PORT, ADP_CS_5, 1); } /** diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index 6d4e072f1f63..0ac22fc26a5f 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -211,13 +211,14 @@ struct tb_regs_port_header { } __packed; -/* DWORD 4 */ -#define TB_PORT_NFC_CREDITS_MASK GENMASK(19, 0) -#define TB_PORT_MAX_CREDITS_SHIFT 20 -#define TB_PORT_MAX_CREDITS_MASK GENMASK(26, 20) -/* DWORD 5 */ -#define TB_PORT_LCA_SHIFT 22 -#define TB_PORT_LCA_MASK GENMASK(28, 22) +/* Basic adapter configuration registers */ +#define ADP_CS_4 0x04 +#define ADP_CS_4_NFC_BUFFERS_MASK GENMASK(9, 0) +#define ADP_CS_4_TOTAL_BUFFERS_MASK GENMASK(29, 20) 
+#define ADP_CS_4_TOTAL_BUFFERS_SHIFT 20 +#define ADP_CS_5 0x05 +#define ADP_CS_5_LCA_MASK GENMASK(28, 22) +#define ADP_CS_5_LCA_SHIFT 22 /* Lane adapter registers */ #define LANE_ADP_CS_0 0x00 diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index ff55a114825a..c901fa488478 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -341,12 +341,12 @@ static void tb_dp_init_video_path(struct tb_path *path, bool discover) path->weight = 1; if (discover) { - path->nfc_credits = nfc_credits & TB_PORT_NFC_CREDITS_MASK; + path->nfc_credits = nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK; } else { u32 max_credits; - max_credits = (nfc_credits & TB_PORT_MAX_CREDITS_MASK) >> - TB_PORT_MAX_CREDITS_SHIFT; + max_credits = (nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >> + ADP_CS_4_TOTAL_BUFFERS_SHIFT; /* Leave some credits for AUX path */ path->nfc_credits = min(max_credits - 2, 12U); } @@ -495,8 +495,8 @@ static u32 tb_dma_credits(struct tb_port *nhi) { u32 max_credits; - max_credits = (nhi->config.nfc_credits & TB_PORT_MAX_CREDITS_MASK) >> - TB_PORT_MAX_CREDITS_SHIFT; + max_credits = (nhi->config.nfc_credits & ADP_CS_4_TOTAL_BUFFERS_MASK) >> + ADP_CS_4_TOTAL_BUFFERS_SHIFT; return min(max_credits, 13U); } From patchwork Tue Oct 1 11:38:18 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168543 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 714D714DB for ; Tue, 1 Oct 2019 11:39:53 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 59BB021D81 for ; Tue, 1 Oct 2019 11:39:53 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1732589AbfJALik (ORCPT ); Tue, 1 Oct 2019 07:38:40 -0400 Received: from mga02.intel.com ([134.134.136.20]:39070 "EHLO mga02.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1732559AbfJALij (ORCPT ); Tue, 1 Oct 2019 07:38:39 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by orsmga101.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:38 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="191434929" Received: from black.fi.intel.com ([10.237.72.28]) by fmsmga007.fm.intel.com with ESMTP; 01 Oct 2019 04:38:35 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id 4D65C4E5; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 10/22] thunderbolt: Convert PCIe adapter register names to use USB4 names Date: Tue, 1 Oct 2019 14:38:18 +0300 Message-Id: <20191001113830.13028-11-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org Now that USB4 spec has names for these PCIe adapter registers we can use them 
instead. This makes it easier to match certain register to the spec. No functional changes. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/switch.c | 10 ++++++---- drivers/thunderbolt/tb_regs.h | 4 ++-- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 8d17398e3349..2079e6065038 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -929,10 +929,11 @@ bool tb_pci_port_is_enabled(struct tb_port *port) { u32 data; - if (tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap, 1)) + if (tb_port_read(port, &data, TB_CFG_PORT, + port->cap_adap + ADP_PCIE_CS_0, 1)) return false; - return !!(data & TB_PCI_EN); + return !!(data & ADP_PCIE_CS_0_PE); } /** @@ -942,10 +943,11 @@ bool tb_pci_port_is_enabled(struct tb_port *port) */ int tb_pci_port_enable(struct tb_port *port, bool enable) { - u32 word = enable ? TB_PCI_EN : 0x0; + u32 word = enable ? ADP_PCIE_CS_0_PE : 0x0; if (!port->cap_adap) return -ENXIO; - return tb_port_write(port, &word, TB_CFG_PORT, port->cap_adap, 1); + return tb_port_write(port, &word, TB_CFG_PORT, + port->cap_adap + ADP_PCIE_CS_0, 1); } /** diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index 0ac22fc26a5f..cd03d160634c 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -258,8 +258,8 @@ struct tb_regs_port_header { #define TB_DP_REMOTE_CAP 0x5 /* PCIe adapter registers */ - -#define TB_PCI_EN BIT(31) +#define ADP_PCIE_CS_0 0x00 +#define ADP_PCIE_CS_0_PE BIT(31) /* Hop register from TB_CFG_HOPS. 8 byte per entry. */ struct tb_regs_hop { From patchwork Tue Oct 1 11:38:19 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168547 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 07F3514DB for ; Tue, 1 Oct 2019 11:40:04 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id DAA9321D7B for ; Tue, 1 Oct 2019 11:40:03 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1733246AbfJALjx (ORCPT ); Tue, 1 Oct 2019 07:39:53 -0400 Received: from mga12.intel.com ([192.55.52.136]:17382 "EHLO mga12.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1732566AbfJALik (ORCPT ); Tue, 1 Oct 2019 07:38:40 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:38 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="220968921" Received: from black.fi.intel.com ([10.237.72.28]) by fmsmga002.fm.intel.com with ESMTP; 01 Oct 2019 04:38:35 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id 57EF151B; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 11/22] thunderbolt: Convert DP adapter register names to follow the USB4 spec Date: Tue, 1 Oct 2019 14:38:19 +0300 Message-Id: 
<20191001113830.13028-12-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org Now that USB4 spec has names for these DP adapter registers we can use them instead. This makes it easier to match certain register to the spec. No functional changes. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/switch.c | 50 ++++++++++++++++++++--------------- drivers/thunderbolt/tb_regs.h | 32 ++++++++++------------ drivers/thunderbolt/tunnel.c | 8 +++--- 3 files changed, 46 insertions(+), 44 deletions(-) diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 2079e6065038..604cb3ef4985 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -961,11 +961,12 @@ int tb_dp_port_hpd_is_active(struct tb_port *port) u32 data; int ret; - ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 2, 1); + ret = tb_port_read(port, &data, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_2, 1); if (ret) return ret; - return !!(data & TB_DP_HDP); + return !!(data & ADP_DP_CS_2_HDP); } /** @@ -979,12 +980,14 @@ int tb_dp_port_hpd_clear(struct tb_port *port) u32 data; int ret; - ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1); + ret = tb_port_read(port, &data, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_3, 1); if (ret) return ret; - data |= TB_DP_HPDC; - return tb_port_write(port, &data, TB_CFG_PORT, port->cap_adap + 3, 1); + data |= ADP_DP_CS_3_HDPC; + return tb_port_write(port, &data, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_3, 1); } /** @@ -1002,20 +1005,23 @@ int tb_dp_port_set_hops(struct tb_port *port, unsigned int video, u32 data[2]; int ret; - ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap, - ARRAY_SIZE(data)); + ret = tb_port_read(port, data, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data)); if (ret) return ret; - data[0] &= ~TB_DP_VIDEO_HOPID_MASK; - data[1] &= ~(TB_DP_AUX_RX_HOPID_MASK | TB_DP_AUX_TX_HOPID_MASK); + data[0] &= ~ADP_DP_CS_0_VIDEO_HOPID_MASK; + data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK; + data[1] &= ~ADP_DP_CS_1_AUX_RX_HOPID_MASK; - data[0] |= (video << TB_DP_VIDEO_HOPID_SHIFT) & TB_DP_VIDEO_HOPID_MASK; - data[1] |= aux_tx & TB_DP_AUX_TX_HOPID_MASK; - data[1] |= (aux_rx << TB_DP_AUX_RX_HOPID_SHIFT) & TB_DP_AUX_RX_HOPID_MASK; + data[0] |= (video << ADP_DP_CS_0_VIDEO_HOPID_SHIFT) & + ADP_DP_CS_0_VIDEO_HOPID_MASK; + data[1] |= aux_tx & ADP_DP_CS_1_AUX_TX_HOPID_MASK; + data[1] |= (aux_rx << ADP_DP_CS_1_AUX_RX_HOPID_SHIFT) & + ADP_DP_CS_1_AUX_RX_HOPID_MASK; - return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap, - ARRAY_SIZE(data)); + return tb_port_write(port, data, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data)); } /** @@ -1026,11 +1032,11 @@ bool tb_dp_port_is_enabled(struct tb_port *port) { u32 data[2]; - if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap, + if (tb_port_read(port, data, TB_CFG_PORT, port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data))) return false; - return !!(data[0] & (TB_DP_VIDEO_EN | TB_DP_AUX_EN)); + return !!(data[0] & (ADP_DP_CS_0_VE | ADP_DP_CS_0_AE)); } /** @@ -1046,18 +1052,18 @@ int tb_dp_port_enable(struct tb_port *port, bool enable) u32 data[2]; int ret; - ret = tb_port_read(port, data, TB_CFG_PORT, port->cap_adap, - ARRAY_SIZE(data)); + ret = tb_port_read(port, data, 
TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data)); if (ret) return ret; if (enable) - data[0] |= TB_DP_VIDEO_EN | TB_DP_AUX_EN; + data[0] |= ADP_DP_CS_0_VE | ADP_DP_CS_0_AE; else - data[0] &= ~(TB_DP_VIDEO_EN | TB_DP_AUX_EN); + data[0] &= ~(ADP_DP_CS_0_VE | ADP_DP_CS_0_AE); - return tb_port_write(port, data, TB_CFG_PORT, port->cap_adap, - ARRAY_SIZE(data)); + return tb_port_write(port, data, TB_CFG_PORT, + port->cap_adap + ADP_DP_CS_0, ARRAY_SIZE(data)); } /* switch utility functions */ diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index cd03d160634c..3a39490a954b 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -238,24 +238,20 @@ struct tb_regs_port_header { #define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20 /* Display Port adapter registers */ - -/* DWORD 0 */ -#define TB_DP_VIDEO_HOPID_SHIFT 16 -#define TB_DP_VIDEO_HOPID_MASK GENMASK(26, 16) -#define TB_DP_AUX_EN BIT(30) -#define TB_DP_VIDEO_EN BIT(31) -/* DWORD 1 */ -#define TB_DP_AUX_TX_HOPID_MASK GENMASK(10, 0) -#define TB_DP_AUX_RX_HOPID_SHIFT 11 -#define TB_DP_AUX_RX_HOPID_MASK GENMASK(21, 11) -/* DWORD 2 */ -#define TB_DP_HDP BIT(6) -/* DWORD 3 */ -#define TB_DP_HPDC BIT(9) -/* DWORD 4 */ -#define TB_DP_LOCAL_CAP 0x4 -/* DWORD 5 */ -#define TB_DP_REMOTE_CAP 0x5 +#define ADP_DP_CS_0 0x00 +#define ADP_DP_CS_0_VIDEO_HOPID_MASK GENMASK(26, 16) +#define ADP_DP_CS_0_VIDEO_HOPID_SHIFT 16 +#define ADP_DP_CS_0_AE BIT(30) +#define ADP_DP_CS_0_VE BIT(31) +#define ADP_DP_CS_1_AUX_TX_HOPID_MASK GENMASK(10, 0) +#define ADP_DP_CS_1_AUX_RX_HOPID_MASK GENMASK(21, 11) +#define ADP_DP_CS_1_AUX_RX_HOPID_SHIFT 11 +#define ADP_DP_CS_2 0x02 +#define ADP_DP_CS_2_HDP BIT(6) +#define ADP_DP_CS_3 0x03 +#define ADP_DP_CS_3_HDPC BIT(9) +#define DP_LOCAL_CAP 0x04 +#define DP_REMOTE_CAP 0x05 /* PCIe adapter registers */ #define ADP_PCIE_CS_0 0x00 diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index c901fa488478..3353396e0806 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -258,23 +258,23 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) /* Read both DP_LOCAL_CAP registers */ ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT, - in->cap_adap + TB_DP_LOCAL_CAP, 1); + in->cap_adap + DP_LOCAL_CAP, 1); if (ret) return ret; ret = tb_port_read(out, &out_dp_cap, TB_CFG_PORT, - out->cap_adap + TB_DP_LOCAL_CAP, 1); + out->cap_adap + DP_LOCAL_CAP, 1); if (ret) return ret; /* Write IN local caps to OUT remote caps */ ret = tb_port_write(out, &in_dp_cap, TB_CFG_PORT, - out->cap_adap + TB_DP_REMOTE_CAP, 1); + out->cap_adap + DP_REMOTE_CAP, 1); if (ret) return ret; return tb_port_write(in, &out_dp_cap, TB_CFG_PORT, - in->cap_adap + TB_DP_REMOTE_CAP, 1); + in->cap_adap + DP_REMOTE_CAP, 1); } static int tb_dp_activate(struct tb_tunnel *tunnel, bool active) From patchwork Tue Oct 1 11:38:20 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168515 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 3870A14DB for ; Tue, 1 Oct 2019 11:38:42 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 20A0D21A4A for ; Tue, 1 Oct 2019 11:38:42 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1732627AbfJALik (ORCPT ); Tue, 1 Oct 2019 07:38:40 -0400 
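The capability exchange in tb_dp_xchg_caps() above reduces to a cross-copy of the two DP_LOCAL_CAP registers into the peer's DP_REMOTE_CAP, with every DP adapter register addressed relative to the adapter capability base (cap_adap). A standalone sketch of that idea, using an invented in-memory register file instead of real config space access:

/*
 * Standalone sketch (not driver code) of the DP capability exchange:
 * each side's local capabilities are written into the other side's
 * remote capability register. Register file size and cap_adap values
 * are made up for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define DP_LOCAL_CAP	0x04
#define DP_REMOTE_CAP	0x05

struct sketch_adapter {
	uint32_t regs[64];
	unsigned int cap_adap;	/* base of the DP adapter capability */
};

static uint32_t adp_read(const struct sketch_adapter *a, unsigned int off)
{
	return a->regs[a->cap_adap + off];
}

static void adp_write(struct sketch_adapter *a, unsigned int off, uint32_t val)
{
	a->regs[a->cap_adap + off] = val;
}

/* Cross-copy local capabilities into the peer's remote capability */
static void dp_xchg_caps(struct sketch_adapter *in, struct sketch_adapter *out)
{
	uint32_t in_cap = adp_read(in, DP_LOCAL_CAP);
	uint32_t out_cap = adp_read(out, DP_LOCAL_CAP);

	adp_write(out, DP_REMOTE_CAP, in_cap);
	adp_write(in, DP_REMOTE_CAP, out_cap);
}

int main(void)
{
	struct sketch_adapter in = { .cap_adap = 0x20 };
	struct sketch_adapter out = { .cap_adap = 0x30 };

	adp_write(&in, DP_LOCAL_CAP, 0x1234);
	adp_write(&out, DP_LOCAL_CAP, 0xabcd);

	dp_xchg_caps(&in, &out);

	printf("IN remote cap: 0x%x, OUT remote cap: 0x%x\n",
	       (unsigned)adp_read(&in, DP_REMOTE_CAP),
	       (unsigned)adp_read(&out, DP_REMOTE_CAP));
	return 0;
}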
Received: from mga07.intel.com ([134.134.136.100]:49106 "EHLO mga07.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1732572AbfJALik (ORCPT ); Tue, 1 Oct 2019 07:38:40 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga003.fm.intel.com ([10.253.24.29]) by orsmga105.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:38 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="197830157" Received: from black.fi.intel.com ([10.237.72.28]) by FMSMGA003.fm.intel.com with ESMTP; 01 Oct 2019 04:38:35 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id 626D656A; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 12/22] thunderbolt: Add Display Port CM handshake for Titan Ridge devices Date: Tue, 1 Oct 2019 14:38:20 +0300 Message-Id: <20191001113830.13028-13-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org Titan Ridge devices and newer need an additional connection manager handshake in order to do proper Display Port tunneling so implement it here. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/tb_regs.h | 3 +++ drivers/thunderbolt/tunnel.c | 44 +++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index 3a39490a954b..8d11b4a2d552 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -252,6 +252,9 @@ struct tb_regs_port_header { #define ADP_DP_CS_3_HDPC BIT(9) #define DP_LOCAL_CAP 0x04 #define DP_REMOTE_CAP 0x05 +#define DP_STATUS_CTRL 0x06 +#define DP_STATUS_CTRL_CMHS BIT(25) +#define DP_STATUS_CTRL_UF BIT(26) /* PCIe adapter registers */ #define ADP_PCIE_CS_0 0x00 diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 3353396e0806..369800110e5e 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -6,6 +6,7 @@ * Copyright (C) 2019, Intel Corporation */ +#include #include #include @@ -242,6 +243,41 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, return tunnel; } +static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out) +{ + int timeout = 10; + u32 val; + int ret; + + /* Both ends need to support this */ + if (!tb_switch_is_tr(in->sw) || !tb_switch_is_tr(out->sw)) + return 0; + + ret = tb_port_read(out, &val, TB_CFG_PORT, + out->cap_adap + DP_STATUS_CTRL, 1); + if (ret) + return ret; + + val |= DP_STATUS_CTRL_UF | DP_STATUS_CTRL_CMHS; + + ret = tb_port_write(out, &val, TB_CFG_PORT, + out->cap_adap + DP_STATUS_CTRL, 1); + if (ret) + return ret; + + do { + ret = tb_port_read(out, &val, TB_CFG_PORT, + out->cap_adap + DP_STATUS_CTRL, 1); + if (ret) + return ret; + if (!(val & DP_STATUS_CTRL_CMHS)) + return 0; + usleep_range(10, 100); + } while (timeout--); + + return -ETIMEDOUT; +} + static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) { struct tb_port *out = tunnel->dst_port; @@ -256,6 +292,14 
@@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) if (in->sw->generation < 2 || out->sw->generation < 2) return 0; + /* + * Perform connection manager handshake between IN and OUT ports + * before capabilities exchange can take place. + */ + ret = tb_dp_cm_handshake(in, out); + if (ret) + return ret; + /* Read both DP_LOCAL_CAP registers */ ret = tb_port_read(in, &in_dp_cap, TB_CFG_PORT, in->cap_adap + DP_LOCAL_CAP, 1); From patchwork Tue Oct 1 11:38:21 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168539 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id E317914DB for ; Tue, 1 Oct 2019 11:39:43 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id AE12621A4C for ; Tue, 1 Oct 2019 11:39:43 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1732600AbfJALjj (ORCPT ); Tue, 1 Oct 2019 07:39:39 -0400 Received: from mga14.intel.com ([192.55.52.115]:23131 "EHLO mga14.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1732606AbfJALik (ORCPT ); Tue, 1 Oct 2019 07:38:40 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga006.jf.intel.com ([10.7.209.51]) by fmsmga103.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:39 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="195619888" Received: from black.fi.intel.com ([10.237.72.28]) by orsmga006.jf.intel.com with ESMTP; 01 Oct 2019 04:38:36 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id 6DE92570; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 13/22] thunderbolt: Add Display Port adapter pairing and resource management Date: Tue, 1 Oct 2019 14:38:21 +0300 Message-Id: <20191001113830.13028-14-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org To perform proper Display Port tunneling for Thunderbolt 3 devices we need to allocate DP resources for DP IN port before they can be used. The reason for this is that the user can also connect a monitor directly to the Type-C ports in which case the Thunderbolt controller acts as re-driver for Display Port (no tunneling takes place) taking the DP sinks away from the connection manager. This allocation is done using special sink allocation registers available through the link controller. We can pair DP IN to DP OUT only if * DP IN has sink allocated via link controller * DP OUT port receives hotplug event For DP IN adapters (only for the root switch) we first query whether there is DP resource available (it may be the previous instance of the driver for example already allocated it) and if it is we add it to the list. 
We then update the list when after each plug/unplug event to a DP IN/OUT adapter. Each time the list is updated we try to find additional DP IN <-> DP OUT pairs for tunnel establishment. This strategy also makes it possible to establish another tunnel in case there are 3 monitors connected and one gets unplugged releasing the DP IN adapter for the new tunnel. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/lc.c | 161 +++++++++++++++++++++++++++ drivers/thunderbolt/switch.c | 44 ++++++++ drivers/thunderbolt/tb.c | 203 ++++++++++++++++++++++++++++------ drivers/thunderbolt/tb.h | 9 ++ drivers/thunderbolt/tb_regs.h | 6 + 5 files changed, 389 insertions(+), 34 deletions(-) diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c index df56523eb822..13a5e39b0043 100644 --- a/drivers/thunderbolt/lc.c +++ b/drivers/thunderbolt/lc.c @@ -205,3 +205,164 @@ bool tb_lc_lane_bonding_possible(struct tb_switch *sw) return !!(val & TB_LC_PORT_ATTR_BE); } + +static int tb_lc_dp_sink_from_port(const struct tb_switch *sw, + struct tb_port *in) +{ + int i; + + /* The first DP IN port is sink 0 and second is sink 1 */ + tb_switch_for_each_port(sw, i) { + if (tb_port_is_dpin(&sw->ports[i])) + return in != &sw->ports[i]; + } + + return -EINVAL; +} + +static int tb_lc_dp_sink_available(struct tb_switch *sw, int sink) +{ + u32 val, alloc; + int ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->cap_lc + TB_LC_SNK_ALLOCATION, 1); + if (ret) + return ret; + + /* + * Sink is available for CM/SW to use if the allocation valie is + * either 0 or 1. + */ + if (!sink) { + alloc = val & TB_LC_SNK_ALLOCATION_SNK0_MASK; + if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK0_CM) + return 0; + } else { + alloc = (val & TB_LC_SNK_ALLOCATION_SNK1_MASK) >> + TB_LC_SNK_ALLOCATION_SNK1_SHIFT; + if (!alloc || alloc == TB_LC_SNK_ALLOCATION_SNK1_CM) + return 0; + } + + return -EBUSY; +} + +/** + * tb_lc_dp_sink_query() - Is DP sink available for DP IN port + * @sw: Switch whose DP sink is queried + * @in: DP IN port to check + * + * Queries through LC SNK_ALLOCATION registers whether DP sink is available + * for the given DP IN port or not. + */ +bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in) +{ + int sink; + + /* + * For older generations sink is always available as there is no + * allocation mechanism. + */ + if (sw->generation < 3) + return true; + + sink = tb_lc_dp_sink_from_port(sw, in); + if (sink < 0) + return false; + + return !tb_lc_dp_sink_available(sw, sink); +} + +/** + * tb_lc_dp_sink_alloc() - Allocate DP sink + * @sw: Switch whose DP sink is allocated + * @in: DP IN port the DP sink is allocated for + * + * Allocate DP sink for @in via LC SNK_ALLOCATION registers. If the + * resource is available and allocation is successful returns %0. In all + * other cases returs negative errno. In particular %-EBUSY is returned if + * the resource was not available. 
+ */ +int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in) +{ + int ret, sink; + u32 val; + + if (sw->generation < 3) + return 0; + + sink = tb_lc_dp_sink_from_port(sw, in); + if (sink < 0) + return sink; + + ret = tb_lc_dp_sink_available(sw, sink); + if (ret) + return ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->cap_lc + TB_LC_SNK_ALLOCATION, 1); + if (ret) + return ret; + + if (!sink) { + val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK; + val |= TB_LC_SNK_ALLOCATION_SNK0_CM; + } else { + val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK; + val |= TB_LC_SNK_ALLOCATION_SNK1_CM << + TB_LC_SNK_ALLOCATION_SNK1_SHIFT; + } + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->cap_lc + TB_LC_SNK_ALLOCATION, 1); + + if (ret) + return ret; + + tb_port_dbg(in, "sink %d allocated\n", sink); + return 0; +} + +/** + * tb_lc_dp_sink_dealloc() - De-allocate DP sink + * @sw: Switch whose DP sink is de-allocated + * @in: DP IN port whose DP sink is de-allocated + * + * De-allocate DP sink from @in using LC SNK_ALLOCATION registers. + */ +int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in) +{ + int ret, sink; + u32 val; + + if (sw->generation < 3) + return 0; + + sink = tb_lc_dp_sink_from_port(sw, in); + if (sink < 0) + return sink; + + /* Needs to be owned by CM/SW */ + ret = tb_lc_dp_sink_available(sw, sink); + if (ret) + return ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->cap_lc + TB_LC_SNK_ALLOCATION, 1); + if (ret) + return ret; + + if (!sink) + val &= ~TB_LC_SNK_ALLOCATION_SNK0_MASK; + else + val &= ~TB_LC_SNK_ALLOCATION_SNK1_MASK; + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->cap_lc + TB_LC_SNK_ALLOCATION, 1); + if (ret) + return ret; + + tb_port_dbg(in, "sink %d de-allocated\n", sink); + return 0; +} diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 604cb3ef4985..87c74c916a7c 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -645,6 +645,7 @@ static int tb_init_port(struct tb_port *port) ida_init(&port->out_hopids); } + INIT_LIST_HEAD(&port->list); return 0; } @@ -2292,6 +2293,49 @@ void tb_switch_suspend(struct tb_switch *sw) tb_lc_set_sleep(sw); } +/** + * tb_switch_query_dp_resource() - Query availability of DP resource + * @sw: Switch whose DP resource is queried + * @in: DP IN port + * + * Queries availability of DP resource for DP tunneling using switch + * specific means. Returns %true if resource is available. + */ +bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + return tb_lc_dp_sink_query(sw, in); +} + +/** + * tb_switch_alloc_dp_resource() - Allocate available DP resource + * @sw: Switch whose DP resource is allocated + * @in: DP IN port + * + * Allocates DP resource for DP tunneling. The resource must be + * available for this to succeed (see tb_switch_query_dp_resource()). + * Returns %0 in success and negative errno otherwise. + */ +int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + return tb_lc_dp_sink_alloc(sw, in); +} + +/** + * tb_switch_dealloc_dp_resource() - De-allocate DP resource + * @sw: Switch whose DP resource is de-allocated + * @in: DP IN port + * + * De-allocates DP resource that was previously allocated for DP + * tunneling. 
+ */ +void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + if (tb_lc_dp_sink_dealloc(sw, in)) { + tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n", + in->port); + } +} + struct tb_sw_lookup { struct tb *tb; u8 link; diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 704455a4f763..5b457874e51a 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -18,6 +18,7 @@ /** * struct tb_cm - Simple Thunderbolt connection manager * @tunnel_list: List of active tunnels + * @dp_resources: List of available DP resources for DP tunneling * @hotplug_active: tb_handle_hotplug will stop progressing plug * events and exit if this is not set (it needs to * acquire the lock one more time). Used to drain wq @@ -25,6 +26,7 @@ */ struct tb_cm { struct list_head tunnel_list; + struct list_head dp_resources; bool hotplug_active; }; @@ -56,6 +58,44 @@ static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug) /* enumeration & hot plug handling */ +static void tb_add_dp_resources(struct tb_switch *sw) +{ + struct tb_cm *tcm = tb_priv(sw->tb); + struct tb_port *port; + int i; + + tb_switch_for_each_port(sw, i) { + port = &sw->ports[i]; + + if (!tb_port_is_dpin(port)) + continue; + + if (!tb_switch_query_dp_resource(sw, port)) + continue; + + list_add_tail(&port->list, &tcm->dp_resources); + tb_port_dbg(port, "DP IN resource available\n"); + } +} + +static void tb_remove_dp_resources(struct tb_switch *sw) +{ + struct tb_cm *tcm = tb_priv(sw->tb); + struct tb_port *port, *tmp; + int i; + + /* Clear children resources first */ + tb_switch_for_each_remote_port(sw, i) + tb_remove_dp_resources(sw->ports[i].remote->sw); + + list_for_each_entry_safe(port, tmp, &tcm->dp_resources, list) { + if (port->sw == sw) { + tb_port_dbg(port, "DP OUT resource unavailable\n"); + list_del_init(&port->list); + } + } +} + static void tb_discover_tunnels(struct tb_switch *sw) { struct tb *tb = sw->tb; @@ -226,8 +266,9 @@ static void tb_scan_port(struct tb_port *port) tb_scan_switch(sw); } -static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type, - struct tb_port *src_port, struct tb_port *dst_port) +static struct tb_tunnel *tb_find_tunnel(struct tb *tb, enum tb_tunnel_type type, + struct tb_port *src_port, + struct tb_port *dst_port) { struct tb_cm *tcm = tb_priv(tb); struct tb_tunnel *tunnel; @@ -236,14 +277,32 @@ static int tb_free_tunnel(struct tb *tb, enum tb_tunnel_type type, if (tunnel->type == type && ((src_port && src_port == tunnel->src_port) || (dst_port && dst_port == tunnel->dst_port))) { - tb_tunnel_deactivate(tunnel); - list_del(&tunnel->list); - tb_tunnel_free(tunnel); - return 0; + return tunnel; } } - return -ENODEV; + return NULL; +} + +static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) +{ + if (!tunnel) + return; + + tb_tunnel_deactivate(tunnel); + list_del(&tunnel->list); + + /* + * In case of DP tunnel make sure the DP IN resource is deallocated + * properly. 
+ */ + if (tb_tunnel_is_dp(tunnel)) { + struct tb_port *in = tunnel->src_port; + + tb_switch_dealloc_dp_resource(in->sw, in); + } + + tb_tunnel_free(tunnel); } /** @@ -256,11 +315,8 @@ static void tb_free_invalid_tunnels(struct tb *tb) struct tb_tunnel *n; list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { - if (tb_tunnel_is_invalid(tunnel)) { - tb_tunnel_deactivate(tunnel); - list_del(&tunnel->list); - tb_tunnel_free(tunnel); - } + if (tb_tunnel_is_invalid(tunnel)) + tb_deactivate_and_free_tunnel(tunnel); } } @@ -275,6 +331,7 @@ static void tb_free_unplugged_children(struct tb_switch *sw) struct tb_port *port = &sw->ports[i]; if (port->remote->sw->is_unplugged) { + tb_remove_dp_resources(port->remote->sw); tb_switch_lane_bonding_disable(port->remote->sw); tb_switch_remove(port->remote->sw); port->remote = NULL; @@ -368,42 +425,112 @@ static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN); } -static int tb_tunnel_dp(struct tb *tb, struct tb_port *out) +static void tb_tunnel_dp(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); - struct tb_switch *sw = out->sw; + struct tb_port *port, *in, *out; struct tb_tunnel *tunnel; - struct tb_port *in; - if (tb_port_is_enabled(out)) - return 0; + /* + * Find pair of inactive DP IN and DP OUT adapters and then + * establish a DP tunnel between them. + */ + tb_dbg(tb, "looking for DP IN <-> DP OUT pairs:\n"); + + in = NULL; + out = NULL; + list_for_each_entry(port, &tcm->dp_resources, list) { + if (tb_port_is_enabled(port)) { + tb_port_dbg(port, "in use\n"); + continue; + } + + tb_port_dbg(port, "available\n"); + + if (!in && tb_port_is_dpin(port)) + in = port; + else if (!out && tb_port_is_dpout(port)) + out = port; + } + + if (!in) { + tb_dbg(tb, "no suitable DP IN adapter available, not tunneling\n"); + return; + } + if (!out) { + tb_dbg(tb, "no suitable DP OUT adapter available, not tunneling\n"); + return; + } - do { - sw = tb_to_switch(sw->dev.parent); - if (!sw) - return 0; - in = tb_find_unused_port(sw, TB_TYPE_DP_HDMI_IN); - } while (!in); + if (tb_switch_alloc_dp_resource(in->sw, in)) { + tb_port_dbg(in, "no resource available for DP IN, not tunneling\n"); + return; + } tunnel = tb_tunnel_alloc_dp(tb, in, out); if (!tunnel) { - tb_port_dbg(out, "DP tunnel allocation failed\n"); - return -ENOMEM; + tb_port_dbg(out, "could not allocate DP tunnel\n"); + goto dealloc_dp; } if (tb_tunnel_activate(tunnel)) { tb_port_info(out, "DP tunnel activation failed, aborting\n"); tb_tunnel_free(tunnel); - return -EIO; + goto dealloc_dp; } list_add_tail(&tunnel->list, &tcm->tunnel_list); - return 0; + return; + +dealloc_dp: + tb_switch_dealloc_dp_resource(in->sw, in); } -static void tb_teardown_dp(struct tb *tb, struct tb_port *out) +static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) { - tb_free_tunnel(tb, TB_TUNNEL_DP, NULL, out); + struct tb_port *in, *out; + struct tb_tunnel *tunnel; + + if (tb_port_is_dpin(port)) { + tb_port_dbg(port, "DP IN resource unavailable\n"); + in = port; + out = NULL; + } else { + tb_port_dbg(port, "DP OUT resource unavailable\n"); + in = NULL; + out = port; + } + + tunnel = tb_find_tunnel(tb, TB_TUNNEL_DP, in, out); + tb_deactivate_and_free_tunnel(tunnel); + list_del_init(&port->list); + + /* + * See if there is another DP OUT port that can be used for + * to create another tunnel. 
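The pairing scan in tb_tunnel_dp() above boils down to one pass over the DP resource list that remembers the first inactive DP IN and the first inactive DP OUT adapter. A standalone sketch with illustrative types (not the driver's struct tb_port or list handling):

/*
 * Standalone sketch (not driver code) of the DP IN <-> DP OUT pairing
 * scan: skip adapters already used by an active tunnel, keep the first
 * free one of each kind, tunnel only if both exist.
 */
#include <stdbool.h>
#include <stdio.h>

enum dp_kind { DP_IN, DP_OUT };

struct dp_resource {
	enum dp_kind kind;
	bool enabled;		/* already used by an active tunnel */
	const char *name;
};

static void pair_dp_adapters(struct dp_resource *res, int n)
{
	struct dp_resource *in = NULL, *out = NULL;

	for (int i = 0; i < n; i++) {
		if (res[i].enabled)
			continue;
		if (!in && res[i].kind == DP_IN)
			in = &res[i];
		else if (!out && res[i].kind == DP_OUT)
			out = &res[i];
	}

	if (!in || !out) {
		printf("no suitable DP IN/OUT pair, not tunneling\n");
		return;
	}
	printf("would tunnel %s -> %s\n", in->name, out->name);
}

int main(void)
{
	struct dp_resource res[] = {
		{ DP_IN,  true,  "DP IN 1" },	/* already tunneled */
		{ DP_IN,  false, "DP IN 2" },
		{ DP_OUT, false, "DP OUT 1" },
	};

	pair_dp_adapters(res, 3);
	return 0;
}

Because the scan is re-run after every plug, unplug and resource change, a DP IN adapter freed by one unplug can immediately be paired with a waiting DP OUT adapter.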
+ */ + tb_tunnel_dp(tb); +} + +static void tb_dp_resource_available(struct tb *tb, struct tb_port *port) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_port *p; + + if (tb_port_is_enabled(port)) + return; + + list_for_each_entry(p, &tcm->dp_resources, list) { + if (p == port) + return; + } + + tb_port_dbg(port, "DP %s resource available\n", + tb_port_is_dpin(port) ? "IN" : "OUT"); + list_add_tail(&port->list, &tcm->dp_resources); + + /* Look for suitable DP IN <-> DP OUT pairs now */ + tb_tunnel_dp(tb); } static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) @@ -478,6 +605,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) { struct tb_port *dst_port; + struct tb_tunnel *tunnel; struct tb_switch *sw; sw = tb_to_switch(xd->dev.parent); @@ -488,7 +616,8 @@ static void __tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) * case of cable disconnect) so it is fine if we cannot find it * here anymore. */ - tb_free_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port); + tunnel = tb_find_tunnel(tb, TB_TUNNEL_DMA, NULL, dst_port); + tb_deactivate_and_free_tunnel(tunnel); } static int tb_disconnect_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) @@ -543,11 +672,14 @@ static void tb_handle_hotplug(struct work_struct *work) tb_port_dbg(port, "switch unplugged\n"); tb_sw_set_unplugged(port->remote->sw); tb_free_invalid_tunnels(tb); + tb_remove_dp_resources(port->remote->sw); tb_switch_lane_bonding_disable(port->remote->sw); tb_switch_remove(port->remote->sw); port->remote = NULL; if (port->dual_link_port) port->dual_link_port->remote = NULL; + /* Maybe we can create another DP tunnel */ + tb_tunnel_dp(tb); } else if (port->xdomain) { struct tb_xdomain *xd = tb_xdomain_get(port->xdomain); @@ -564,8 +696,8 @@ static void tb_handle_hotplug(struct work_struct *work) port->xdomain = NULL; __tb_disconnect_xdomain_paths(tb, xd); tb_xdomain_put(xd); - } else if (tb_port_is_dpout(port)) { - tb_teardown_dp(tb, port); + } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { + tb_dp_resource_unavailable(tb, port); } else { tb_port_dbg(port, "got unplug event for disconnected port, ignoring\n"); @@ -578,8 +710,8 @@ static void tb_handle_hotplug(struct work_struct *work) tb_scan_port(port); if (!port->remote) tb_port_dbg(port, "hotplug: no switch found\n"); - } else if (tb_port_is_dpout(port)) { - tb_tunnel_dp(tb, port); + } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { + tb_dp_resource_available(tb, port); } } @@ -692,6 +824,8 @@ static int tb_start(struct tb *tb) tb_scan_switch(tb->root_switch); /* Find out tunnels created by the boot firmware */ tb_discover_tunnels(tb->root_switch); + /* Add DP IN resources for the root switch */ + tb_add_dp_resources(tb->root_switch); /* Make the discovered switches available to the userspace */ device_for_each_child(&tb->root_switch->dev, NULL, tb_scan_finalize_switch); @@ -821,6 +955,7 @@ struct tb *tb_probe(struct tb_nhi *nhi) tcm = tb_priv(tb); INIT_LIST_HEAD(&tcm->tunnel_list); + INIT_LIST_HEAD(&tcm->dp_resources); return tb; } diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index dbab06551eaa..48f3725249a9 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -137,6 +137,7 @@ struct tb_switch { * @link_nr: Is this primary or secondary port on the dual_link. 
* @in_hopids: Currently allocated input HopIDs * @out_hopids: Currently allocated output HopIDs + * @list: Used to link ports to DP resources list */ struct tb_port { struct tb_regs_port_header config; @@ -152,6 +153,7 @@ struct tb_port { u8 link_nr:1; struct ida in_hopids; struct ida out_hopids; + struct list_head list; }; /** @@ -674,6 +676,10 @@ static inline bool tb_switch_is_icm(const struct tb_switch *sw) int tb_switch_lane_bonding_enable(struct tb_switch *sw); void tb_switch_lane_bonding_disable(struct tb_switch *sw); +bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in); +int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); +void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in); + int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); int tb_port_add_nfc_credits(struct tb_port *port, int credits); int tb_port_set_initial_credits(struct tb_port *port, u32 credits); @@ -718,6 +724,9 @@ int tb_lc_configure_link(struct tb_switch *sw); void tb_lc_unconfigure_link(struct tb_switch *sw); int tb_lc_set_sleep(struct tb_switch *sw); bool tb_lc_lane_bonding_possible(struct tb_switch *sw); +bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in); +int tb_lc_dp_sink_alloc(struct tb_switch *sw, struct tb_port *in); +int tb_lc_dp_sink_dealloc(struct tb_switch *sw, struct tb_port *in); static inline int tb_route_length(u64 route) { diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index 8d11b4a2d552..aec35e61cc14 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -295,6 +295,12 @@ struct tb_regs_hop { #define TB_LC_DESC_PORT_SIZE_SHIFT 16 #define TB_LC_DESC_PORT_SIZE_MASK GENMASK(27, 16) #define TB_LC_FUSE 0x03 +#define TB_LC_SNK_ALLOCATION 0x10 +#define TB_LC_SNK_ALLOCATION_SNK0_MASK GENMASK(3, 0) +#define TB_LC_SNK_ALLOCATION_SNK0_CM 0x1 +#define TB_LC_SNK_ALLOCATION_SNK1_SHIFT 4 +#define TB_LC_SNK_ALLOCATION_SNK1_MASK GENMASK(7, 4) +#define TB_LC_SNK_ALLOCATION_SNK1_CM 0x1 /* Link controller registers */ #define TB_LC_PORT_ATTR 0x8d From patchwork Tue Oct 1 11:38:22 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168529 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 8E34914DB for ; Tue, 1 Oct 2019 11:39:21 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 5935F21906 for ; Tue, 1 Oct 2019 11:39:21 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1732641AbfJALil (ORCPT ); Tue, 1 Oct 2019 07:38:41 -0400 Received: from mga12.intel.com ([192.55.52.136]:17384 "EHLO mga12.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1732601AbfJALik (ORCPT ); Tue, 1 Oct 2019 07:38:40 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga003.jf.intel.com ([10.7.209.27]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:39 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="194502906" Received: from black.fi.intel.com ([10.237.72.28]) by orsmga003.jf.intel.com with ESMTP; 01 Oct 2019 04:38:36 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id 78D80587; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) 
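The TB_LC_SNK_ALLOCATION handling added in the lc.c hunks above comes down to testing and updating two 4-bit fields, one per DP sink. The following standalone sketch models only the register value, not the link controller access; the SNK_* names mirror the new defines:

/*
 * Standalone sketch (not driver code) of the sink allocation nibbles:
 * sink 0 lives in bits 3:0, sink 1 in bits 7:4, a value of 0 means
 * unallocated and 1 means owned by the CM/SW.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SNK0_MASK	0x0fu
#define SNK1_MASK	0xf0u
#define SNK1_SHIFT	4
#define SNK_CM		0x1u	/* allocated to the connection manager */

static bool sink_available(uint32_t reg, int sink)
{
	uint32_t alloc = sink ? (reg & SNK1_MASK) >> SNK1_SHIFT :
				(reg & SNK0_MASK);

	/* Free, or already owned by us */
	return alloc == 0 || alloc == SNK_CM;
}

static uint32_t sink_alloc(uint32_t reg, int sink)
{
	if (sink) {
		reg &= ~SNK1_MASK;
		reg |= SNK_CM << SNK1_SHIFT;
	} else {
		reg &= ~SNK0_MASK;
		reg |= SNK_CM;
	}
	return reg;
}

static uint32_t sink_dealloc(uint32_t reg, int sink)
{
	return reg & ~(sink ? SNK1_MASK : SNK0_MASK);
}

int main(void)
{
	uint32_t reg = 0;

	reg = sink_alloc(reg, 0);
	printf("after alloc sink 0: 0x%02x, sink 1 available: %d\n",
	       (unsigned)reg, sink_available(reg, 1));
	reg = sink_dealloc(reg, 0);
	printf("after dealloc sink 0: 0x%02x\n", (unsigned)reg);
	return 0;
}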
From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 14/22] thunderbolt: Add bandwidth management for Display Port tunnels Date: Tue, 1 Oct 2019 14:38:22 +0300 Message-Id: <20191001113830.13028-15-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org Thunderbolt 3 devices and especially Titan Ridge supports Display Port 1.4 which adds HBR3 (High Bit Rate) rates that may be up to 8.1 Gb/s over 4 lanes. This translates to effective data bandwidth of 25.92 Gb/s (as 8/10 encoding is removed by the DP adapters when going over Thunderbolt fabric). If another high rate monitor is connected we may need to reduce the bandwidth it consumes so that it fits into the total 40 Gb/s available on the Thunderbolt fabric. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/path.c | 22 +++ drivers/thunderbolt/tb.c | 52 ++++++- drivers/thunderbolt/tb.h | 2 + drivers/thunderbolt/tb_regs.h | 17 +++ drivers/thunderbolt/tunnel.c | 272 +++++++++++++++++++++++++++++++++- drivers/thunderbolt/tunnel.h | 10 +- 6 files changed, 371 insertions(+), 4 deletions(-) diff --git a/drivers/thunderbolt/path.c b/drivers/thunderbolt/path.c index 6cf66597d5d8..ad58559ea88e 100644 --- a/drivers/thunderbolt/path.c +++ b/drivers/thunderbolt/path.c @@ -557,3 +557,25 @@ bool tb_path_is_invalid(struct tb_path *path) } return false; } + +/** + * tb_path_switch_on_path() - Does the path go through certain switch + * @path: Path to check + * @sw: Switch to check + * + * Goes over all hops on path and checks if @sw is any of them. + * Direction does not matter. + */ +bool tb_path_switch_on_path(const struct tb_path *path, + const struct tb_switch *sw) +{ + int i; + + for (i = 0; i < path->path_length; i++) { + if (path->hops[i].in_port->sw == sw || + path->hops[i].out_port->sw == sw) + return true; + } + + return false; +} diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 5b457874e51a..a3b7a18dc6d9 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -425,11 +425,51 @@ static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, return tb_find_unused_port(sw, TB_TYPE_PCIE_DOWN); } +static int tb_available_bw(struct tb_cm *tcm, struct tb_port *in, + struct tb_port *out) +{ + struct tb_switch *sw = out->sw; + struct tb_tunnel *tunnel; + int bw, available_bw = 40000; + + while (sw && sw != in->sw) { + bw = sw->link_speed * sw->link_width * 1000; /* Mb/s */ + /* Leave 10% guard band */ + bw -= bw / 10; + + /* + * Check for any active DP tunnels that go through this + * switch and reduce their consumed bandwidth from + * available. 
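The estimate made by tb_available_bw() above can be followed with concrete numbers. The sketch below is standalone and the link figures in main() are made-up examples; it only reproduces the arithmetic of the walk (per-link bandwidth, 10% guard band, subtraction of already consumed DP bandwidth, minimum over the path, 40000 Mb/s cap):

/*
 * Standalone sketch (not driver code) of the available bandwidth walk
 * from the DP OUT switch towards the DP IN switch. Link speeds, widths
 * and consumed figures below are illustrative.
 */
#include <stdio.h>

struct sketch_link {
	int speed_gbps;		/* per-lane speed, e.g. 10 or 20 */
	int width;		/* 1 or 2 lanes (bonded) */
	int consumed_mbps;	/* bandwidth of DP tunnels crossing it */
};

static int available_bw(const struct sketch_link *links, int n)
{
	int available = 40000;	/* Thunderbolt fabric maximum, Mb/s */

	for (int i = 0; i < n; i++) {
		int bw = links[i].speed_gbps * links[i].width * 1000;

		bw -= bw / 10;			/* 10% guard band */
		bw -= links[i].consumed_mbps;	/* existing DP tunnels */
		if (bw < available)
			available = bw;
	}
	return available;
}

int main(void)
{
	/* Two hops: a bonded 20G link and a bonded 20G link that already
	 * carries a 17280 Mb/s DP tunnel. */
	struct sketch_link links[] = {
		{ 20, 2, 0 },
		{ 20, 2, 17280 },
	};

	printf("available bandwidth: %d Mb/s\n", available_bw(links, 2));
	return 0;
}

With these example numbers the first hop offers 36000 Mb/s after the guard band and the second only 18720 Mb/s, so 18720 Mb/s is what a new DP tunnel may use.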
+ */ + list_for_each_entry(tunnel, &tcm->tunnel_list, list) { + int consumed_bw; + + if (!tb_tunnel_switch_on_path(tunnel, sw)) + continue; + + consumed_bw = tb_tunnel_consumed_bandwidth(tunnel); + if (consumed_bw < 0) + return consumed_bw; + + bw -= consumed_bw; + } + + if (bw < available_bw) + available_bw = bw; + + sw = tb_switch_parent(sw); + } + + return available_bw; +} + static void tb_tunnel_dp(struct tb *tb) { struct tb_cm *tcm = tb_priv(tb); struct tb_port *port, *in, *out; struct tb_tunnel *tunnel; + int available_bw; /* * Find pair of inactive DP IN and DP OUT adapters and then @@ -467,7 +507,17 @@ static void tb_tunnel_dp(struct tb *tb) return; } - tunnel = tb_tunnel_alloc_dp(tb, in, out); + /* Calculate available bandwidth between in and out */ + available_bw = tb_available_bw(tcm, in, out); + if (available_bw < 0) { + tb_warn(tb, "failed to determine available bandwidth\n"); + return; + } + + tb_dbg(tb, "available bandwidth for new DP tunnel %u Mb/s\n", + available_bw); + + tunnel = tb_tunnel_alloc_dp(tb, in, out, available_bw); if (!tunnel) { tb_port_dbg(out, "could not allocate DP tunnel\n"); goto dealloc_dp; diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 48f3725249a9..7a8cc56a2870 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -715,6 +715,8 @@ void tb_path_free(struct tb_path *path); int tb_path_activate(struct tb_path *path); void tb_path_deactivate(struct tb_path *path); bool tb_path_is_invalid(struct tb_path *path); +bool tb_path_switch_on_path(const struct tb_path *path, + const struct tb_switch *sw); int tb_drom_read(struct tb_switch *sw); int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid); diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index aec35e61cc14..7ee45b73c7f7 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -255,6 +255,23 @@ struct tb_regs_port_header { #define DP_STATUS_CTRL 0x06 #define DP_STATUS_CTRL_CMHS BIT(25) #define DP_STATUS_CTRL_UF BIT(26) +#define DP_COMMON_CAP 0x07 +/* + * DP_COMMON_CAP offsets work also for DP_LOCAL_CAP and DP_REMOTE_CAP + * with exception of DPRX done. 
+ */ +#define DP_COMMON_CAP_RATE_MASK GENMASK(11, 8) +#define DP_COMMON_CAP_RATE_SHIFT 8 +#define DP_COMMON_CAP_RATE_RBR 0x0 +#define DP_COMMON_CAP_RATE_HBR 0x1 +#define DP_COMMON_CAP_RATE_HBR2 0x2 +#define DP_COMMON_CAP_RATE_HBR3 0x3 +#define DP_COMMON_CAP_LANES_MASK GENMASK(14, 12) +#define DP_COMMON_CAP_LANES_SHIFT 12 +#define DP_COMMON_CAP_1_LANE 0x0 +#define DP_COMMON_CAP_2_LANES 0x1 +#define DP_COMMON_CAP_4_LANES 0x2 +#define DP_COMMON_CAP_DPRX_DONE BIT(31) /* PCIe adapter registers */ #define ADP_PCIE_CS_0 0x00 diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 369800110e5e..4ef5bc8f912b 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -278,11 +278,133 @@ static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out) return -ETIMEDOUT; } +static inline u32 tb_dp_cap_get_rate(u32 val) +{ + u32 rate = (val & DP_COMMON_CAP_RATE_MASK) >> DP_COMMON_CAP_RATE_SHIFT; + + switch (rate) { + case DP_COMMON_CAP_RATE_RBR: + return 1620; + case DP_COMMON_CAP_RATE_HBR: + return 2700; + case DP_COMMON_CAP_RATE_HBR2: + return 5400; + case DP_COMMON_CAP_RATE_HBR3: + return 8100; + default: + return 0; + } +} + +static inline u32 tb_dp_cap_set_rate(u32 val, u32 rate) +{ + val &= ~DP_COMMON_CAP_RATE_MASK; + switch (rate) { + default: + WARN(1, "invalid rate %u passed, defaulting to 1620 MB/s\n", rate); + /* Fallthrough */ + case 1620: + val |= DP_COMMON_CAP_RATE_RBR << DP_COMMON_CAP_RATE_SHIFT; + break; + case 2700: + val |= DP_COMMON_CAP_RATE_HBR << DP_COMMON_CAP_RATE_SHIFT; + break; + case 5400: + val |= DP_COMMON_CAP_RATE_HBR2 << DP_COMMON_CAP_RATE_SHIFT; + break; + case 8100: + val |= DP_COMMON_CAP_RATE_HBR3 << DP_COMMON_CAP_RATE_SHIFT; + break; + } + return val; +} + +static inline u32 tb_dp_cap_get_lanes(u32 val) +{ + u32 lanes = (val & DP_COMMON_CAP_LANES_MASK) >> DP_COMMON_CAP_LANES_SHIFT; + + switch (lanes) { + case DP_COMMON_CAP_1_LANE: + return 1; + case DP_COMMON_CAP_2_LANES: + return 2; + case DP_COMMON_CAP_4_LANES: + return 4; + default: + return 0; + } +} + +static inline u32 tb_dp_cap_set_lanes(u32 val, u32 lanes) +{ + val &= ~DP_COMMON_CAP_LANES_MASK; + switch (lanes) { + default: + WARN(1, "invalid number of lanes %u passed, defaulting to 1\n", + lanes); + /* Fallthrough */ + case 1: + val |= DP_COMMON_CAP_1_LANE << DP_COMMON_CAP_LANES_SHIFT; + break; + case 2: + val |= DP_COMMON_CAP_2_LANES << DP_COMMON_CAP_LANES_SHIFT; + break; + case 4: + val |= DP_COMMON_CAP_4_LANES << DP_COMMON_CAP_LANES_SHIFT; + break; + } + return val; +} + +static unsigned int tb_dp_bandwidth(unsigned int rate, unsigned int lanes) +{ + /* Tunneling removes the DP 8b/10b encoding */ + return rate * lanes * 8 / 10; +} + +static int tb_dp_reduce_bandwidth(int max_bw, u32 rate, u32 lanes, + u32 *new_rate, u32 *new_lanes) +{ + static const u32 dp_bw[][2] = { + /* Mb/s, lanes */ + { 8100, 4 }, /* 25920 Mb/s */ + { 5400, 4 }, /* 17280 Mb/s */ + { 8100, 2 }, /* 12960 Mb/s */ + { 2700, 4 }, /* 8640 Mb/s */ + { 5400, 2 }, /* 8640 Mb/s */ + { 8100, 1 }, /* 6480 Mb/s */ + { 1620, 4 }, /* 5184 Mb/s */ + { 5400, 1 }, /* 4320 Mb/s */ + { 2700, 2 }, /* 4320 Mb/s */ + { 1620, 2 }, /* 2592 Mb/s */ + { 2700, 1 }, /* 2160 Mb/s */ + { 1620, 1 }, /* 1296 Mb/s */ + }; + int i; + + /* + * Find a combination that can fit into max_bw and does not + * exceed the maximum rate and lanes supporteed by the DP OUT + * adapter. 
+ */ + for (i = 0; i < ARRAY_SIZE(dp_bw); i++) { + if (dp_bw[i][0] > rate || dp_bw[i][1] > lanes) + continue; + if (tb_dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) { + *new_rate = dp_bw[i][0]; + *new_lanes = dp_bw[i][1]; + return 0; + } + } + + return -ENOSR; +} + static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) { + u32 in_dp_cap, out_dp_cap, rate, lanes, bw; struct tb_port *out = tunnel->dst_port; struct tb_port *in = tunnel->src_port; - u32 in_dp_cap, out_dp_cap; int ret; /* @@ -317,6 +439,39 @@ static int tb_dp_xchg_caps(struct tb_tunnel *tunnel) if (ret) return ret; + /* + * If the tunnel bandwidth is limited (max_bw is set) then see + * if we need to reduce bandwidth to fit there. + */ + rate = tb_dp_cap_get_rate(out_dp_cap); + lanes = tb_dp_cap_get_lanes(out_dp_cap); + bw = tb_dp_bandwidth(rate, lanes); + + tb_port_dbg(out, "maximum supported bandwidth %u Mb/s x%u = %u Mb/s\n", + rate, lanes, bw); + + if (tunnel->max_bw && bw > tunnel->max_bw) { + u32 new_rate, new_lanes, new_bw; + + ret = tb_dp_reduce_bandwidth(tunnel->max_bw, rate, lanes, + &new_rate, &new_lanes); + if (ret) { + tb_port_info(out, "not enough bandwidth for DP tunnel\n"); + return ret; + } + + new_bw = tb_dp_bandwidth(new_rate, new_lanes); + tb_port_dbg(out, "bandwidth reduced to %u Mb/s x%u = %u Mb/s\n", + new_rate, new_lanes, new_bw); + + /* + * Set new rate and number of lanes before writing it to + * the IN port remote caps. + */ + out_dp_cap = tb_dp_cap_set_rate(out_dp_cap, new_rate); + out_dp_cap = tb_dp_cap_set_lanes(out_dp_cap, new_lanes); + } + return tb_port_write(in, &out_dp_cap, TB_CFG_PORT, in->cap_adap + DP_REMOTE_CAP, 1); } @@ -358,6 +513,56 @@ static int tb_dp_activate(struct tb_tunnel *tunnel, bool active) return 0; } +static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel) +{ + struct tb_port *in = tunnel->src_port; + const struct tb_switch *sw = in->sw; + u32 val, rate = 0, lanes = 0; + int ret; + + if (tb_switch_is_tr(sw)) { + int timeout = 10; + + /* + * Wait for DPRX done. Normally it should be already set + * for active tunnel. + */ + do { + ret = tb_port_read(in, &val, TB_CFG_PORT, + in->cap_adap + DP_COMMON_CAP, 1); + if (ret) + return ret; + + if (val & DP_COMMON_CAP_DPRX_DONE) { + rate = tb_dp_cap_get_rate(val); + lanes = tb_dp_cap_get_lanes(val); + break; + } + msleep(250); + } while (timeout--); + + if (!timeout) + return -ETIMEDOUT; + } else if (sw->generation >= 2) { + /* + * Read from the copied remote cap so that we take into + * account if capabilities were reduced during exchange. 
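Putting the pieces above together, tb_dp_reduce_bandwidth() walks the rate/lane table from the largest to the smallest bandwidth and takes the first entry that both the DP OUT adapter supports and the available bandwidth can carry, with bandwidth computed as rate * lanes * 8 / 10 since tunneling strips the 8b/10b encoding. A standalone worked example; the 18720 Mb/s figure is only an illustration of a limited fabric:

/*
 * Standalone sketch (not driver code) of the rate/lane reduction. The
 * table matches the one in the patch; the driver returns -ENOSR where
 * this sketch returns -1.
 */
#include <stdio.h>

static unsigned int dp_bandwidth(unsigned int rate, unsigned int lanes)
{
	return rate * lanes * 8 / 10;	/* Mb/s, 8b/10b removed */
}

static int reduce_bandwidth(unsigned int max_bw, unsigned int max_rate,
			    unsigned int max_lanes,
			    unsigned int *new_rate, unsigned int *new_lanes)
{
	static const unsigned int dp_bw[][2] = {
		/* Mb/s, lanes */
		{ 8100, 4 }, { 5400, 4 }, { 8100, 2 }, { 2700, 4 },
		{ 5400, 2 }, { 8100, 1 }, { 1620, 4 }, { 5400, 1 },
		{ 2700, 2 }, { 1620, 2 }, { 2700, 1 }, { 1620, 1 },
	};

	for (unsigned int i = 0; i < sizeof(dp_bw) / sizeof(dp_bw[0]); i++) {
		/* Must not exceed what the DP OUT adapter supports */
		if (dp_bw[i][0] > max_rate || dp_bw[i][1] > max_lanes)
			continue;
		if (dp_bandwidth(dp_bw[i][0], dp_bw[i][1]) <= max_bw) {
			*new_rate = dp_bw[i][0];
			*new_lanes = dp_bw[i][1];
			return 0;
		}
	}
	return -1;	/* nothing fits */
}

int main(void)
{
	unsigned int rate, lanes;

	/* DP OUT supports HBR3 x4 (25920 Mb/s) but only 18720 Mb/s is
	 * available on the fabric. */
	if (!reduce_bandwidth(18720, 8100, 4, &rate, &lanes))
		printf("reduced to %u Mb/s x%u = %u Mb/s\n",
		       rate, lanes, dp_bandwidth(rate, lanes));
	return 0;
}

HBR3 x4 does not fit, so HBR2 x4 at 17280 Mb/s is selected, matching the bandwidth comments next to the table in the patch.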
+ */ + ret = tb_port_read(in, &val, TB_CFG_PORT, + in->cap_adap + DP_REMOTE_CAP, 1); + if (ret) + return ret; + + rate = tb_dp_cap_get_rate(val); + lanes = tb_dp_cap_get_lanes(val); + } else { + /* No bandwidth management for legacy devices */ + return 0; + } + + return tb_dp_bandwidth(rate, lanes); +} + static void tb_dp_init_aux_path(struct tb_path *path) { int i; @@ -422,6 +627,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in) tunnel->init = tb_dp_xchg_caps; tunnel->activate = tb_dp_activate; + tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; tunnel->src_port = in; path = tb_path_discover(in, TB_DP_VIDEO_HOPID, NULL, -1, @@ -480,6 +686,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in) * @tb: Pointer to the domain structure * @in: DP in adapter port * @out: DP out adapter port + * @max_bw: Maximum available bandwidth for the DP tunnel (%0 if not limited) * * Allocates a tunnel between @in and @out that is capable of tunneling * Display Port traffic. @@ -487,7 +694,7 @@ struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in) * Return: Returns a tb_tunnel on success or NULL on failure. */ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, - struct tb_port *out) + struct tb_port *out, int max_bw) { struct tb_tunnel *tunnel; struct tb_path **paths; @@ -502,8 +709,10 @@ struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, tunnel->init = tb_dp_xchg_caps; tunnel->activate = tb_dp_activate; + tunnel->consumed_bandwidth = tb_dp_consumed_bandwidth; tunnel->src_port = in; tunnel->dst_port = out; + tunnel->max_bw = max_bw; paths = tunnel->paths; @@ -750,3 +959,62 @@ void tb_tunnel_deactivate(struct tb_tunnel *tunnel) tb_path_deactivate(tunnel->paths[i]); } } + +/** + * tb_tunnel_switch_on_path() - Does the tunnel go through switch + * @tunnel: Tunnel to check + * @sw: Switch to check + * + * Returns true if @tunnel goes through @sw (direction does not matter), + * false otherwise. + */ +bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel, + const struct tb_switch *sw) +{ + int i; + + for (i = 0; i < tunnel->npaths; i++) { + if (!tunnel->paths[i]) + continue; + if (tb_path_switch_on_path(tunnel->paths[i], sw)) + return true; + } + + return false; +} + +static bool tb_tunnel_is_active(const struct tb_tunnel *tunnel) +{ + int i; + + for (i = 0; i < tunnel->npaths; i++) { + if (!tunnel->paths[i]) + return false; + if (!tunnel->paths[i]->activated) + return false; + } + + return true; +} + +/** + * tb_tunnel_consumed_bandwidth() - Return bandwidth consumed by the tunnel + * @tunnel: Tunnel to check + * + * Returns bandwidth currently consumed by @tunnel and %0 if the @tunnel + * is not active or does consume bandwidth. 
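The DPRX done wait above follows the same bounded-polling pattern as the Titan Ridge CM handshake earlier in the series: re-read a status register, sleep, and give up with a timeout error after a fixed number of attempts. A standalone sketch of just that pattern, with a simulated register read standing in for tb_port_read():

/*
 * Standalone sketch (not driver code) of bounded polling for a status
 * bit. The simulated read below reports the bit set on the third
 * attempt; the real driver sleeps between reads (msleep(250)).
 */
#include <stdint.h>
#include <stdio.h>

#define DPRX_DONE	(1u << 31)

/* Simulated hardware: reports DPRX done on the third read */
static uint32_t read_status(void)
{
	static int reads;

	return ++reads >= 3 ? DPRX_DONE : 0;
}

static int wait_dprx_done(int retries)
{
	do {
		uint32_t val = read_status();

		if (val & DPRX_DONE)
			return 0;
		/* sleep here between attempts in the real driver */
	} while (retries--);

	return -1;	/* timed out */
}

int main(void)
{
	printf("DPRX done wait: %s\n",
	       wait_dprx_done(10) ? "timed out" : "ok");
	return 0;
}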
+ */ +int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel) +{ + if (!tb_tunnel_is_active(tunnel)) + return 0; + + if (tunnel->consumed_bandwidth) { + int ret = tunnel->consumed_bandwidth(tunnel); + + tb_tunnel_dbg(tunnel, "consumed bandwidth %d Mb/s\n", ret); + return ret; + } + + return 0; +} diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h index c68bbcd3a62c..ba888da005f5 100644 --- a/drivers/thunderbolt/tunnel.h +++ b/drivers/thunderbolt/tunnel.h @@ -27,8 +27,11 @@ enum tb_tunnel_type { * @npaths: Number of paths in @paths * @init: Optional tunnel specific initialization * @activate: Optional tunnel specific activation/deactivation + * @consumed_bandwidth: Return how much bandwidth the tunnel consumes * @list: Tunnels are linked using this field * @type: Type of the tunnel + * @max_bw: Maximum bandwidth (Mb/s) available for the tunnel (only for DP). + * Only set if the bandwidth needs to be limited. */ struct tb_tunnel { struct tb *tb; @@ -38,8 +41,10 @@ struct tb_tunnel { size_t npaths; int (*init)(struct tb_tunnel *tunnel); int (*activate)(struct tb_tunnel *tunnel, bool activate); + int (*consumed_bandwidth)(struct tb_tunnel *tunnel); struct list_head list; enum tb_tunnel_type type; + unsigned int max_bw; }; struct tb_tunnel *tb_tunnel_discover_pci(struct tb *tb, struct tb_port *down); @@ -47,7 +52,7 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, struct tb_port *down); struct tb_tunnel *tb_tunnel_discover_dp(struct tb *tb, struct tb_port *in); struct tb_tunnel *tb_tunnel_alloc_dp(struct tb *tb, struct tb_port *in, - struct tb_port *out); + struct tb_port *out, int max_bw); struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, struct tb_port *dst, int transmit_ring, int transmit_path, int receive_ring, @@ -58,6 +63,9 @@ int tb_tunnel_activate(struct tb_tunnel *tunnel); int tb_tunnel_restart(struct tb_tunnel *tunnel); void tb_tunnel_deactivate(struct tb_tunnel *tunnel); bool tb_tunnel_is_invalid(struct tb_tunnel *tunnel); +bool tb_tunnel_switch_on_path(const struct tb_tunnel *tunnel, + const struct tb_switch *sw); +int tb_tunnel_consumed_bandwidth(struct tb_tunnel *tunnel); static inline bool tb_tunnel_is_pci(const struct tb_tunnel *tunnel) { From patchwork Tue Oct 1 11:38:23 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168519 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id B0CC414DB for ; Tue, 1 Oct 2019 11:39:01 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 8A35221D71 for ; Tue, 1 Oct 2019 11:39:01 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1732695AbfJALin (ORCPT ); Tue, 1 Oct 2019 07:38:43 -0400 Received: from mga17.intel.com ([192.55.52.151]:27911 "EHLO mga17.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1732669AbfJALim (ORCPT ); Tue, 1 Oct 2019 07:38:42 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga007.jf.intel.com ([10.7.209.58]) by fmsmga107.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:41 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="181663267" Received: from black.fi.intel.com ([10.237.72.28]) by 
orsmga007.jf.intel.com with ESMTP; 01 Oct 2019 04:38:38 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id 836B8592; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 15/22] thunderbolt: Make tb_find_port() available to other files Date: Tue, 1 Oct 2019 14:38:23 +0300 Message-Id: <20191001113830.13028-16-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org We will be needing this when adding initial USB4 support so make it available to other files in the driver as well. We also rename it to tb_switch_find_port() to follow conventions used in switch.c. No functional changes. Signed-off-by: Mika Westerberg --- drivers/thunderbolt/switch.c | 18 ++++++++++++++++++ drivers/thunderbolt/tb.c | 22 ++-------------------- drivers/thunderbolt/tb.h | 2 ++ 3 files changed, 22 insertions(+), 20 deletions(-) diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 87c74c916a7c..8bc5d46011f8 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -2446,6 +2446,24 @@ struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route) return NULL; } +/** + * tb_switch_find_port() - return the first port of @type on @sw or NULL + * @sw: Switch to find the port from + * @type: Port type to look for + */ +struct tb_port *tb_switch_find_port(struct tb_switch *sw, + enum tb_port_type type) +{ + int i; + + tb_switch_for_each_port(sw, i) { + if (sw->ports[i].config.type == type) + return &sw->ports[i]; + } + + return NULL; +} + void tb_switch_exit(void) { ida_destroy(&nvm_ida); diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index a3b7a18dc6d9..eab93e32cc4f 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -343,24 +343,6 @@ static void tb_free_unplugged_children(struct tb_switch *sw) } } -/** - * tb_find_port() - return the first port of @type on @sw or NULL - * @sw: Switch to find the port from - * @type: Port type to look for - */ -static struct tb_port *tb_find_port(struct tb_switch *sw, - enum tb_port_type type) -{ - int i; - - tb_switch_for_each_port(sw, i) { - if (sw->ports[i].config.type == type) - return &sw->ports[i]; - } - - return NULL; -} - /** * tb_find_unused_port() - return the first inactive port on @sw * @sw: Switch to find the port on @@ -590,7 +572,7 @@ static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) struct tb_switch *parent_sw; struct tb_tunnel *tunnel; - up = tb_find_port(sw, TB_TYPE_PCIE_UP); + up = tb_switch_find_port(sw, TB_TYPE_PCIE_UP); if (!up) return 0; @@ -628,7 +610,7 @@ static int tb_approve_xdomain_paths(struct tb *tb, struct tb_xdomain *xd) sw = tb_to_switch(xd->dev.parent); dst_port = tb_port_at(xd->route, sw); - nhi_port = tb_find_port(tb->root_switch, TB_TYPE_NHI); + nhi_port = tb_switch_find_port(tb->root_switch, TB_TYPE_NHI); mutex_lock(&tb->lock); tunnel = tb_tunnel_alloc_dma(tb, nhi_port, dst_port, xd->transmit_ring, diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 
7a8cc56a2870..f4ce739bfd7a 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -533,6 +533,8 @@ void tb_switch_suspend(struct tb_switch *sw); int tb_switch_resume(struct tb_switch *sw); int tb_switch_reset(struct tb *tb, u64 route); void tb_sw_set_unplugged(struct tb_switch *sw); +struct tb_port *tb_switch_find_port(struct tb_switch *sw, + enum tb_port_type type); struct tb_switch *tb_switch_find_by_link_depth(struct tb *tb, u8 link, u8 depth); struct tb_switch *tb_switch_find_by_uuid(struct tb *tb, const uuid_t *uuid); From patchwork Tue Oct 1 11:38:24 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168527 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 6920C112B for ; Tue, 1 Oct 2019 11:39:20 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 523FA21906 for ; Tue, 1 Oct 2019 11:39:20 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1732862AbfJALjC (ORCPT ); Tue, 1 Oct 2019 07:39:02 -0400 Received: from mga17.intel.com ([192.55.52.151]:27911 "EHLO mga17.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1732684AbfJALin (ORCPT ); Tue, 1 Oct 2019 07:38:43 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga007.jf.intel.com ([10.7.209.58]) by fmsmga107.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:42 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="181663269" Received: from black.fi.intel.com ([10.237.72.28]) by orsmga007.jf.intel.com with ESMTP; 01 Oct 2019 04:38:38 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id 8F8865AD; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 16/22] thunderbolt: Call tb_eeprom_get_drom_offset() from tb_eeprom_read_n() Date: Tue, 1 Oct 2019 14:38:24 +0300 Message-Id: <20191001113830.13028-17-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org We are going to re-use tb_drom_read() for USB4 DROM reading as well. USB4 has separate router operations for this which does not need the drom_offset. Therefore we move call to tb_eeprom_get_drom_offset() into tb_eeprom_read_n() where it is needed. While there change return -ENOSYS to -ENODEV because the former is only supposed to be used with system calls (invalid syscall nr). 
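To make the new calling convention concrete, here is a minimal sketch (not part of the patch) using a hypothetical helper tb_drom_read_size(): callers now pass offsets relative to the DROM start and tb_eeprom_read_n() resolves the EEPROM base offset internally. The same logic lives inline in tb_drom_read() in the diff below.

	/* Hypothetical helper for illustration only, not in the driver */
	static int tb_drom_read_size(struct tb_switch *sw, u16 *size)
	{
		u16 val;
		int res;

		/* Offset 14 is relative to the DROM start, not the EEPROM */
		res = tb_eeprom_read_n(sw, 14, (u8 *)&val, 2);
		if (res)
			return res;

		/* Mask the length field the same way tb_drom_read() does */
		*size = val & 0x3ff;
		return 0;
	}
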
Signed-off-by: Mika Westerberg --- drivers/thunderbolt/eeprom.c | 88 ++++++++++++++++++------------------ 1 file changed, 43 insertions(+), 45 deletions(-) diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c index 8dd7de0cc826..540e0105bcc0 100644 --- a/drivers/thunderbolt/eeprom.c +++ b/drivers/thunderbolt/eeprom.c @@ -130,13 +130,52 @@ static int tb_eeprom_in(struct tb_switch *sw, u8 *val) return 0; } +/** + * tb_eeprom_get_drom_offset - get drom offset within eeprom + */ +static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset) +{ + struct tb_cap_plug_events cap; + int res; + + if (!sw->cap_plug_events) { + tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n"); + return -ENODEV; + } + res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events, + sizeof(cap) / 4); + if (res) + return res; + + if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) { + tb_sw_warn(sw, "no NVM\n"); + return -ENODEV; + } + + if (cap.drom_offset > 0xffff) { + tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n", + cap.drom_offset); + return -ENXIO; + } + *offset = cap.drom_offset; + return 0; +} + /** * tb_eeprom_read_n - read count bytes from offset into val */ static int tb_eeprom_read_n(struct tb_switch *sw, u16 offset, u8 *val, size_t count) { + u16 drom_offset; int i, res; + + res = tb_eeprom_get_drom_offset(sw, &drom_offset); + if (res) + return res; + + offset += drom_offset; + res = tb_eeprom_active(sw, true); if (res) return res; @@ -238,36 +277,6 @@ struct tb_drom_entry_port { } __packed; -/** - * tb_eeprom_get_drom_offset - get drom offset within eeprom - */ -static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset) -{ - struct tb_cap_plug_events cap; - int res; - if (!sw->cap_plug_events) { - tb_sw_warn(sw, "no TB_CAP_PLUG_EVENTS, cannot read eeprom\n"); - return -ENOSYS; - } - res = tb_sw_read(sw, &cap, TB_CFG_SWITCH, sw->cap_plug_events, - sizeof(cap) / 4); - if (res) - return res; - - if (!cap.eeprom_ctl.present || cap.eeprom_ctl.not_present) { - tb_sw_warn(sw, "no NVM\n"); - return -ENOSYS; - } - - if (cap.drom_offset > 0xffff) { - tb_sw_warn(sw, "drom offset is larger than 0xffff: %#x\n", - cap.drom_offset); - return -ENXIO; - } - *offset = cap.drom_offset; - return 0; -} - /** * tb_drom_read_uid_only - read uid directly from drom * @@ -277,17 +286,11 @@ static int tb_eeprom_get_drom_offset(struct tb_switch *sw, u16 *offset) int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid) { u8 data[9]; - u16 drom_offset; u8 crc; - int res = tb_eeprom_get_drom_offset(sw, &drom_offset); - if (res) - return res; - - if (drom_offset == 0) - return -ENODEV; + int res; /* read uid */ - res = tb_eeprom_read_n(sw, drom_offset, data, 9); + res = tb_eeprom_read_n(sw, 0, data, 9); if (res) return res; @@ -489,7 +492,6 @@ static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size) */ int tb_drom_read(struct tb_switch *sw) { - u16 drom_offset; u16 size; u32 crc; struct tb_drom_header *header; @@ -517,11 +519,7 @@ int tb_drom_read(struct tb_switch *sw) return 0; } - res = tb_eeprom_get_drom_offset(sw, &drom_offset); - if (res) - return res; - - res = tb_eeprom_read_n(sw, drom_offset + 14, (u8 *) &size, 2); + res = tb_eeprom_read_n(sw, 14, (u8 *) &size, 2); if (res) return res; size &= 0x3ff; @@ -535,7 +533,7 @@ int tb_drom_read(struct tb_switch *sw) sw->drom = kzalloc(size, GFP_KERNEL); if (!sw->drom) return -ENOMEM; - res = tb_eeprom_read_n(sw, drom_offset, sw->drom, size); + res = tb_eeprom_read_n(sw, 0, sw->drom, size); if (res) goto err; 
From patchwork Tue Oct 1 11:38:25 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168517 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id D3DA1112B for ; Tue, 1 Oct 2019 11:38:46 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 8BB5B21A4A for ; Tue, 1 Oct 2019 11:38:46 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1732725AbfJALip (ORCPT ); Tue, 1 Oct 2019 07:38:45 -0400 Received: from mga02.intel.com ([134.134.136.20]:39081 "EHLO mga02.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1732694AbfJALio (ORCPT ); Tue, 1 Oct 2019 07:38:44 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from orsmga007.jf.intel.com ([10.7.209.58]) by orsmga101.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:43 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="181663276" Received: from black.fi.intel.com ([10.237.72.28]) by orsmga007.jf.intel.com with ESMTP; 01 Oct 2019 04:38:39 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id 9CE415C1; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 17/22] thunderbolt: Add initial support for USB4 Date: Tue, 1 Oct 2019 14:38:25 +0300 Message-Id: <20191001113830.13028-18-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org USB4 is a public spec based on Thunderbolt protocol. There are some differences in register layouts and flows. In addition to PCIe and DP tunneling, USB4 supports tunneling of USB 3.x. USB4 is also backward compatible with Thunderbolt 3 (and older generations but the spec only talks about 3rd generation). USB4 compliant devices can be identified by checking USB4 version field in router configuration space. This patch adds initial support for USB4 compliant hosts and devices which enables following features provided by the existing functionality in the driver: - PCIe tunneling - Display Port tunneling - Host and device NVM firmware upgrade - P2P networking This brings the USB4 support to the same level that we already have for Thunderbolt 1, 2 and 3 devices. Note the spec talks about host and device "routers" but in the driver we still use term "switch" in most places. Both can be used interchangeably. This also updates the Kconfig entry accordingly. 
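For reference, the USB4 detection itself boils down to a single check against the version field in the router configuration space header. The helper below is the one this patch adds to tb.h, reproduced here for readability:

	static inline bool tb_switch_is_usb4(const struct tb_switch *sw)
	{
		/* USB4 routers report USB4 version 1.0 (0x20) in this field */
		return sw->config.thunderbolt_version == USB4_VERSION_1_0;
	}

Callers then branch between the new usb4_switch_*() operations and the legacy link controller/DMA port paths, for example in tb_switch_resume():

	if (tb_switch_is_usb4(sw))
		err = usb4_switch_read_uid(sw, &uid);
	else
		err = tb_drom_read_uid_only(sw, &uid);
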
Co-developed-by: Rajmohan Mani Signed-off-by: Rajmohan Mani Signed-off-by: Mika Westerberg --- drivers/thunderbolt/Kconfig | 9 +- drivers/thunderbolt/Makefile | 2 +- drivers/thunderbolt/eeprom.c | 53 ++- drivers/thunderbolt/nhi.c | 3 + drivers/thunderbolt/nhi.h | 2 + drivers/thunderbolt/switch.c | 384 +++++++++++++----- drivers/thunderbolt/tb.c | 20 +- drivers/thunderbolt/tb.h | 36 ++ drivers/thunderbolt/tb_regs.h | 36 +- drivers/thunderbolt/tunnel.c | 10 +- drivers/thunderbolt/usb4.c | 722 ++++++++++++++++++++++++++++++++++ drivers/thunderbolt/xdomain.c | 6 + 12 files changed, 1162 insertions(+), 121 deletions(-) create mode 100644 drivers/thunderbolt/usb4.c diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig index fd9adca898ff..8193ec310bae 100644 --- a/drivers/thunderbolt/Kconfig +++ b/drivers/thunderbolt/Kconfig @@ -1,6 +1,6 @@ # SPDX-License-Identifier: GPL-2.0-only menuconfig THUNDERBOLT - tristate "Thunderbolt support" + tristate "USB4 (Thunderbolt) support" depends on PCI depends on X86 || COMPILE_TEST select APPLE_PROPERTIES if EFI_STUB && X86 @@ -9,9 +9,10 @@ menuconfig THUNDERBOLT select CRYPTO_HASH select NVMEM help - Thunderbolt Controller driver. This driver is required if you - want to hotplug Thunderbolt devices on Apple hardware or on PCs - with Intel Falcon Ridge or newer. + USB4 (Thunderbolt) driver. USB4 is the public spec based on + Thunderbolt 3 protocol. This driver is required if you want to + hotplug Thunderbolt and USB4 compliant devices on Apple + hardware or on PCs with Intel Falcon Ridge or newer. To compile this driver a module, choose M here. The module will be called thunderbolt. diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile index 001187c577bf..c0b2fd73dfbd 100644 --- a/drivers/thunderbolt/Makefile +++ b/drivers/thunderbolt/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-${CONFIG_THUNDERBOLT} := thunderbolt.o thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o -thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o +thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o usb4.o diff --git a/drivers/thunderbolt/eeprom.c b/drivers/thunderbolt/eeprom.c index 540e0105bcc0..921d164b3f35 100644 --- a/drivers/thunderbolt/eeprom.c +++ b/drivers/thunderbolt/eeprom.c @@ -487,6 +487,37 @@ static int tb_drom_copy_nvm(struct tb_switch *sw, u16 *size) return ret; } +static int usb4_copy_host_drom(struct tb_switch *sw, u16 *size) +{ + int ret; + + ret = usb4_switch_drom_read(sw, 14, size, sizeof(*size)); + if (ret) + return ret; + + /* Size includes CRC8 + UID + CRC32 */ + *size += 1 + 8 + 4; + sw->drom = kzalloc(*size, GFP_KERNEL); + if (!sw->drom) + return -ENOMEM; + + ret = usb4_switch_drom_read(sw, 0, sw->drom, *size); + if (ret) { + kfree(sw->drom); + sw->drom = NULL; + } + + return ret; +} + +static int tb_drom_read_n(struct tb_switch *sw, u16 offset, u8 *val, + size_t count) +{ + if (tb_switch_is_usb4(sw)) + return usb4_switch_drom_read(sw, offset, val, count); + return tb_eeprom_read_n(sw, offset, val, count); +} + /** * tb_drom_read - copy drom to sw->drom and parse it */ @@ -512,14 +543,26 @@ int tb_drom_read(struct tb_switch *sw) goto parse; /* - * The root switch contains only a dummy drom (header only, - * no entries). Hardcode the configuration here. + * USB4 hosts may support reading DROM through router + * operations. 
*/ - tb_drom_read_uid_only(sw, &sw->uid); + if (tb_switch_is_usb4(sw)) { + usb4_switch_read_uid(sw, &sw->uid); + if (!usb4_copy_host_drom(sw, &size)) + goto parse; + } else { + /* + * The root switch contains only a dummy drom + * (header only, no entries). Hardcode the + * configuration here. + */ + tb_drom_read_uid_only(sw, &sw->uid); + } + return 0; } - res = tb_eeprom_read_n(sw, 14, (u8 *) &size, 2); + res = tb_drom_read_n(sw, 14, (u8 *) &size, 2); if (res) return res; size &= 0x3ff; @@ -533,7 +576,7 @@ int tb_drom_read(struct tb_switch *sw) sw->drom = kzalloc(size, GFP_KERNEL); if (!sw->drom) return -ENOMEM; - res = tb_eeprom_read_n(sw, 0, sw->drom, size); + res = tb_drom_read_n(sw, 0, sw->drom, size); if (res) goto err; diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 641b21b54460..1be491ecbb45 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -1271,6 +1271,9 @@ static struct pci_device_id nhi_ids[] = { { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICL_NHI1), .driver_data = (kernel_ulong_t)&icl_nhi_ops }, + /* Any USB4 compliant host */ + { PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_USB4, ~0) }, + { 0,} }; diff --git a/drivers/thunderbolt/nhi.h b/drivers/thunderbolt/nhi.h index b7b973949f8e..5d276ee9b38e 100644 --- a/drivers/thunderbolt/nhi.h +++ b/drivers/thunderbolt/nhi.h @@ -74,4 +74,6 @@ extern const struct tb_nhi_ops icl_nhi_ops; #define PCI_DEVICE_ID_INTEL_ICL_NHI1 0x8a0d #define PCI_DEVICE_ID_INTEL_ICL_NHI0 0x8a17 +#define PCI_CLASS_SERIAL_USB_USB4 0x0c0340 + #endif diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 8bc5d46011f8..2ccd1004920e 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -163,10 +163,12 @@ static int nvm_validate_and_write(struct tb_switch *sw) image_size -= hdr_size; } + if (tb_switch_is_usb4(sw)) + return usb4_switch_nvm_write(sw, 0, buf, image_size); return dma_port_flash_write(sw->dma_port, 0, buf, image_size); } -static int nvm_authenticate_host(struct tb_switch *sw) +static int nvm_authenticate_host_dma_port(struct tb_switch *sw) { int ret; @@ -195,7 +197,7 @@ static int nvm_authenticate_host(struct tb_switch *sw) return 0; } -static int nvm_authenticate_device(struct tb_switch *sw) +static int nvm_authenticate_device_dma_port(struct tb_switch *sw) { int ret, retries = 10; @@ -232,6 +234,80 @@ static int nvm_authenticate_device(struct tb_switch *sw) return -ETIMEDOUT; } +static void nvm_authenticate_start_dma_port(struct tb_switch *sw) +{ + struct pci_dev *root_port; + + /* + * During host router NVM upgrade we should not allow root port to + * go into D3cold because some root ports cannot trigger PME + * itself. To be on the safe side keep the root port in D0 during + * the whole upgrade process. + */ + root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); + if (root_port) + pm_runtime_get_noresume(&root_port->dev); +} + +static void nvm_authenticate_complete_dma_port(struct tb_switch *sw) +{ + struct pci_dev *root_port; + + root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); + if (root_port) + pm_runtime_put(&root_port->dev); +} + +static inline bool nvm_readable(struct tb_switch *sw) +{ + if (tb_switch_is_usb4(sw)) { + /* + * USB4 devices must support NVM operations but it is + * optional for hosts. Therefore we query the NVM sector + * size here and if it is supported assume NVM + * operations are implemented. 
+ */ + return usb4_switch_nvm_sector_size(sw) > 0; + } + + /* Thunderbolt 2 and 3 devices support NVM through DMA port */ + return !!sw->dma_port; +} + +static inline bool nvm_upgradeable(struct tb_switch *sw) +{ + if (sw->no_nvm_upgrade) + return false; + return nvm_readable(sw); +} + +static inline int nvm_read(struct tb_switch *sw, unsigned int address, + void *buf, size_t size) +{ + if (tb_switch_is_usb4(sw)) + return usb4_switch_nvm_read(sw, address, buf, size); + return dma_port_flash_read(sw->dma_port, address, buf, size); +} + +static int nvm_authenticate(struct tb_switch *sw) +{ + int ret; + + if (tb_switch_is_usb4(sw)) + return usb4_switch_nvm_authenticate(sw); + + if (!tb_route(sw)) { + nvm_authenticate_start_dma_port(sw); + ret = nvm_authenticate_host_dma_port(sw); + if (ret) + nvm_authenticate_complete_dma_port(sw); + } else { + ret = nvm_authenticate_device_dma_port(sw); + } + + return ret; +} + static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val, size_t bytes) { @@ -245,7 +321,7 @@ static int tb_switch_nvm_read(void *priv, unsigned int offset, void *val, goto out; } - ret = dma_port_flash_read(sw->dma_port, offset, val, bytes); + ret = nvm_read(sw, offset, val, bytes); mutex_unlock(&sw->tb->lock); out: @@ -322,9 +398,21 @@ static int tb_switch_nvm_add(struct tb_switch *sw) u32 val; int ret; - if (!sw->dma_port) + if (!nvm_readable(sw)) return 0; + /* + * The NVM format of non-Intel hardware is not known so + * currently restrict NVM upgrade for Intel hardware. We may + * relax this in the future when we learn other NVM formats. + */ + if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) { + dev_info(&sw->dev, + "NVM format of vendor %#x is not known, disabling NVM upgrade\n", + sw->config.vendor_id); + return 0; + } + nvm = kzalloc(sizeof(*nvm), GFP_KERNEL); if (!nvm) return -ENOMEM; @@ -339,8 +427,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw) if (!sw->safe_mode) { u32 nvm_size, hdr_size; - ret = dma_port_flash_read(sw->dma_port, NVM_FLASH_SIZE, &val, - sizeof(val)); + ret = nvm_read(sw, NVM_FLASH_SIZE, &val, sizeof(val)); if (ret) goto err_ida; @@ -348,8 +435,7 @@ static int tb_switch_nvm_add(struct tb_switch *sw) nvm_size = (SZ_1M << (val & 7)) / 8; nvm_size = (nvm_size - hdr_size) / 2; - ret = dma_port_flash_read(sw->dma_port, NVM_VERSION, &val, - sizeof(val)); + ret = nvm_read(sw, NVM_VERSION, &val, sizeof(val)); if (ret) goto err_ida; @@ -600,6 +686,24 @@ int tb_port_clear_counter(struct tb_port *port, int counter) return tb_port_write(port, zero, TB_CFG_COUNTERS, 3 * counter, 3); } +/** + * tb_port_unlock() - Unlock downstream port + * @port: Port to unlock + * + * Needed for USB4 but can be called for any CIO/USB4 ports. Makes the + * downstream router accessible for CM. 
+ */ +int tb_port_unlock(struct tb_port *port) +{ + if (tb_switch_is_icm(port->sw)) + return 0; + if (!tb_port_is_null(port)) + return -EINVAL; + if (tb_switch_is_usb4(port->sw)) + return usb4_port_unlock(port); + return 0; +} + /** * tb_init_port() - initialize a port * @@ -631,6 +735,10 @@ static int tb_init_port(struct tb_port *port) port->cap_phy = cap; else tb_port_WARN(port, "non switch port without a PHY\n"); + + cap = tb_port_find_cap(port, TB_PORT_CAP_USB4); + if (cap > 0) + port->cap_usb4 = cap; } else if (port->port != 0) { cap = tb_port_find_cap(port, TB_PORT_CAP_ADAP); if (cap > 0) @@ -1069,20 +1177,38 @@ int tb_dp_port_enable(struct tb_port *port, bool enable) /* switch utility functions */ -static void tb_dump_switch(struct tb *tb, struct tb_regs_switch_header *sw) +static const char *tb_switch_generation_name(const struct tb_switch *sw) +{ + switch (sw->generation) { + case 1: + return "Thunderbolt 1"; + case 2: + return "Thunderbolt 2"; + case 3: + return "Thunderbolt 3"; + case 4: + return "USB4"; + default: + return "Unknown"; + } +} + +static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw) { - tb_dbg(tb, " Switch: %x:%x (Revision: %d, TB Version: %d)\n", - sw->vendor_id, sw->device_id, sw->revision, - sw->thunderbolt_version); - tb_dbg(tb, " Max Port Number: %d\n", sw->max_port_number); + const struct tb_regs_switch_header *regs = &sw->config; + + tb_dbg(tb, " %s Switch: %x:%x (Revision: %d, TB Version: %d)\n", + tb_switch_generation_name(sw), regs->vendor_id, regs->device_id, + regs->revision, regs->thunderbolt_version); + tb_dbg(tb, " Max Port Number: %d\n", regs->max_port_number); tb_dbg(tb, " Config:\n"); tb_dbg(tb, " Upstream Port Number: %d Depth: %d Route String: %#llx Enabled: %d, PlugEventsDelay: %dms\n", - sw->upstream_port_number, sw->depth, - (((u64) sw->route_hi) << 32) | sw->route_lo, - sw->enabled, sw->plug_events_delay); + regs->upstream_port_number, regs->depth, + (((u64) regs->route_hi) << 32) | regs->route_lo, + regs->enabled, regs->plug_events_delay); tb_dbg(tb, " unknown1: %#x unknown4: %#x\n", - sw->__unknown1, sw->__unknown4); + regs->__unknown1, regs->__unknown4); } /** @@ -1129,6 +1255,10 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active) if (res) return res; + /* Plug events are always enabled in USB4 */ + if (tb_switch_is_usb4(sw)) + return 0; + res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1); if (res) return res; @@ -1321,30 +1451,6 @@ static ssize_t link_width_show(struct device *dev, } static DEVICE_ATTR_RO(link_width); -static void nvm_authenticate_start(struct tb_switch *sw) -{ - struct pci_dev *root_port; - - /* - * During host router NVM upgrade we should not allow root port to - * go into D3cold because some root ports cannot trigger PME - * itself. To be on the safe side keep the root port in D0 during - * the whole upgrade process. 
- */ - root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); - if (root_port) - pm_runtime_get_noresume(&root_port->dev); -} - -static void nvm_authenticate_complete(struct tb_switch *sw) -{ - struct pci_dev *root_port; - - root_port = pci_find_pcie_root_port(sw->tb->nhi->pdev); - if (root_port) - pm_runtime_put(&root_port->dev); -} - static ssize_t nvm_authenticate_show(struct device *dev, struct device_attribute *attr, char *buf) { @@ -1393,19 +1499,7 @@ static ssize_t nvm_authenticate_store(struct device *dev, goto exit_unlock; sw->nvm->authenticating = true; - - if (!tb_route(sw)) { - /* - * Keep root port from suspending as long as the - * NVM upgrade process is running. - */ - nvm_authenticate_start(sw); - ret = nvm_authenticate_host(sw); - if (ret) - nvm_authenticate_complete(sw); - } else { - ret = nvm_authenticate_device(sw); - } + ret = nvm_authenticate(sw); } exit_unlock: @@ -1515,11 +1609,11 @@ static umode_t switch_attr_is_visible(struct kobject *kobj, return attr->mode; return 0; } else if (attr == &dev_attr_nvm_authenticate.attr) { - if (sw->dma_port && !sw->no_nvm_upgrade) + if (nvm_upgradeable(sw)) return attr->mode; return 0; } else if (attr == &dev_attr_nvm_version.attr) { - if (sw->dma_port) + if (nvm_readable(sw)) return attr->mode; return 0; } else if (attr == &dev_attr_boot.attr) { @@ -1631,6 +1725,9 @@ static int tb_switch_get_generation(struct tb_switch *sw) return 3; default: + if (tb_switch_is_usb4(sw)) + return 4; + /* * For unknown switches assume generation to be 1 to be * on the safe side. @@ -1641,6 +1738,19 @@ static int tb_switch_get_generation(struct tb_switch *sw) } } +static bool tb_switch_exceeds_max_depth(const struct tb_switch *sw, int depth) +{ + int max_depth; + + if (tb_switch_is_usb4(sw) || + (sw->tb->root_switch && tb_switch_is_usb4(sw->tb->root_switch))) + max_depth = USB4_SWITCH_MAX_DEPTH; + else + max_depth = TB_SWITCH_MAX_DEPTH; + + return depth > max_depth; +} + /** * tb_switch_alloc() - allocate a switch * @tb: Pointer to the owning domain @@ -1662,10 +1772,16 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, int upstream_port; int i, ret, depth; - /* Make sure we do not exceed maximum topology limit */ + /* Unlock the downstream port so we can access the switch below */ + if (route) { + struct tb_switch *parent_sw = tb_to_switch(parent); + struct tb_port *down; + + down = tb_port_at(route, parent_sw); + tb_port_unlock(down); + } + depth = tb_route_length(route); - if (depth > TB_SWITCH_MAX_DEPTH) - return ERR_PTR(-EADDRNOTAVAIL); upstream_port = tb_cfg_get_upstream_port(tb->ctl, route); if (upstream_port < 0) @@ -1680,8 +1796,10 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, if (ret) goto err_free_sw_ports; + sw->generation = tb_switch_get_generation(sw); + tb_dbg(tb, "current switch config:\n"); - tb_dump_switch(tb, &sw->config); + tb_dump_switch(tb, sw); /* configure switch */ sw->config.upstream_port_number = upstream_port; @@ -1690,6 +1808,10 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, sw->config.route_lo = lower_32_bits(route); sw->config.enabled = 0; + /* Make sure we do not exceed maximum topology limit */ + if (tb_switch_exceeds_max_depth(sw, depth)) + return ERR_PTR(-EADDRNOTAVAIL); + /* initialize ports */ sw->ports = kcalloc(sw->config.max_port_number + 1, sizeof(*sw->ports), GFP_KERNEL); @@ -1704,14 +1826,9 @@ struct tb_switch *tb_switch_alloc(struct tb *tb, struct device *parent, sw->ports[i].port = i; } - sw->generation = 
tb_switch_get_generation(sw); - ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_PLUG_EVENTS); - if (ret < 0) { - tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n"); - goto err_free_sw_ports; - } - sw->cap_plug_events = ret; + if (ret > 0) + sw->cap_plug_events = ret; ret = tb_switch_find_vse_cap(sw, TB_VSE_CAP_LINK_CONTROLLER); if (ret > 0) @@ -1782,7 +1899,8 @@ tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) * * Call this function before the switch is added to the system. It will * upload configuration to the switch and makes it available for the - * connection manager to use. + * connection manager to use. Can be called to the switch again after + * resume from low power states to re-initialize it. * * Return: %0 in case of success and negative errno in case of failure */ @@ -1793,21 +1911,50 @@ int tb_switch_configure(struct tb_switch *sw) int ret; route = tb_route(sw); - tb_dbg(tb, "initializing Switch at %#llx (depth: %d, up port: %d)\n", - route, tb_route_length(route), sw->config.upstream_port_number); - if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) - tb_sw_warn(sw, "unknown switch vendor id %#x\n", - sw->config.vendor_id); + tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n", + sw->config.enabled ? "restoring " : "initializing", route, + tb_route_length(route), sw->config.upstream_port_number); sw->config.enabled = 1; - /* upload configuration */ - ret = tb_sw_write(sw, 1 + (u32 *)&sw->config, TB_CFG_SWITCH, 1, 3); - if (ret) - return ret; + if (tb_switch_is_usb4(sw)) { + /* + * For USB4 devices, we need to program the CM version + * accordingly so that it knows to expose all the + * additional capabilities. + */ + sw->config.cmuv = USB4_VERSION_1_0; + + /* Enumerate the switch */ + ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, + ROUTER_CS_1, 4); + if (ret) + return ret; + + ret = usb4_switch_setup(sw); + if (ret) + return ret; + + ret = usb4_switch_configure_link(sw); + } else { + if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) + tb_sw_warn(sw, "unknown switch vendor id %#x\n", + sw->config.vendor_id); + + if (!sw->cap_plug_events) { + tb_sw_warn(sw, "cannot find TB_VSE_CAP_PLUG_EVENTS aborting\n"); + return -ENODEV; + } + + /* Enumerate the switch */ + ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, + ROUTER_CS_1, 3); + if (ret) + return ret; - ret = tb_lc_configure_link(sw); + ret = tb_lc_configure_link(sw); + } if (ret) return ret; @@ -1816,18 +1963,32 @@ int tb_switch_configure(struct tb_switch *sw) static int tb_switch_set_uuid(struct tb_switch *sw) { + bool uid = false; u32 uuid[4]; int ret; if (sw->uuid) return 0; - /* - * The newer controllers include fused UUID as part of link - * controller specific registers - */ - ret = tb_lc_read_uuid(sw, uuid); - if (ret) { + if (tb_switch_is_usb4(sw)) { + ret = usb4_switch_read_uid(sw, &sw->uid); + if (ret) + return ret; + uid = true; + } else { + /* + * The newer controllers include fused UUID as part of + * link controller specific registers + */ + ret = tb_lc_read_uuid(sw, uuid); + if (ret) { + if (ret != -EINVAL) + return ret; + uid = true; + } + } + + if (uid) { /* * ICM generates UUID based on UID and fills the upper * two words with ones. 
This is not strictly following @@ -1893,7 +2054,7 @@ static int tb_switch_add_dma_port(struct tb_switch *sw) /* Now we can allow root port to suspend again */ if (!tb_route(sw)) - nvm_authenticate_complete(sw); + nvm_authenticate_complete_dma_port(sw); if (status) { tb_sw_info(sw, "switch flash authentication failed\n"); @@ -1950,6 +2111,8 @@ static bool tb_switch_lane_bonding_possible(struct tb_switch *sw) if (!up->dual_link_port || !up->dual_link_port->remote) return false; + if (tb_switch_is_usb4(sw)) + return usb4_switch_lane_bonding_possible(sw); return tb_lc_lane_bonding_possible(sw); } @@ -2177,7 +2340,11 @@ void tb_switch_remove(struct tb_switch *sw) if (!sw->is_unplugged) tb_plug_events_active(sw, false); - tb_lc_unconfigure_link(sw); + + if (tb_switch_is_usb4(sw)) + usb4_switch_unconfigure_link(sw); + else + tb_lc_unconfigure_link(sw); tb_switch_nvm_remove(sw); @@ -2232,7 +2399,10 @@ int tb_switch_resume(struct tb_switch *sw) return err; } - err = tb_drom_read_uid_only(sw, &uid); + if (tb_switch_is_usb4(sw)) + err = usb4_switch_read_uid(sw, &uid); + else + err = tb_drom_read_uid_only(sw, &uid); if (err) { tb_sw_warn(sw, "uid read failed\n"); return err; @@ -2245,16 +2415,7 @@ int tb_switch_resume(struct tb_switch *sw) } } - /* upload configuration */ - err = tb_sw_write(sw, 1 + (u32 *) &sw->config, TB_CFG_SWITCH, 1, 3); - if (err) - return err; - - err = tb_lc_configure_link(sw); - if (err) - return err; - - err = tb_plug_events_active(sw, true); + err = tb_switch_configure(sw); if (err) return err; @@ -2269,8 +2430,14 @@ int tb_switch_resume(struct tb_switch *sw) tb_sw_set_unplugged(port->remote->sw); else if (port->xdomain) port->xdomain->is_unplugged = true; - } else if (tb_port_has_remote(port)) { - if (tb_switch_resume(port->remote->sw)) { + } else if (tb_port_has_remote(port) || port->xdomain) { + /* + * Always unlock the port so the downstream + * switch/domain is accessible. 
+ */ + if (tb_port_unlock(port)) + tb_port_warn(port, "failed to unlock port\n"); + if (port->remote && tb_switch_resume(port->remote->sw)) { tb_port_warn(port, "lost during suspend, disconnecting\n"); tb_sw_set_unplugged(port->remote->sw); @@ -2290,7 +2457,10 @@ void tb_switch_suspend(struct tb_switch *sw) tb_switch_for_each_remote_port(sw, i) tb_switch_suspend(sw->ports[i].remote->sw); - tb_lc_set_sleep(sw); + if (tb_switch_is_usb4(sw)) + usb4_switch_set_sleep(sw); + else + tb_lc_set_sleep(sw); } /** @@ -2303,6 +2473,8 @@ void tb_switch_suspend(struct tb_switch *sw) */ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) { + if (tb_switch_is_usb4(sw)) + return usb4_switch_query_dp_resource(sw, in); return tb_lc_dp_sink_query(sw, in); } @@ -2317,6 +2489,8 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) */ int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) { + if (tb_switch_is_usb4(sw)) + return usb4_switch_alloc_dp_resource(sw, in); return tb_lc_dp_sink_alloc(sw, in); } @@ -2330,10 +2504,16 @@ int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) */ void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) { - if (tb_lc_dp_sink_dealloc(sw, in)) { + int ret; + + if (tb_switch_is_usb4(sw)) + ret = usb4_switch_dealloc_dp_resource(sw, in); + else + ret = tb_lc_dp_sink_dealloc(sw, in); + + if (ret) tb_sw_warn(sw, "failed to de-allocate DP resource for port %d\n", in->port); - } } struct tb_sw_lookup { diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index eab93e32cc4f..24e37e47dc48 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -370,12 +370,15 @@ static struct tb_port *tb_find_unused_port(struct tb_switch *sw, static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, const struct tb_port *port) { + struct tb_port *down = NULL; + /* * To keep plugging devices consistently in the same PCIe - * hierarchy, do mapping here for root switch downstream PCIe - * ports. + * hierarchy, do mapping here for switch downstream PCIe ports. 
*/ - if (!tb_route(sw)) { + if (tb_switch_is_usb4(sw)) { + down = usb4_switch_map_pcie_down(sw, port); + } else if (!tb_route(sw)) { int phy_port = tb_phy_port_from_link(port->port); int index; @@ -395,12 +398,17 @@ static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, /* Validate the hard-coding */ if (WARN_ON(index > sw->config.max_port_number)) goto out; - if (WARN_ON(!tb_port_is_pcie_down(&sw->ports[index]))) + + down = &sw->ports[index]; + } + + if (down) { + if (WARN_ON(!tb_port_is_pcie_down(down))) goto out; - if (WARN_ON(tb_pci_port_is_enabled(&sw->ports[index]))) + if (WARN_ON(tb_pci_port_is_enabled(down))) goto out; - return &sw->ports[index]; + return down; } out: diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index f4ce739bfd7a..60f237020d1b 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -44,6 +44,7 @@ struct tb_switch_nvm { #define TB_SWITCH_KEY_SIZE 32 #define TB_SWITCH_MAX_DEPTH 6 +#define USB4_SWITCH_MAX_DEPTH 5 /** * struct tb_switch - a thunderbolt switch @@ -129,6 +130,7 @@ struct tb_switch { * @xdomain: Remote host (%NULL if not connected) * @cap_phy: Offset, zero if not found * @cap_adap: Offset of the adapter specific capability (%0 if not present) + * @cap_usb4: Offset to the USB4 port capability (%0 if not present) * @port: Port number on switch * @disabled: Disabled by eeprom * @bonded: true if the port is bonded (two lanes combined as one) @@ -146,6 +148,7 @@ struct tb_port { struct tb_xdomain *xdomain; int cap_phy; int cap_adap; + int cap_usb4; u8 port; bool disabled; bool bonded; @@ -661,6 +664,17 @@ static inline bool tb_switch_is_tr(const struct tb_switch *sw) } } +/** + * tb_switch_is_usb4() - Is the switch USB4 compliant + * @sw: Switch to check + * + * Returns true if the @sw is USB4 compliant router, false otherwise. 
+ */ +static inline bool tb_switch_is_usb4(const struct tb_switch *sw) +{ + return sw->config.thunderbolt_version == USB4_VERSION_1_0; +} + /** * tb_switch_is_icm() - Is the switch handled by ICM firmware * @sw: Switch to check @@ -686,6 +700,7 @@ int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); int tb_port_add_nfc_credits(struct tb_port *port, int credits); int tb_port_set_initial_credits(struct tb_port *port, u32 credits); int tb_port_clear_counter(struct tb_port *port, int counter); +int tb_port_unlock(struct tb_port *port); int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid); void tb_port_release_in_hopid(struct tb_port *port, int hopid); int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid); @@ -760,4 +775,25 @@ void tb_xdomain_remove(struct tb_xdomain *xd); struct tb_xdomain *tb_xdomain_find_by_link_depth(struct tb *tb, u8 link, u8 depth); +int usb4_switch_setup(struct tb_switch *sw); +int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid); +int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, + size_t size); +int usb4_switch_configure_link(struct tb_switch *sw); +void usb4_switch_unconfigure_link(struct tb_switch *sw); +bool usb4_switch_lane_bonding_possible(struct tb_switch *sw); +int usb4_switch_set_sleep(struct tb_switch *sw); +int usb4_switch_nvm_sector_size(struct tb_switch *sw); +int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, + size_t size); +int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, + const void *buf, size_t size); +int usb4_switch_nvm_authenticate(struct tb_switch *sw); +bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in); +int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); +int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in); +struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, + const struct tb_port *port); + +int usb4_port_unlock(struct tb_port *port); #endif diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index 7ee45b73c7f7..47f73f992412 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -41,6 +41,7 @@ enum tb_port_cap { TB_PORT_CAP_TIME1 = 0x03, TB_PORT_CAP_ADAP = 0x04, TB_PORT_CAP_VSE = 0x05, + TB_PORT_CAP_USB4 = 0x06, }; enum tb_port_state { @@ -164,10 +165,36 @@ struct tb_regs_switch_header { * milliseconds. Writing 0x00 is interpreted * as 255ms. 
*/ - u32 __unknown4:16; + u32 cmuv:8; + u32 __unknown4:8; u32 thunderbolt_version:8; } __packed; +/* USB4 version 1.0 */ +#define USB4_VERSION_1_0 0x20 + +#define ROUTER_CS_1 0x01 +#define ROUTER_CS_4 0x04 +#define ROUTER_CS_5 0x05 +#define ROUTER_CS_5_SLP BIT(0) +#define ROUTER_CS_5_C3S BIT(23) +#define ROUTER_CS_5_PTO BIT(24) +#define ROUTER_CS_5_HCO BIT(26) +#define ROUTER_CS_5_CV BIT(31) +#define ROUTER_CS_6 0x06 +#define ROUTER_CS_6_SLPR BIT(0) +#define ROUTER_CS_6_TNS BIT(1) +#define ROUTER_CS_6_HCI BIT(18) +#define ROUTER_CS_6_CR BIT(25) +#define ROUTER_CS_7 0x07 +#define ROUTER_CS_9 0x09 +#define ROUTER_CS_25 0x19 +#define ROUTER_CS_26 0x1a +#define ROUTER_CS_26_STATUS_MASK GENMASK(29, 24) +#define ROUTER_CS_26_STATUS_SHIFT 24 +#define ROUTER_CS_26_ONS BIT(30) +#define ROUTER_CS_26_OV BIT(31) + enum tb_port_type { TB_TYPE_INACTIVE = 0x000000, TB_TYPE_PORT = 0x000001, @@ -216,6 +243,7 @@ struct tb_regs_port_header { #define ADP_CS_4_NFC_BUFFERS_MASK GENMASK(9, 0) #define ADP_CS_4_TOTAL_BUFFERS_MASK GENMASK(29, 20) #define ADP_CS_4_TOTAL_BUFFERS_SHIFT 20 +#define ADP_CS_4_LCK BIT(31) #define ADP_CS_5 0x05 #define ADP_CS_5_LCA_MASK GENMASK(28, 22) #define ADP_CS_5_LCA_SHIFT 22 @@ -237,6 +265,12 @@ struct tb_regs_port_header { #define LANE_ADP_CS_1_CURRENT_WIDTH_MASK GENMASK(25, 20) #define LANE_ADP_CS_1_CURRENT_WIDTH_SHIFT 20 +/* USB4 port registers */ +#define PORT_CS_18 0x12 +#define PORT_CS_18_BE BIT(8) +#define PORT_CS_19 0x13 +#define PORT_CS_19_PC BIT(3) + /* Display Port adapter registers */ #define ADP_DP_CS_0 0x00 #define ADP_DP_CS_0_VIDEO_HOPID_MASK GENMASK(26, 16) diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index 4ef5bc8f912b..e8e387c4211f 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -243,6 +243,12 @@ struct tb_tunnel *tb_tunnel_alloc_pci(struct tb *tb, struct tb_port *up, return tunnel; } +static bool tb_dp_is_usb4(const struct tb_switch *sw) +{ + /* Titan Ridge DP adapters need the same treatment as USB4 */ + return tb_switch_is_usb4(sw) || tb_switch_is_tr(sw); +} + static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out) { int timeout = 10; @@ -250,7 +256,7 @@ static int tb_dp_cm_handshake(struct tb_port *in, struct tb_port *out) int ret; /* Both ends need to support this */ - if (!tb_switch_is_tr(in->sw) || !tb_switch_is_tr(out->sw)) + if (!tb_dp_is_usb4(in->sw) || !tb_dp_is_usb4(out->sw)) return 0; ret = tb_port_read(out, &val, TB_CFG_PORT, @@ -520,7 +526,7 @@ static int tb_dp_consumed_bandwidth(struct tb_tunnel *tunnel) u32 val, rate = 0, lanes = 0; int ret; - if (tb_switch_is_tr(sw)) { + if (tb_dp_is_usb4(sw)) { int timeout = 10; /* diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c new file mode 100644 index 000000000000..4b0997292b43 --- /dev/null +++ b/drivers/thunderbolt/usb4.c @@ -0,0 +1,722 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * USB4 specific functionality + * + * Copyright (C) 2019, Intel Corporation + * Authors: Mika Westerberg + * Rajmohan Mani + */ + +#include +#include + +#include "tb.h" + +#define USB4_DATA_DWORDS 16 +#define USB4_DATA_RETRIES 3 + +enum usb4_switch_op { + USB4_SWITCH_OP_QUERY_DP_RESOURCE = 0x10, + USB4_SWITCH_OP_ALLOC_DP_RESOURCE = 0x11, + USB4_SWITCH_OP_DEALLOC_DP_RESOURCE = 0x12, + USB4_SWITCH_OP_NVM_WRITE = 0x20, + USB4_SWITCH_OP_NVM_AUTH = 0x21, + USB4_SWITCH_OP_NVM_READ = 0x22, + USB4_SWITCH_OP_NVM_SET_OFFSET = 0x23, + USB4_SWITCH_OP_DROM_READ = 0x24, + USB4_SWITCH_OP_NVM_SECTOR_SIZE = 0x25, +}; + +#define 
USB4_NVM_READ_OFFSET_MASK GENMASK(23, 2) +#define USB4_NVM_READ_OFFSET_SHIFT 2 +#define USB4_NVM_READ_LENGTH_MASK GENMASK(27, 24) +#define USB4_NVM_READ_LENGTH_SHIFT 24 + +#define USB4_NVM_SET_OFFSET_MASK USB4_NVM_READ_OFFSET_MASK +#define USB4_NVM_SET_OFFSET_SHIFT USB4_NVM_READ_OFFSET_SHIFT + +#define USB4_DROM_ADDRESS_MASK GENMASK(14, 2) +#define USB4_DROM_ADDRESS_SHIFT 2 +#define USB4_DROM_SIZE_MASK GENMASK(19, 15) +#define USB4_DROM_SIZE_SHIFT 15 + +#define USB4_NVM_SECTOR_SIZE_MASK GENMASK(23, 0) + +typedef int (*read_block_fn)(struct tb_switch *, unsigned int, void *, size_t); +typedef int (*write_block_fn)(struct tb_switch *, const void *, size_t); + +static int usb4_switch_wait_for_bit(struct tb_switch *sw, u32 offset, u32 bit, + u32 value, int timeout_msec) +{ + ktime_t timeout = ktime_add_ms(ktime_get(), timeout_msec); + + do { + u32 val; + int ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, offset, 1); + if (ret) + return ret; + + if ((val & bit) == value) + return 0; + + usleep_range(50, 100); + } while (ktime_before(ktime_get(), timeout)); + + return -ETIMEDOUT; +} + +static int usb4_switch_op_read_data(struct tb_switch *sw, void *data, + size_t dwords) +{ + if (dwords > USB4_DATA_DWORDS) + return -EINVAL; + + return tb_sw_read(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords); +} + +static int usb4_switch_op_write_data(struct tb_switch *sw, const void *data, + size_t dwords) +{ + if (dwords > USB4_DATA_DWORDS) + return -EINVAL; + + return tb_sw_write(sw, data, TB_CFG_SWITCH, ROUTER_CS_9, dwords); +} + +static int usb4_switch_op_read_metadata(struct tb_switch *sw, u32 *metadata) +{ + return tb_sw_read(sw, metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); +} + +static int usb4_switch_op_write_metadata(struct tb_switch *sw, u32 metadata) +{ + return tb_sw_write(sw, &metadata, TB_CFG_SWITCH, ROUTER_CS_25, 1); +} + +static int usb4_switch_do_read_data(struct tb_switch *sw, u16 address, + void *buf, size_t size, read_block_fn read_block) +{ + unsigned int retries = USB4_DATA_RETRIES; + unsigned int offset; + + offset = address & 3; + address = address & ~3; + + do { + size_t nbytes = min_t(size_t, size, USB4_DATA_DWORDS * 4); + unsigned int dwaddress, dwords; + u8 data[USB4_DATA_DWORDS * 4]; + int ret; + + dwaddress = address / 4; + dwords = ALIGN(nbytes, 4) / 4; + + ret = read_block(sw, dwaddress, data, dwords); + if (ret) { + if (ret == -ETIMEDOUT) { + if (retries--) + continue; + ret = -EIO; + } + return ret; + } + + memcpy(buf, data + offset, nbytes); + + size -= nbytes; + address += nbytes; + buf += nbytes; + } while (size > 0); + + return 0; +} + +static int usb4_switch_do_write_data(struct tb_switch *sw, u16 address, + const void *buf, size_t size, write_block_fn write_next_block) +{ + unsigned int retries = USB4_DATA_RETRIES; + unsigned int offset; + + offset = address & 3; + address = address & ~3; + + do { + u32 nbytes = min_t(u32, size, USB4_DATA_DWORDS * 4); + u8 data[USB4_DATA_DWORDS * 4]; + int ret; + + memcpy(data + offset, buf, nbytes); + + ret = write_next_block(sw, data, nbytes / 4); + if (ret) { + if (ret == -ETIMEDOUT) { + if (retries--) + continue; + ret = -EIO; + } + return ret; + } + + size -= nbytes; + address += nbytes; + buf += nbytes; + } while (size > 0); + + return 0; +} + +static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status) +{ + u32 val; + int ret; + + val = opcode | ROUTER_CS_26_OV; + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); + if (ret) + return ret; + + ret = usb4_switch_wait_for_bit(sw, ROUTER_CS_26, ROUTER_CS_26_OV, 
0, 500); + if (ret) + return ret; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_26, 1); + if (val & ROUTER_CS_26_ONS) + return -EOPNOTSUPP; + + *status = (val & ROUTER_CS_26_STATUS_MASK) >> ROUTER_CS_26_STATUS_SHIFT; + return 0; +} + +/** + * usb4_switch_setup() - Additional setup for USB4 device + * @sw: USB4 router to setup + * + * USB4 routers need additional settings in order to enable all the + * tunneling. This function enables USB and PCIe tunneling if it can be + * enabled (e.g the parent switch also supports them). If USB tunneling + * is not available for some reason (like that there is Thunderbolt 3 + * switch upstream) then the internal xHCI controller is enabled + * instead. + */ +int usb4_switch_setup(struct tb_switch *sw) +{ + struct tb_switch *parent; + bool tbt3, xhci; + u32 val = 0; + int ret; + + if (!tb_route(sw)) + return 0; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1); + if (ret) + return ret; + + xhci = val & ROUTER_CS_6_HCI; + tbt3 = !(val & ROUTER_CS_6_TNS); + + tb_sw_dbg(sw, "TBT3 support: %s, xHCI: %s\n", + tbt3 ? "yes" : "no", xhci ? "yes" : "no"); + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + + parent = tb_switch_parent(sw); + + /* Only enable PCIe tunneling if the parent router supports it */ + if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) { + val |= ROUTER_CS_5_PTO; + /* xHCI can be enabled if PCIe tunneling is supported */ + if (xhci & ROUTER_CS_6_HCI) + val |= ROUTER_CS_5_HCO; + } + + /* TBT3 supported by the CM */ + val |= ROUTER_CS_5_C3S; + /* Tunneling configuration is ready now */ + val |= ROUTER_CS_5_CV; + + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + + return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_CR, + ROUTER_CS_6_CR, 50); +} + +/** + * usb4_switch_read_uid() - Read UID from USB4 router + * @sw: USB4 router + * + * Reads 64-bit UID from USB4 router config space. + */ +int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid) +{ + return tb_sw_read(sw, uid, TB_CFG_SWITCH, ROUTER_CS_7, 2); +} + +static int usb4_switch_drom_read_block(struct tb_switch *sw, + unsigned int dwaddress, void *buf, + size_t dwords) +{ + u8 status = 0; + u32 metadata; + int ret; + + metadata = (dwords << USB4_DROM_SIZE_SHIFT) & USB4_DROM_SIZE_MASK; + metadata |= (dwaddress << USB4_DROM_ADDRESS_SHIFT) & + USB4_DROM_ADDRESS_MASK; + + ret = usb4_switch_op_write_metadata(sw, metadata); + if (ret) + return ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_DROM_READ, &status); + if (ret) + return ret; + + if (status) + return -EIO; + + return usb4_switch_op_read_data(sw, buf, dwords); +} + +/** + * usb4_switch_drom_read() - Read arbitrary bytes from USB4 router DROM + * @sw: USB4 router + * + * Uses USB4 router operations to read router DROM. For devices this + * should always work but for hosts it may return %-EOPNOTSUPP in which + * case the host router does not have DROM. 
+ */ +int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, + size_t size) +{ + return usb4_switch_do_read_data(sw, address, buf, size, + usb4_switch_drom_read_block); +} + +static int usb4_set_port_configured(struct tb_port *port, bool configured) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + if (configured) + val |= PORT_CS_19_PC; + else + val &= ~PORT_CS_19_PC; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); +} + +/** + * usb4_switch_configure_link() - Set upstream USB4 link configured + * @sw: USB4 router + * + * Sets the upstream USB4 link to be configured for power management + * purposes. + */ +int usb4_switch_configure_link(struct tb_switch *sw) +{ + struct tb_port *up; + + if (!tb_route(sw)) + return 0; + + up = tb_upstream_port(sw); + return usb4_set_port_configured(up, true); +} + +/** + * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration + * @sw: USB4 router + * + * Reverse of usb4_switch_configure_link(). + */ +void usb4_switch_unconfigure_link(struct tb_switch *sw) +{ + struct tb_port *up; + + if (sw->is_unplugged || !tb_route(sw)) + return; + + up = tb_upstream_port(sw); + usb4_set_port_configured(up, false); +} + +/** + * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding + * @sw: USB4 router + * + * Checks whether conditions are met so that lane bonding can be + * established with the upstream router. Call only for device routers. + */ +bool usb4_switch_lane_bonding_possible(struct tb_switch *sw) +{ + struct tb_port *up; + int ret; + u32 val; + + up = tb_upstream_port(sw); + ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1); + if (ret) + return false; + + return !!(val & PORT_CS_18_BE); +} + +/** + * usb4_switch_set_sleep() - Set sleep bit in order to enable low power states + * @sw: USB4 router + * + * Sets sleep bit for the router and waits for sleep ready to be + * asserted. + */ +int usb4_switch_set_sleep(struct tb_switch *sw) +{ + int ret; + u32 val; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + + val |= ROUTER_CS_5_SLP; + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + + /* Wait for sleep ready bit */ + return usb4_switch_wait_for_bit(sw, ROUTER_CS_6, ROUTER_CS_6_SLPR, + ROUTER_CS_6_SLPR, 500); +} + +/** + * usb4_switch_nvm_sector_size() - Return router NVM sector size + * @sw: USB4 router + * + * If the router supports NVM operations this function returns the NVM + * sector size in bytes. If NVM operations are not supported returns + * %-EOPNOTSUPP. + */ +int usb4_switch_nvm_sector_size(struct tb_switch *sw) +{ + u32 metadata; + u8 status; + int ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SECTOR_SIZE, &status); + if (ret) + return ret; + + if (status) + return status == 0x2 ? 
-EOPNOTSUPP : -EIO; + + ret = usb4_switch_op_read_metadata(sw, &metadata); + if (ret) + return ret; + + return metadata & USB4_NVM_SECTOR_SIZE_MASK; +} + +static int usb4_switch_nvm_read_block(struct tb_switch *sw, + unsigned int dwaddress, void *buf, size_t dwords) +{ + u8 status = 0; + u32 metadata; + int ret; + + metadata = (dwords << USB4_NVM_READ_LENGTH_SHIFT) & + USB4_NVM_READ_LENGTH_MASK; + metadata |= (dwaddress << USB4_NVM_READ_OFFSET_SHIFT) & + USB4_NVM_READ_OFFSET_MASK; + + ret = usb4_switch_op_write_metadata(sw, metadata); + if (ret) + return ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_READ, &status); + if (ret) + return ret; + + if (status) + return -EIO; + + return usb4_switch_op_read_data(sw, buf, dwords); +} + +/** + * usb4_switch_nvm_read() - Read arbitrary bytes from router NVM + * @sw: USB4 router + * @address: Starting address in bytes + * @buf: Read data is placed here + * @size: How many bytes to read + * + * Reads NVM contents of the router. If NVM is not supported returns + * %-EOPNOTSUPP. + */ +int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, + size_t size) +{ + return usb4_switch_do_read_data(sw, address, buf, size, + usb4_switch_nvm_read_block); +} + +static int usb4_switch_nvm_set_offset(struct tb_switch *sw, + unsigned int address) +{ + u32 metadata, dwaddress; + u8 status = 0; + int ret; + + dwaddress = address / 4; + metadata = (dwaddress << USB4_NVM_SET_OFFSET_SHIFT) & + USB4_NVM_SET_OFFSET_MASK; + + ret = usb4_switch_op_write_metadata(sw, metadata); + if (ret) + return ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_SET_OFFSET, &status); + if (ret) + return ret; + + return status ? -EIO : 0; +} + +static int usb4_switch_nvm_write_next_block(struct tb_switch *sw, + const void *buf, size_t dwords) +{ + u8 status; + int ret; + + ret = usb4_switch_op_write_data(sw, buf, dwords); + if (ret) + return ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_WRITE, &status); + if (ret) + return ret; + + return status ? -EIO : 0; +} + +/** + * usb4_switch_nvm_write() - Write to the router NVM + * @sw: USB4 router + * @address: Start address where to write in bytes + * @buf: Pointer to the data to write + * @size: Size of @buf in bytes + * + * Writes @buf to the router NVM using USB4 router operations. If NVM + * write is not supported returns %-EOPNOTSUPP. + */ +int usb4_switch_nvm_write(struct tb_switch *sw, unsigned int address, + const void *buf, size_t size) +{ + int ret; + + ret = usb4_switch_nvm_set_offset(sw, address); + if (ret) + return ret; + + return usb4_switch_do_write_data(sw, address, buf, size, + usb4_switch_nvm_write_next_block); +} + +/** + * usb4_switch_nvm_authenticate() - Authenticate new NVM + * @sw: USB4 router + * + * After the new NVM has been written via usb4_switch_nvm_write(), this + * function triggers NVM authentication process. If the authentication + * is successful the router is power cycled and the new NVM starts + * running. In case of failure returns negative errno. 
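usb4_switch_do_read_data()/usb4_switch_do_write_data() are not part of this hunk; conceptually they only split the byte range into dword-addressed blocks and feed each block to the block helpers above. A rough standalone sketch of that splitting, assuming 64 bytes per operation (the real block size is not taken from the driver):

#include <stddef.h>
#include <stdio.h>

typedef int (*read_block_fn)(unsigned int dwaddress, void *buf, size_t dwords);

static int do_read_data(unsigned int address, void *buf, size_t size,
			read_block_fn read_block)
{
	size_t done = 0;

	while (done < size) {
		size_t nbytes = size - done > 64 ? 64 : size - done;
		int ret;

		ret = read_block((address + done) / 4,
				 (char *)buf + done, (nbytes + 3) / 4);
		if (ret)
			return ret;
		done += nbytes;
	}
	return 0;
}

static int fake_block_read(unsigned int dwaddress, void *buf, size_t dwords)
{
	(void)buf;
	printf("block read: dwaddress=%u dwords=%zu\n", dwaddress, dwords);
	return 0;
}

int main(void)
{
	char nvm[200];

	return do_read_data(0, nvm, sizeof(nvm), fake_block_read);
}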
+ */ +int usb4_switch_nvm_authenticate(struct tb_switch *sw) +{ + u8 status = 0; + int ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_NVM_AUTH, &status); + if (ret) + return ret; + + switch (status) { + case 0x0: + tb_sw_dbg(sw, "NVM authentication successful\n"); + return 0; + case 0x1: + return -EINVAL; + case 0x2: + return -EAGAIN; + case 0x3: + return -EOPNOTSUPP; + default: + return -EIO; + } +} + +/** + * usb4_switch_query_dp_resource() - Query availability of DP IN resource + * @sw: USB4 router + * @in: DP IN adapter + * + * For DP tunneling this function can be used to query availability of + * DP IN resource. Returns true if the resource is available for DP + * tunneling, false otherwise. + */ +bool usb4_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + u8 status; + int ret; + + ret = usb4_switch_op_write_metadata(sw, in->port); + if (ret) + return false; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_QUERY_DP_RESOURCE, &status); + /* + * If DP resource allocation is not supported assume it is + * always available. + */ + if (ret == -EOPNOTSUPP) + return true; + else if (ret) + return false; + + return !status; +} + +/** + * usb4_switch_alloc_dp_resource() - Allocate DP IN resource + * @sw: USB4 router + * @in: DP IN adapter + * + * Allocates DP IN resource for DP tunneling using USB4 router + * operations. If the resource was allocated returns %0. Otherwise + * returns negative errno, in particular %-EBUSY if the resource is + * already allocated. + */ +int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + u8 status; + int ret; + + ret = usb4_switch_op_write_metadata(sw, in->port); + if (ret) + return ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_ALLOC_DP_RESOURCE, &status); + if (ret == -EOPNOTSUPP) + return 0; + else if (ret) + return ret; + + return status ? -EBUSY : 0; +} + +/** + * usb4_switch_dealloc_dp_resource() - Releases allocated DP IN resource + * @sw: USB4 router + * @in: DP IN adapter + * + * Releases the previously allocated DP IN resource. + */ +int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in) +{ + u8 status; + int ret; + + ret = usb4_switch_op_write_metadata(sw, in->port); + if (ret) + return ret; + + ret = usb4_switch_op(sw, USB4_SWITCH_OP_DEALLOC_DP_RESOURCE, &status); + if (ret == -EOPNOTSUPP) + return 0; + else if (ret) + return ret; + + return status ? -EIO : 0; +} + +static int usb4_port_idx(const struct tb_switch *sw, const struct tb_port *port) +{ + int i, usb4_idx = 0; + + /* Assume port is primary */ + tb_switch_for_each_port(sw, i) { + if (tb_is_upstream_port(&sw->ports[i])) + continue; + if (!tb_port_is_null(&sw->ports[i])) + continue; + if (!sw->ports[i].link_nr) { + if (&sw->ports[i] == port) + break; + usb4_idx++; + } + } + + return usb4_idx; +} + +/** + * usb4_switch_map_pcie_down() - Map USB4 port to a PCIe downstream adapter + * @sw: USB4 router + * @port: USB4 port + * + * USB4 routers have direct mapping between USB4 ports and PCIe + * downstream adapters where the PCIe topology is extended. This + * function returns the corresponding downstream PCIe adapter or %NULL + * if no such mapping was possible. 
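The mapping described here is purely positional: usb4_port_idx() counts lane 0 adapters up to @port, and usb4_switch_map_pcie_down() then picks the PCIe downstream adapter with the same index. A toy standalone model of that counting; the port table is made up, and the upstream-port and adapter-enabled checks of the real code are dropped for brevity:

#include <stdio.h>

enum port_type { PORT_USB4_LANE0, PORT_USB4_LANE1, PORT_PCIE_DOWN, PORT_OTHER };

struct port { enum port_type type; };

static int usb4_port_index(const struct port *ports, int nports, int which)
{
	int i, idx = 0;

	for (i = 0; i < nports; i++) {
		if (ports[i].type != PORT_USB4_LANE0)
			continue;
		if (i == which)
			break;
		idx++;
	}
	return idx;
}

static int map_pcie_down(const struct port *ports, int nports, int usb4_idx)
{
	int i, pcie_idx = 0;

	for (i = 0; i < nports; i++) {
		if (ports[i].type != PORT_PCIE_DOWN)
			continue;
		if (pcie_idx == usb4_idx)
			return i;
		pcie_idx++;
	}
	return -1;
}

int main(void)
{
	struct port ports[] = {
		{ PORT_USB4_LANE0 }, { PORT_USB4_LANE1 },
		{ PORT_USB4_LANE0 }, { PORT_USB4_LANE1 },
		{ PORT_PCIE_DOWN }, { PORT_PCIE_DOWN },
	};
	int which = 2;	/* second lane 0 USB4 port */
	int usb4_idx = usb4_port_index(ports, 6, which);

	printf("USB4 port %d -> PCIe down adapter at index %d\n",
	       which, map_pcie_down(ports, 6, usb4_idx));
	return 0;
}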
+ */ +struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, + const struct tb_port *port) +{ + int usb4_idx = usb4_port_idx(sw, port); + int i, pcie_idx = 0; + + /* Find PCIe down port matching usb4_port */ + tb_switch_for_each_port(sw, i) { + if (!tb_port_is_pcie_down(&sw->ports[i])) + continue; + + if (pcie_idx == usb4_idx && + !tb_pci_port_is_enabled(&sw->ports[i])) + return &sw->ports[i]; + + pcie_idx++; + } + + return NULL; +} + +/** + * usb4_port_unlock() - Unlock USB4 downstream port + * @port: USB4 port to unlock + * + * Unlocks USB4 downstream port so that the connection manager can + * access the router below this port. + */ +int usb4_port_unlock(struct tb_port *port) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, ADP_CS_4, 1); + if (ret) + return ret; + + val &= ~ADP_CS_4_LCK; + return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1); +} diff --git a/drivers/thunderbolt/xdomain.c b/drivers/thunderbolt/xdomain.c index 37ef0b4da1cf..11c4fc40aa81 100644 --- a/drivers/thunderbolt/xdomain.c +++ b/drivers/thunderbolt/xdomain.c @@ -1220,7 +1220,13 @@ struct tb_xdomain *tb_xdomain_alloc(struct tb *tb, struct device *parent, u64 route, const uuid_t *local_uuid, const uuid_t *remote_uuid) { + struct tb_switch *parent_sw = tb_to_switch(parent); struct tb_xdomain *xd; + struct tb_port *down; + + /* Make sure the downstream domain is accessible */ + down = tb_port_at(route, parent_sw); + tb_port_unlock(down); xd = kzalloc(sizeof(*xd), GFP_KERNEL); if (!xd) From patchwork Tue Oct 1 11:38:26 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168523 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 35152112B for ; Tue, 1 Oct 2019 11:39:04 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 1E2D821D71 for ; Tue, 1 Oct 2019 11:39:04 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1732902AbfJALjD (ORCPT ); Tue, 1 Oct 2019 07:39:03 -0400 Received: from mga02.intel.com ([134.134.136.20]:39070 "EHLO mga02.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1732559AbfJALim (ORCPT ); Tue, 1 Oct 2019 07:38:42 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by orsmga101.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:41 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="191434986" Received: from black.fi.intel.com ([10.237.72.28]) by fmsmga007.fm.intel.com with ESMTP; 01 Oct 2019 04:38:39 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id A2D1C1AD; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 18/22] thunderbolt: Make tb_switch_find_cap() available to other files Date: Tue, 1 Oct 2019 14:38:26 +0300 Message-Id: <20191001113830.13028-19-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> 
References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org From: Rajmohan Mani We need to find switch capabilities in order to implement TMU support so make it available to other files as well. Signed-off-by: Rajmohan Mani Signed-off-by: Mika Westerberg --- drivers/thunderbolt/cap.c | 11 ++++++++++- drivers/thunderbolt/tb.h | 1 + 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c index 8bf8e031f0bc..568d878f19f7 100644 --- a/drivers/thunderbolt/cap.c +++ b/drivers/thunderbolt/cap.c @@ -113,7 +113,16 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap) return ret; } -static int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap) +/** + * tb_switch_find_cap() - Find switch capability + * @sw Switch to find the capability for + * @cap: Capability to look + * + * Returns offset to start of capability or %-ENOENT if no such + * capability was found. Negative errno is returned if there was an + * error. + */ +int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap) { int offset = sw->config.first_cap_offset; diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 60f237020d1b..1488382066fd 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -709,6 +709,7 @@ struct tb_port *tb_next_port_on_path(struct tb_port *start, struct tb_port *end, struct tb_port *prev); int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec); +int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap); int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap); bool tb_port_is_enabled(struct tb_port *port); From patchwork Tue Oct 1 11:38:27 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168533 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 54D63112B for ; Tue, 1 Oct 2019 11:39:29 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 2A0F221906 for ; Tue, 1 Oct 2019 11:39:29 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1732559AbfJALjU (ORCPT ); Tue, 1 Oct 2019 07:39:20 -0400 Received: from mga07.intel.com ([134.134.136.100]:49106 "EHLO mga07.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1732660AbfJALim (ORCPT ); Tue, 1 Oct 2019 07:38:42 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga003.fm.intel.com ([10.253.24.29]) by orsmga105.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:42 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="197830168" Received: from black.fi.intel.com ([10.237.72.28]) by FMSMGA003.fm.intel.com with ESMTP; 01 Oct 2019 04:38:39 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id B41DF694; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC 
PATCH 19/22] thunderbolt: Add support for Time Management Unit Date: Tue, 1 Oct 2019 14:38:27 +0300 Message-Id: <20191001113830.13028-20-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org From: Rajmohan Mani Time Management Unit (TMU) is included in each USB4 router. It is used to synchronize time across the USB4 fabric. By default when USB4 router is plugged to the domain, its TMU is turned off. This differs from Thunderbolt (1, 2 and 3) devices whose TMU is by default configured to bi-directional HiFi mode. Since time synchronization is needed for proper Display Port tunneling this means we need to configure the TMU on USB4 compliant devices. The USB4 spec allows some flexibility on how the TMU can be configured. This makes it possible to enable link power management states (CLx) in certain topologies, where for example DP tunneling is not used. TMU can also be re-configured dynamicaly depending on types of tunnels created over the USB4 fabric. In this patch we simply configure the TMU to be in bi-directional HiFi mode. This way we can tunnel any kind of traffic without need to perform complex steps to re-configure the domain dynamically. We can add more fine-grained TMU configuration later on when we start enabling CLx states. Signed-off-by: Rajmohan Mani Co-developed-by: Mika Westerberg Signed-off-by: Mika Westerberg --- drivers/thunderbolt/Makefile | 2 +- drivers/thunderbolt/switch.c | 4 + drivers/thunderbolt/tb.c | 29 +++ drivers/thunderbolt/tb.h | 47 +++++ drivers/thunderbolt/tb_regs.h | 20 ++ drivers/thunderbolt/tmu.c | 380 ++++++++++++++++++++++++++++++++++ 6 files changed, 481 insertions(+), 1 deletion(-) create mode 100644 drivers/thunderbolt/tmu.c diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile index c0b2fd73dfbd..2014bc840b06 100644 --- a/drivers/thunderbolt/Makefile +++ b/drivers/thunderbolt/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only obj-${CONFIG_THUNDERBOLT} := thunderbolt.o thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o eeprom.o -thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o usb4.o +thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 2ccd1004920e..58e3f54ddbb9 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -2278,6 +2278,10 @@ int tb_switch_add(struct tb_switch *sw) ret = tb_switch_update_link_attributes(sw); if (ret) return ret; + + ret = tb_switch_tmu_init(sw); + if (ret) + return ret; } ret = device_add(&sw->dev); diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 24e37e47dc48..f2868c125637 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -161,6 +161,25 @@ static void tb_scan_xdomain(struct tb_port *port) } } +static int tb_enable_tmu(struct tb_switch *sw) +{ + int ret; + + /* If it is already enabled in correct mode, don't touch it */ + if (tb_switch_tmu_is_enabled(sw)) + return 0; + + ret = tb_switch_tmu_disable(sw); + if (ret) + return ret; + + ret = tb_switch_tmu_post_time(sw); + if (ret) + return ret; + + return tb_switch_tmu_enable(sw); +} + static void tb_scan_port(struct tb_port *port); /** @@ -263,6 
+282,9 @@ static void tb_scan_port(struct tb_port *port) if (tb_switch_lane_bonding_enable(sw)) tb_sw_warn(sw, "failed to enable lane bonding\n"); + if (tb_enable_tmu(sw)) + tb_sw_warn(sw, "failed to enable TMU\n"); + tb_scan_switch(sw); } @@ -713,6 +735,7 @@ static void tb_handle_hotplug(struct work_struct *work) tb_sw_set_unplugged(port->remote->sw); tb_free_invalid_tunnels(tb); tb_remove_dp_resources(port->remote->sw); + tb_switch_tmu_disable(port->remote->sw); tb_switch_lane_bonding_disable(port->remote->sw); tb_switch_remove(port->remote->sw); port->remote = NULL; @@ -860,6 +883,9 @@ static int tb_start(struct tb *tb) return ret; } + /* Enable TMU if it is off */ + if (!tb_switch_tmu_is_enabled(tb->root_switch)) + tb_switch_tmu_enable(tb->root_switch); /* Full scan to discover devices added before the driver was loaded. */ tb_scan_switch(tb->root_switch); /* Find out tunnels created by the boot firmware */ @@ -891,6 +917,9 @@ static void tb_restore_children(struct tb_switch *sw) { int i; + if (tb_enable_tmu(sw)) + tb_sw_warn(sw, "failed to restore TMU configuration\n"); + tb_switch_for_each_remote_port(sw, i) { struct tb_port *port = &sw->ports[i]; diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 1488382066fd..087fd6d6ef9a 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -46,6 +46,38 @@ struct tb_switch_nvm { #define TB_SWITCH_MAX_DEPTH 6 #define USB4_SWITCH_MAX_DEPTH 5 +/** + * enum tb_switch_tmu_rate - TMU refresh rate + * @TB_SWITCH_TMU_RATE_OFF: %0 (Disable Time Sync handshake) + * @TB_SWITCH_TMU_RATE_HIFI: %16 us time interval between successive + * transmission of the Delay Request TSNOS + * (Time Sync Notification Ordered Set) on a Link + * @TB_SWITCH_TMU_RATE_NORMAL: %1 ms time interval between successive + * transmission of the Delay Request TSNOS on + * a Link + */ +enum tb_switch_tmu_rate { + TB_SWITCH_TMU_RATE_OFF = 0, + TB_SWITCH_TMU_RATE_HIFI = 16, + TB_SWITCH_TMU_RATE_NORMAL = 1000, +}; + +/** + * struct tb_switch_tmu - Structure holding switch TMU configuration + * @cap: Offset to the TMU capability (%0 if not found) + * @has_ucap: Does the switch support uni-directional mode + * @rate: TMU refresh rate related to upstream switch. In case of root + * switch this holds the domain rate. + * @unidirectional: Is the TMU in uni-directional or bi-directional mode + * related to upstream switch. Don't case for root switch. + */ +struct tb_switch_tmu { + int cap; + bool has_ucap; + enum tb_switch_tmu_rate rate; + bool unidirectional; +}; + /** * struct tb_switch - a thunderbolt switch * @dev: Device for the switch @@ -55,6 +87,7 @@ struct tb_switch_nvm { * mailbox this will hold the pointer to that (%NULL * otherwise). If set it also means the switch has * upgradeable NVM. 
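The policy tb_enable_tmu() builds on top of these structures is small: leave the TMU alone if it is already in bi-directional HiFi mode, otherwise disable it, post the current time and enable it again. A standalone sketch of that decision; the tmu_* helpers are stand-ins for the tb_switch_tmu_* calls, only the enum values are taken from this patch:

enum tmu_rate { TMU_RATE_OFF = 0, TMU_RATE_HIFI = 16, TMU_RATE_NORMAL = 1000 };

struct tmu_state {
	enum tmu_rate rate;
	int unidirectional;
};

static int tmu_disable(struct tmu_state *s)   { s->rate = TMU_RATE_OFF; return 0; }
static int tmu_post_time(struct tmu_state *s) { (void)s; return 0; }
static int tmu_enable(struct tmu_state *s)    { s->rate = TMU_RATE_HIFI; s->unidirectional = 0; return 0; }

/* Same decision as tb_enable_tmu() in tb.c */
static int enable_tmu(struct tmu_state *s)
{
	int ret;

	/* Already in the mode the software CM wants? Leave it alone. */
	if (s->rate == TMU_RATE_HIFI && !s->unidirectional)
		return 0;

	ret = tmu_disable(s);
	if (ret)
		return ret;
	ret = tmu_post_time(s);
	if (ret)
		return ret;
	return tmu_enable(s);
}

int main(void)
{
	struct tmu_state s = { TMU_RATE_OFF, 0 };

	return enable_tmu(&s);	/* leaves s in bi-directional HiFi mode */
}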
+ * @tmu: The switch TMU configuration * @tb: Pointer to the domain the switch belongs to * @uid: Unique ID of the switch * @uuid: UUID of the switch (or %NULL if not supported) @@ -93,6 +126,7 @@ struct tb_switch { struct tb_regs_switch_header config; struct tb_port *ports; struct tb_dma_port *dma_port; + struct tb_switch_tmu tmu; struct tb *tb; u64 uid; uuid_t *uuid; @@ -129,6 +163,7 @@ struct tb_switch { * @remote: Remote port (%NULL if not connected) * @xdomain: Remote host (%NULL if not connected) * @cap_phy: Offset, zero if not found + * @cap_tmu: Offset of the adapter specific TMU capability (%0 if not present) * @cap_adap: Offset of the adapter specific capability (%0 if not present) * @cap_usb4: Offset to the USB4 port capability (%0 if not present) * @port: Port number on switch @@ -147,6 +182,7 @@ struct tb_port { struct tb_port *remote; struct tb_xdomain *xdomain; int cap_phy; + int cap_tmu; int cap_adap; int cap_usb4; u8 port; @@ -696,6 +732,17 @@ bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in); int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); void tb_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in); +int tb_switch_tmu_init(struct tb_switch *sw); +int tb_switch_tmu_post_time(struct tb_switch *sw); +int tb_switch_tmu_disable(struct tb_switch *sw); +int tb_switch_tmu_enable(struct tb_switch *sw); + +static inline bool tb_switch_tmu_is_enabled(const struct tb_switch *sw) +{ + return sw->tmu.rate == TB_SWITCH_TMU_RATE_HIFI && + !sw->tmu.unidirectional; +} + int tb_wait_for_port(struct tb_port *port, bool wait_if_unplugged); int tb_port_add_nfc_credits(struct tb_port *port, int credits); int tb_port_set_initial_credits(struct tb_port *port, u32 credits); diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index 47f73f992412..ec1a5d1f7c94 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -26,6 +26,7 @@ #define TB_MAX_CONFIG_RW_LENGTH 60 enum tb_switch_cap { + TB_SWITCH_CAP_TMU = 0x03, TB_SWITCH_CAP_VSE = 0x05, }; @@ -195,6 +196,21 @@ struct tb_regs_switch_header { #define ROUTER_CS_26_ONS BIT(30) #define ROUTER_CS_26_OV BIT(31) +/* Router TMU configuration */ +#define TMU_RTR_CS_0 0x00 +#define TMU_RTR_CS_0_TD BIT(27) +#define TMU_RTR_CS_0_UCAP BIT(30) +#define TMU_RTR_CS_1 0x01 +#define TMU_RTR_CS_1_LOCAL_TIME_NS_MASK GENMASK(31, 16) +#define TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT 16 +#define TMU_RTR_CS_2 0x02 +#define TMU_RTR_CS_3 0x03 +#define TMU_RTR_CS_3_LOCAL_TIME_NS_MASK GENMASK(15, 0) +#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK GENMASK(31, 16) +#define TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT 16 +#define TMU_RTR_CS_22 0x16 +#define TMU_RTR_CS_24 0x18 + enum tb_port_type { TB_TYPE_INACTIVE = 0x000000, TB_TYPE_PORT = 0x000001, @@ -248,6 +264,10 @@ struct tb_regs_port_header { #define ADP_CS_5_LCA_MASK GENMASK(28, 22) #define ADP_CS_5_LCA_SHIFT 22 +/* TMU adapter registers */ +#define TMU_ADP_CS_3 0x03 +#define TMU_ADP_CS_3_UDM BIT(29) + /* Lane adapter registers */ #define LANE_ADP_CS_0 0x00 #define LANE_ADP_CS_0_SUPPORTED_WIDTH_MASK GENMASK(25, 20) diff --git a/drivers/thunderbolt/tmu.c b/drivers/thunderbolt/tmu.c new file mode 100644 index 000000000000..d6314f6b20c7 --- /dev/null +++ b/drivers/thunderbolt/tmu.c @@ -0,0 +1,380 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Thunderbolt Time Management Unit (TMU) support + * + * Copyright (C) 2019, Intel Corporation + * Authors: Mika Westerberg + * Rajmohan Mani + */ + +#include + +#include "tb.h" + +static 
const char *tb_switch_tmu_mode_name(const struct tb_switch *sw) +{ + bool root_switch = !tb_route(sw); + + switch (sw->tmu.rate) { + case TB_SWITCH_TMU_RATE_OFF: + return "off"; + + case TB_SWITCH_TMU_RATE_HIFI: + /* Root switch does not have upstream directionality */ + if (root_switch) + return "HiFi"; + if (sw->tmu.unidirectional) + return "uni-directional, HiFi"; + return "bi-directional, HiFi"; + + case TB_SWITCH_TMU_RATE_NORMAL: + if (root_switch) + return "normal"; + return "uni-directional, normal"; + + default: + return "unknown"; + } +} + +static bool tb_switch_tmu_ucap_supported(struct tb_switch *sw) +{ + int ret; + u32 val; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_0, 1); + if (ret) + return false; + + return !!(val & TMU_RTR_CS_0_UCAP); +} + +static int tb_switch_tmu_rate_read(struct tb_switch *sw) +{ + int ret; + u32 val; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_3, 1); + if (ret) + return ret; + + val >>= TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT; + return val; +} + +static int tb_switch_tmu_rate_write(struct tb_switch *sw, int rate) +{ + int ret; + u32 val; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_3, 1); + if (ret) + return ret; + + val &= ~TMU_RTR_CS_3_TS_PACKET_INTERVAL_MASK; + val |= rate << TMU_RTR_CS_3_TS_PACKET_INTERVAL_SHIFT; + + return tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_3, 1); +} + +static int tb_port_tmu_write(struct tb_port *port, u8 offset, u32 mask, + u32 value) +{ + u32 data; + int ret; + + ret = tb_port_read(port, &data, TB_CFG_PORT, port->cap_tmu + offset, 1); + if (ret) + return ret; + + data &= ~mask; + data |= value; + + return tb_port_write(port, &data, TB_CFG_PORT, + port->cap_tmu + offset, 1); +} + +static int tb_port_tmu_set_unidirectional(struct tb_port *port, + bool unidirectional) +{ + u32 val; + + if (!port->sw->tmu.has_ucap) + return 0; + + val = unidirectional ? TMU_ADP_CS_3_UDM : 0; + return tb_port_tmu_write(port, TMU_ADP_CS_3, TMU_ADP_CS_3_UDM, val); +} + +static inline int tb_port_tmu_unidirectional_disable(struct tb_port *port) +{ + return tb_port_tmu_set_unidirectional(port, false); +} + +static bool tb_port_tmu_is_unidirectional(struct tb_port *port) +{ + int ret; + u32 val; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_tmu + TMU_ADP_CS_3, 1); + if (ret) + return false; + + return val & TMU_ADP_CS_3_UDM; +} + +static int tb_switch_tmu_set_time_disruption(struct tb_switch *sw, bool set) +{ + int ret; + u32 val; + + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_0, 1); + if (ret) + return ret; + + if (set) + val |= TMU_RTR_CS_0_TD; + else + val &= ~TMU_RTR_CS_0_TD; + + return tb_sw_write(sw, &val, TB_CFG_SWITCH, + sw->tmu.cap + TMU_RTR_CS_0, 1); +} + +/** + * tb_switch_tmu_init() - Initialize switch TMU structures + * @sw: Switch to initialized + * + * This function must be called before other TMU related functions to + * makes the internal structures are filled in correctly. Does not + * change any hardware configuration. 
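The rate read/write helpers above are a plain read-modify-write of the TS packet interval field in TMU_RTR_CS_3 (bits 31:16 per the tb_regs.h hunk of this patch). A self-contained illustration with the register faked as a variable:

#include <stdint.h>
#include <stdio.h>

#define TS_PACKET_INTERVAL_MASK		0xffff0000u	/* GENMASK(31, 16) */
#define TS_PACKET_INTERVAL_SHIFT	16

static uint32_t tmu_rtr_cs_3 = 0x0000abcd;	/* pretend register contents */

static int rate_read(void)
{
	return tmu_rtr_cs_3 >> TS_PACKET_INTERVAL_SHIFT;
}

static void rate_write(int rate)
{
	tmu_rtr_cs_3 &= ~TS_PACKET_INTERVAL_MASK;
	tmu_rtr_cs_3 |= (uint32_t)rate << TS_PACKET_INTERVAL_SHIFT;
}

int main(void)
{
	rate_write(16);				/* HiFi: 16 us interval */
	printf("rate=%d\n", rate_read());	/* prints 16 */
	return 0;
}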
+ */ +int tb_switch_tmu_init(struct tb_switch *sw) +{ + int ret, i; + + if (tb_switch_is_icm(sw)) + return 0; + + ret = tb_switch_find_cap(sw, TB_SWITCH_CAP_TMU); + if (ret > 0) + sw->tmu.cap = ret; + + tb_switch_for_each_port(sw, i) { + struct tb_port *port = &sw->ports[i]; + int cap; + + cap = tb_port_find_cap(port, TB_PORT_CAP_TIME1); + if (cap > 0) + port->cap_tmu = cap; + } + + ret = tb_switch_tmu_rate_read(sw); + if (ret < 0) + return ret; + + sw->tmu.rate = ret; + + sw->tmu.has_ucap = tb_switch_tmu_ucap_supported(sw); + if (sw->tmu.has_ucap) { + tb_sw_dbg(sw, "TMU: supports uni-directional mode\n"); + + if (tb_route(sw)) { + struct tb_port *up = tb_upstream_port(sw); + + sw->tmu.unidirectional = + tb_port_tmu_is_unidirectional(up); + } + } else { + sw->tmu.unidirectional = false; + } + + tb_sw_dbg(sw, "TMU: current mode: %s\n", tb_switch_tmu_mode_name(sw)); + return 0; +} + +/** + * tb_switch_tmu_post_time() - Update switch local time + * @sw: Switch whose time to update + * + * Updates switch local time using time posting procedure. + */ +int tb_switch_tmu_post_time(struct tb_switch *sw) +{ + unsigned int post_local_time_offset, post_time_offset; + struct tb_switch *root_switch = sw->tb->root_switch; + u64 hi, mid, lo, local_time, post_time; + int i, ret, retries = 100; + u32 gm_local_time[3]; + + if (!tb_route(sw)) + return 0; + + if (!tb_switch_is_usb4(sw)) + return 0; + + /* Need to be able to read the grand master time */ + if (!root_switch->tmu.cap) + return 0; + + ret = tb_sw_read(root_switch, gm_local_time, TB_CFG_SWITCH, + root_switch->tmu.cap + TMU_RTR_CS_1, + ARRAY_SIZE(gm_local_time)); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(gm_local_time); i++) + tb_sw_dbg(root_switch, "local_time[%d]=0x%08x\n", i, + gm_local_time[i]); + + /* Convert to nanoseconds (drop fractional part) */ + hi = gm_local_time[2] & TMU_RTR_CS_3_LOCAL_TIME_NS_MASK; + mid = gm_local_time[1]; + lo = (gm_local_time[0] & TMU_RTR_CS_1_LOCAL_TIME_NS_MASK) >> + TMU_RTR_CS_1_LOCAL_TIME_NS_SHIFT; + local_time = hi << 48 | mid << 16 | lo; + + /* Tell the switch that time sync is disrupted for a while */ + ret = tb_switch_tmu_set_time_disruption(sw, true); + if (ret) + return ret; + + post_local_time_offset = sw->tmu.cap + TMU_RTR_CS_22; + post_time_offset = sw->tmu.cap + TMU_RTR_CS_24; + + /* + * Write the Grandmaster time to the Post Local Time registers + * of the new switch. + */ + ret = tb_sw_write(sw, &local_time, TB_CFG_SWITCH, + post_local_time_offset, 2); + if (ret) + goto out; + + /* + * Have the new switch update its local time (by writing 1 to + * the post_time registers) and wait for the completion of the + * same (post_time register becomes 0). This means the time has + * been converged properly. + */ + post_time = 1; + + ret = tb_sw_write(sw, &post_time, TB_CFG_SWITCH, post_time_offset, 2); + if (ret) + goto out; + + do { + usleep_range(5, 10); + ret = tb_sw_read(sw, &post_time, TB_CFG_SWITCH, + post_time_offset, 2); + if (ret) + goto out; + } while (--retries && post_time); + + if (!retries) { + ret = -ETIMEDOUT; + goto out; + } + + tb_sw_dbg(sw, "TMU: updated local time to %#llx\n", local_time); + +out: + tb_switch_tmu_set_time_disruption(sw, false); + return ret; +} + +/** + * tb_switch_tmu_disable() - Disable TMU of a switch + * @sw: Switch whose TMU to disable + * + * Turns off TMU of @sw if it is enabled. If not enabled does nothing. + */ +int tb_switch_tmu_disable(struct tb_switch *sw) +{ + int ret; + + if (!tb_switch_is_usb4(sw)) + return 0; + + /* Already disabled? 
*/ + if (sw->tmu.rate == TB_SWITCH_TMU_RATE_OFF) + return 0; + + if (sw->tmu.unidirectional) { + struct tb_switch *parent = tb_switch_parent(sw); + struct tb_port *up, *down; + + up = tb_upstream_port(sw); + down = tb_port_at(tb_route(sw), parent); + + /* The switch may be unplugged so ignore any errors */ + tb_port_tmu_unidirectional_disable(up); + ret = tb_port_tmu_unidirectional_disable(down); + if (ret) + return ret; + } + + tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_OFF); + + sw->tmu.unidirectional = false; + sw->tmu.rate = TB_SWITCH_TMU_RATE_OFF; + + tb_sw_dbg(sw, "TMU: disabled\n"); + return 0; +} + +/** + * tb_switch_tmu_enable() - Enable TMU on a switch + * @sw: Switch whose TMU to enable + * + * Enables TMU of a switch to be in bi-directional, HiFi mode. In this mode + * all tunneling should work. + */ +int tb_switch_tmu_enable(struct tb_switch *sw) +{ + int ret; + + if (!tb_switch_is_usb4(sw)) + return 0; + + ret = tb_switch_tmu_set_time_disruption(sw, true); + if (ret) + return ret; + + /* Change mode to bi-directional */ + if (tb_route(sw) && sw->tmu.unidirectional) { + struct tb_switch *parent = tb_switch_parent(sw); + struct tb_port *up, *down; + + up = tb_upstream_port(sw); + down = tb_port_at(tb_route(sw), parent); + + ret = tb_port_tmu_unidirectional_disable(down); + if (ret) + return ret; + + ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI); + if (ret) + return ret; + + ret = tb_port_tmu_unidirectional_disable(up); + if (ret) + return ret; + } else { + ret = tb_switch_tmu_rate_write(sw, TB_SWITCH_TMU_RATE_HIFI); + if (ret) + return ret; + } + + sw->tmu.unidirectional = false; + sw->tmu.rate = TB_SWITCH_TMU_RATE_HIFI; + tb_sw_dbg(sw, "TMU: mode set to: %s\n", tb_switch_tmu_mode_name(sw)); + + return tb_switch_tmu_set_time_disruption(sw, false); +} From patchwork Tue Oct 1 11:38:28 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168531 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id 7ED3A14DB for ; Tue, 1 Oct 2019 11:39:22 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id 529892133F for ; Tue, 1 Oct 2019 11:39:22 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1733034AbfJALjV (ORCPT ); Tue, 1 Oct 2019 07:39:21 -0400 Received: from mga12.intel.com ([192.55.52.136]:17385 "EHLO mga12.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1732670AbfJALim (ORCPT ); Tue, 1 Oct 2019 07:38:42 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:42 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="220968933" Received: from black.fi.intel.com ([10.237.72.28]) by fmsmga002.fm.intel.com with ESMTP; 01 Oct 2019 04:38:39 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id BFB996D2; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC 
PATCH 20/22] thunderbolt: Add support for USB tunnels Date: Tue, 1 Oct 2019 14:38:28 +0300 Message-Id: <20191001113830.13028-21-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org From: Rajmohan Mani USB4 added a capability to tunnel USB 3.x protocol over the USB4 fabric. USB4 device routers may include integrated SuperSpeed HUB or a function or both. USB tunneling follows PCIe so that the tunnel is created between the parent and the child router from USB downstream adapter port to USB upstream adapter port over a single USB4 link. This adds support for USB tunneling and also capability to discover existing USB tunnels (for example created by connection manager in boot firmware). Signed-off-by: Rajmohan Mani Co-developed-by: Mika Westerberg Signed-off-by: Mika Westerberg --- drivers/thunderbolt/switch.c | 35 ++++++++ drivers/thunderbolt/tb.c | 153 ++++++++++++++++++++++++++------ drivers/thunderbolt/tb.h | 15 ++++ drivers/thunderbolt/tb_regs.h | 9 +- drivers/thunderbolt/tunnel.c | 158 +++++++++++++++++++++++++++++++++- drivers/thunderbolt/tunnel.h | 9 ++ drivers/thunderbolt/usb4.c | 41 ++++++++- 7 files changed, 393 insertions(+), 27 deletions(-) diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index 58e3f54ddbb9..5a3236fefb76 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -1025,11 +1025,46 @@ bool tb_port_is_enabled(struct tb_port *port) case TB_TYPE_DP_HDMI_OUT: return tb_dp_port_is_enabled(port); + case TB_TYPE_USB_UP: + case TB_TYPE_USB_DOWN: + return tb_usb_port_is_enabled(port); + default: return false; } } +/** + * tb_usb_port_is_enabled() - Is the USB adapter port enabled + * @port: USB port to check + */ +bool tb_usb_port_is_enabled(struct tb_port *port) +{ + u32 data; + + if (tb_port_read(port, &data, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_0, 1)) + return false; + + return !!(data & ADP_USB3_CS_0_PE); +} + +/** + * tb_usb_port_enable() - Enable USB adapter port + * @port: USB port to enable + * @enable: Enable/disable the USB adapter + */ +int tb_usb_port_enable(struct tb_port *port, bool enable) +{ + u32 word = enable ? 
(ADP_USB3_CS_0_PE | ADP_USB3_CS_0_V) + : ADP_USB3_CS_0_V; + + if (!port->cap_adap) + return -ENXIO; + return tb_port_write(port, &word, TB_CFG_PORT, + port->cap_adap + ADP_USB3_CS_0, 1); +} + /** * tb_pci_port_is_enabled() - Is the PCIe adapter port enabled * @port: PCIe port to check diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index f2868c125637..6c468ba96e9a 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -116,6 +116,10 @@ static void tb_discover_tunnels(struct tb_switch *sw) tunnel = tb_tunnel_discover_pci(tb, port); break; + case TB_TYPE_USB_DOWN: + tunnel = tb_tunnel_discover_usb(tb, port); + break; + default: break; } @@ -180,6 +184,117 @@ static int tb_enable_tmu(struct tb_switch *sw) return tb_switch_tmu_enable(sw); } +/** + * tb_find_unused_port() - return the first inactive port on @sw + * @sw: Switch to find the port on + * @type: Port type to look for + */ +static struct tb_port *tb_find_unused_port(struct tb_switch *sw, + enum tb_port_type type) +{ + int i; + + tb_switch_for_each_port(sw, i) { + if (tb_is_upstream_port(&sw->ports[i])) + continue; + if (sw->ports[i].config.type != type) + continue; + if (!sw->ports[i].cap_adap) + continue; + if (tb_port_is_enabled(&sw->ports[i])) + continue; + return &sw->ports[i]; + } + return NULL; +} + +static struct tb_port *tb_find_usb_down(struct tb_switch *sw, + const struct tb_port *port) +{ + struct tb_port *down; + + down = usb4_switch_map_usb_down(sw, port); + if (down) { + if (WARN_ON(!tb_port_is_usb_down(down))) + goto out; + if (WARN_ON(tb_usb_port_is_enabled(down))) + goto out; + + return down; + } + +out: + return tb_find_unused_port(sw, TB_TYPE_USB_DOWN); +} + +static int tb_tunnel_usb(struct tb *tb, struct tb_switch *sw) +{ + struct tb_switch *parent = tb_switch_parent(sw); + struct tb_port *up, *down, *port; + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel; + + up = tb_switch_find_port(sw, TB_TYPE_USB_UP); + if (!up) + return 0; + + /* + * Look up available down port. Since we are chaining it should + * be found right above this switch. + */ + port = tb_port_at(tb_route(sw), parent); + down = tb_find_usb_down(parent, port); + if (!down) + return 0; + + if (tb_route(parent)) { + struct tb_port *parent_up; + /* + * Check first that the parent switch has its upstream + * USB port enabled. Otherwise the chain is not complete + * and there is no point setting up a new tunnel. + */ + parent_up = tb_switch_find_port(parent, TB_TYPE_USB_UP); + if (!parent_up || !tb_port_is_enabled(parent_up)) + return 0; + } + + tunnel = tb_tunnel_alloc_usb(tb, up, down); + if (!tunnel) + return -ENOMEM; + + if (tb_tunnel_activate(tunnel)) { + tb_port_info(up, + "USB tunnel activation failed, aborting\n"); + tb_tunnel_free(tunnel); + return -EIO; + } + + list_add_tail(&tunnel->list, &tcm->tunnel_list); + return 0; +} + +static int tb_create_usb_tunnels(struct tb_switch *sw) +{ + int i, ret; + + if (tb_route(sw)) { + ret = tb_tunnel_usb(sw->tb, sw); + if (ret) + return ret; + } + + tb_switch_for_each_remote_port(sw, i) { + struct tb_port *port = &sw->ports[i]; + + ret = tb_create_usb_tunnels(port->remote->sw); + if (ret) + return ret; + } + + return 0; +} + static void tb_scan_port(struct tb_port *port); /** @@ -285,6 +400,15 @@ static void tb_scan_port(struct tb_port *port) if (tb_enable_tmu(sw)) tb_sw_warn(sw, "failed to enable TMU\n"); + /* + * Create USB tunnels only when the switch is plugged to the + * domain. 
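Because each USB3 tunnel only spans the single link between a router and its parent, a complete USB3 path down a chain of devices exists only if every router above it already has its tunnel, which is why tb_create_usb_tunnels() works top down. A toy tree version of that ordering (router names and the two-child limit are made up):

#include <stdio.h>

struct router {
	const char *name;
	struct router *child[2];
};

static int tunnel_usb(struct router *r)
{
	printf("USB3 tunnel to %s\n", r->name);
	return 0;
}

static int create_usb_tunnels(struct router *r, int is_root)
{
	int i, ret;

	/* The root router has no upstream link to tunnel */
	if (!is_root) {
		ret = tunnel_usb(r);
		if (ret)
			return ret;
	}

	for (i = 0; i < 2; i++) {
		if (!r->child[i])
			continue;
		ret = create_usb_tunnels(r->child[i], 0);
		if (ret)
			return ret;
	}
	return 0;
}

int main(void)
{
	struct router leaf = { "device-behind-dock", { 0 } };
	struct router dock = { "dock", { &leaf } };
	struct router root = { "host", { &dock } };

	return create_usb_tunnels(&root, 1);
}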
This is because we scan the domain also during + * discovery and want to discover existing USB tunnels before we + * create any new. + */ + if (tcm->hotplug_active && tb_tunnel_usb(sw->tb, sw)) + tb_sw_warn(sw, "USB tunnel creation failed\n"); + tb_scan_switch(sw); } @@ -365,30 +489,6 @@ static void tb_free_unplugged_children(struct tb_switch *sw) } } -/** - * tb_find_unused_port() - return the first inactive port on @sw - * @sw: Switch to find the port on - * @type: Port type to look for - */ -static struct tb_port *tb_find_unused_port(struct tb_switch *sw, - enum tb_port_type type) -{ - int i; - - tb_switch_for_each_port(sw, i) { - if (tb_is_upstream_port(&sw->ports[i])) - continue; - if (sw->ports[i].config.type != type) - continue; - if (!sw->ports[i].cap_adap) - continue; - if (tb_port_is_enabled(&sw->ports[i])) - continue; - return &sw->ports[i]; - } - return NULL; -} - static struct tb_port *tb_find_pcie_down(struct tb_switch *sw, const struct tb_port *port) { @@ -890,6 +990,11 @@ static int tb_start(struct tb *tb) tb_scan_switch(tb->root_switch); /* Find out tunnels created by the boot firmware */ tb_discover_tunnels(tb->root_switch); + /* + * If the boot firmware did not create USB tunnels create them + * now for the whole topology. + */ + tb_create_usb_tunnels(tb->root_switch); /* Add DP IN resources for the root switch */ tb_add_dp_resources(tb->root_switch); /* Make the discovered switches available to the userspace */ diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 087fd6d6ef9a..fbeb8bab77e5 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -432,6 +432,16 @@ static inline bool tb_port_is_dpout(const struct tb_port *port) return port && port->config.type == TB_TYPE_DP_HDMI_OUT; } +static inline bool tb_port_is_usb_down(const struct tb_port *port) +{ + return port && port->config.type == TB_TYPE_USB_DOWN; +} + +static inline bool tb_port_is_usb_up(const struct tb_port *port) +{ + return port && port->config.type == TB_TYPE_USB_UP; +} + static inline int tb_sw_read(struct tb_switch *sw, void *buffer, enum tb_cfg_space space, u32 offset, u32 length) { @@ -760,6 +770,9 @@ int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap); int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap); bool tb_port_is_enabled(struct tb_port *port); +bool tb_usb_port_is_enabled(struct tb_port *port); +int tb_usb_port_enable(struct tb_port *port, bool enable); + bool tb_pci_port_is_enabled(struct tb_port *port); int tb_pci_port_enable(struct tb_port *port, bool enable); @@ -842,6 +855,8 @@ int usb4_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); int usb4_switch_dealloc_dp_resource(struct tb_switch *sw, struct tb_port *in); struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, const struct tb_port *port); +struct tb_port *usb4_switch_map_usb_down(struct tb_switch *sw, + const struct tb_port *port); int usb4_port_unlock(struct tb_port *port); #endif diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index ec1a5d1f7c94..062676229ac1 100644 --- a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -180,6 +180,7 @@ struct tb_regs_switch_header { #define ROUTER_CS_5_SLP BIT(0) #define ROUTER_CS_5_C3S BIT(23) #define ROUTER_CS_5_PTO BIT(24) +#define ROUTER_CS_5_UTO BIT(25) #define ROUTER_CS_5_HCO BIT(26) #define ROUTER_CS_5_CV BIT(31) #define ROUTER_CS_6 0x06 @@ -221,7 +222,8 @@ enum tb_port_type { TB_TYPE_DP_HDMI_OUT = 0x0e0102, TB_TYPE_PCIE_DOWN = 0x100101, 
TB_TYPE_PCIE_UP = 0x100102, - /* TB_TYPE_USB = 0x200000, lower order bits are not known */ + TB_TYPE_USB_DOWN = 0x200101, + TB_TYPE_USB_UP = 0x200102, }; /* Present on every port in TB_CF_PORT at address zero. */ @@ -331,6 +333,11 @@ struct tb_regs_port_header { #define ADP_PCIE_CS_0 0x00 #define ADP_PCIE_CS_0_PE BIT(31) +/* USB adapter registers */ +#define ADP_USB3_CS_0 0x00 +#define ADP_USB3_CS_0_V BIT(30) +#define ADP_USB3_CS_0_PE BIT(31) + /* Hop register from TB_CFG_HOPS. 8 byte per entry. */ struct tb_regs_hop { /* DWORD 0 */ diff --git a/drivers/thunderbolt/tunnel.c b/drivers/thunderbolt/tunnel.c index e8e387c4211f..299b77665d83 100644 --- a/drivers/thunderbolt/tunnel.c +++ b/drivers/thunderbolt/tunnel.c @@ -19,6 +19,12 @@ #define TB_PCI_PATH_DOWN 0 #define TB_PCI_PATH_UP 1 +/* USB adapters use always HopID of 8 for both directions */ +#define TB_USB_HOPID 8 + +#define TB_USB_PATH_DOWN 0 +#define TB_USB_PATH_UP 1 + /* DP adapters use HopID 8 for AUX and 9 for Video */ #define TB_DP_AUX_TX_HOPID 8 #define TB_DP_AUX_RX_HOPID 8 @@ -31,7 +37,7 @@ #define TB_DMA_PATH_OUT 0 #define TB_DMA_PATH_IN 1 -static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA" }; +static const char * const tb_tunnel_names[] = { "PCI", "DP", "DMA", "USB"}; #define __TB_TUNNEL_PRINT(level, tunnel, fmt, arg...) \ do { \ @@ -838,6 +844,156 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, return tunnel; } +static int tb_usb_activate(struct tb_tunnel *tunnel, bool activate) +{ + int res; + + res = tb_usb_port_enable(tunnel->src_port, activate); + if (res) + return res; + + if (tb_port_is_usb_up(tunnel->dst_port)) + return tb_usb_port_enable(tunnel->dst_port, activate); + + return 0; +} + +static void tb_usb_init_path(struct tb_path *path) +{ + path->egress_fc_enable = TB_PATH_SOURCE | TB_PATH_INTERNAL; + path->egress_shared_buffer = TB_PATH_NONE; + path->ingress_fc_enable = TB_PATH_ALL; + path->ingress_shared_buffer = TB_PATH_NONE; + path->priority = 3; + path->weight = 3; + path->drop_packages = 0; + path->nfc_credits = 0; + path->hops[0].initial_credits = 7; + path->hops[1].initial_credits = + tb_initial_credits(path->hops[1].in_port->sw); +} + +/** + * tb_tunnel_discover_usb() - Discover existing USB tunnels + * @tb: Pointer to the domain structure + * @down: USB downstream adapter + * + * If @down adapter is active, follows the tunnel to the USB upstream + * adapter and back. Returns the discovered tunnel or %NULL if there was + * no tunnel. + */ +struct tb_tunnel *tb_tunnel_discover_usb(struct tb *tb, struct tb_port *down) +{ + struct tb_tunnel *tunnel; + struct tb_path *path; + + if (!tb_usb_port_is_enabled(down)) + return NULL; + + tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB); + if (!tunnel) + return NULL; + + tunnel->activate = tb_usb_activate; + tunnel->src_port = down; + + /* + * Discover both paths even if they are not complete. We will + * clean them up by calling tb_tunnel_deactivate() below in that + * case. 
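Unlike DP, both directions of a USB3 tunnel are pinned to HopID 8, and tb_usb_init_path() gives both paths the same priority and weight. A minimal representation of the resulting tunnel object; field names are simplified, the values come from the code above:

#include <stdio.h>

#define USB_HOPID 8

struct usb_path {
	const char *name;
	int hopid;
	int priority;
	int weight;
};

struct usb_tunnel {
	struct usb_path down;	/* toward the device */
	struct usb_path up;	/* toward the host */
};

static struct usb_tunnel alloc_usb_tunnel(void)
{
	struct usb_tunnel t = {
		.down = { "USB Down", USB_HOPID, 3, 3 },
		.up   = { "USB Up",   USB_HOPID, 3, 3 },
	};
	return t;
}

int main(void)
{
	struct usb_tunnel t = alloc_usb_tunnel();

	printf("%s and %s both use HopID %d\n",
	       t.down.name, t.up.name, t.down.hopid);
	return 0;
}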
+ */ + path = tb_path_discover(down, TB_USB_HOPID, NULL, -1, + &tunnel->dst_port, "USB Up"); + if (!path) { + /* Just disable the downstream port */ + tb_usb_port_enable(down, false); + goto err_free; + } + tunnel->paths[TB_USB_PATH_UP] = path; + tb_usb_init_path(tunnel->paths[TB_USB_PATH_UP]); + + path = tb_path_discover(tunnel->dst_port, -1, down, TB_USB_HOPID, NULL, + "USB Down"); + if (!path) + goto err_deactivate; + tunnel->paths[TB_USB_PATH_DOWN] = path; + tb_usb_init_path(tunnel->paths[TB_USB_PATH_DOWN]); + + /* Validate that the tunnel is complete */ + if (!tb_port_is_usb_up(tunnel->dst_port)) { + tb_port_warn(tunnel->dst_port, + "path does not end on an USB adapter, cleaning up\n"); + goto err_deactivate; + } + + if (down != tunnel->src_port) { + tb_tunnel_warn(tunnel, "path is not complete, cleaning up\n"); + goto err_deactivate; + } + + if (!tb_usb_port_is_enabled(tunnel->dst_port)) { + tb_tunnel_warn(tunnel, + "tunnel is not fully activated, cleaning up\n"); + goto err_deactivate; + } + + tb_tunnel_dbg(tunnel, "discovered\n"); + return tunnel; + +err_deactivate: + tb_tunnel_deactivate(tunnel); +err_free: + tb_tunnel_free(tunnel); + + return NULL; +} + +/** + * tb_tunnel_alloc_usb() - allocate a USB tunnel + * @tb: Pointer to the domain structure + * @up: USB upstream adapter port + * @down: USB downstream adapter port + * + * Allocate an USB tunnel. The ports must be of type @TB_TYPE_USB_UP and + * @TB_TYPE_USB_DOWN. + * + * Return: Returns a tb_tunnel on success or %NULL on failure. + */ +struct tb_tunnel *tb_tunnel_alloc_usb(struct tb *tb, struct tb_port *up, + struct tb_port *down) +{ + struct tb_tunnel *tunnel; + struct tb_path *path; + + tunnel = tb_tunnel_alloc(tb, 2, TB_TUNNEL_USB); + if (!tunnel) + return NULL; + + tunnel->activate = tb_usb_activate; + tunnel->src_port = down; + tunnel->dst_port = up; + + path = tb_path_alloc(tb, down, TB_USB_HOPID, up, TB_USB_HOPID, 0, + "USB Down"); + if (!path) { + tb_tunnel_free(tunnel); + return NULL; + } + tb_usb_init_path(path); + tunnel->paths[TB_USB_PATH_DOWN] = path; + + path = tb_path_alloc(tb, up, TB_USB_HOPID, down, TB_USB_HOPID, 0, + "USB Up"); + if (!path) { + tb_tunnel_free(tunnel); + return NULL; + } + tb_usb_init_path(path); + tunnel->paths[TB_USB_PATH_UP] = path; + + return tunnel; +} + /** * tb_tunnel_free() - free a tunnel * @tunnel: Tunnel to be freed diff --git a/drivers/thunderbolt/tunnel.h b/drivers/thunderbolt/tunnel.h index ba888da005f5..83c6723fe8d8 100644 --- a/drivers/thunderbolt/tunnel.h +++ b/drivers/thunderbolt/tunnel.h @@ -15,6 +15,7 @@ enum tb_tunnel_type { TB_TUNNEL_PCI, TB_TUNNEL_DP, TB_TUNNEL_DMA, + TB_TUNNEL_USB, }; /** @@ -57,6 +58,9 @@ struct tb_tunnel *tb_tunnel_alloc_dma(struct tb *tb, struct tb_port *nhi, struct tb_port *dst, int transmit_ring, int transmit_path, int receive_ring, int receive_path); +struct tb_tunnel *tb_tunnel_discover_usb(struct tb *tb, struct tb_port *down); +struct tb_tunnel *tb_tunnel_alloc_usb(struct tb *tb, struct tb_port *up, + struct tb_port *down); void tb_tunnel_free(struct tb_tunnel *tunnel); int tb_tunnel_activate(struct tb_tunnel *tunnel); @@ -82,5 +86,10 @@ static inline bool tb_tunnel_is_dma(const struct tb_tunnel *tunnel) return tunnel->type == TB_TUNNEL_DMA; } +static inline bool tb_tunnel_is_usb(const struct tb_tunnel *tunnel) +{ + return tunnel->type == TB_TUNNEL_USB; +} + #endif diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index 4b0997292b43..24f60f7523b8 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ 
-226,10 +226,19 @@ int usb4_switch_setup(struct tb_switch *sw) parent = tb_switch_parent(sw); + if (tb_switch_find_port(parent, TB_TYPE_USB_DOWN)) { + val |= ROUTER_CS_5_UTO; + xhci = false; + } + /* Only enable PCIe tunneling if the parent router supports it */ if (tb_switch_find_port(parent, TB_TYPE_PCIE_DOWN)) { val |= ROUTER_CS_5_PTO; - /* xHCI can be enabled if PCIe tunneling is supported */ + /* + * xHCI can be enabled if PCIe tunneling is supported + * and the parent does not have any USB dowstream + * adapters (so we cannot do USB tunneling). + */ if (xhci & ROUTER_CS_6_HCI) val |= ROUTER_CS_5_HCO; } @@ -701,6 +710,36 @@ struct tb_port *usb4_switch_map_pcie_down(struct tb_switch *sw, return NULL; } +/** + * usb4_switch_map_usb_down() - Map USB4 port to a USB downstream adapter + * @sw: USB4 router + * @port: USB4 port + * + * USB4 routers have direct mapping between USB4 ports and USB 3.x + * downstream adapters where the USB 3.x topology is extended. This + * function returns the corresponding downstream USB 3.x adapter or + * %NULL if no such mapping was possible. + */ +struct tb_port *usb4_switch_map_usb_down(struct tb_switch *sw, + const struct tb_port *port) +{ + int usb4_idx = usb4_port_idx(sw, port); + int i, usb_idx = 0; + + /* Find USB down port matching usb4_port */ + tb_switch_for_each_port(sw, i) { + if (tb_port_is_usb_down(&sw->ports[i])) { + if (usb_idx == usb4_idx && + !tb_usb_port_is_enabled(&sw->ports[i])) { + return &sw->ports[i]; + } + usb_idx++; + } + } + + return NULL; +} + /** * usb4_port_unlock() - Unlock USB4 downstream port * @port: USB4 port to unlock From patchwork Tue Oct 1 11:38:29 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168521 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id BF5291747 for ; Tue, 1 Oct 2019 11:39:03 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id A750521906 for ; Tue, 1 Oct 2019 11:39:03 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1732887AbfJALjC (ORCPT ); Tue, 1 Oct 2019 07:39:02 -0400 Received: from mga02.intel.com ([134.134.136.20]:39078 "EHLO mga02.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1732674AbfJALim (ORCPT ); Tue, 1 Oct 2019 07:38:42 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga007.fm.intel.com ([10.253.24.52]) by orsmga101.jf.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:42 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="191434989" Received: from black.fi.intel.com ([10.237.72.28]) by fmsmga007.fm.intel.com with ESMTP; 01 Oct 2019 04:38:39 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id CA452720; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 21/22] thunderbolt: Update documentation with the USB4 information Date: Tue, 1 Oct 2019 14:38:29 +0300 Message-Id: <20191001113830.13028-22-mika.westerberg@linux.intel.com> X-Mailer: 
git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org Update the user's and administrator's guide to mention USB4, how it relates to Thunderbolt (it is the public spec of Thunderbolt 3) and how it is supported in Linux. Signed-off-by: Mika Westerberg --- Documentation/admin-guide/thunderbolt.rst | 27 ++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/Documentation/admin-guide/thunderbolt.rst b/Documentation/admin-guide/thunderbolt.rst index 898ad78f3cc7..4cbed319133d 100644 --- a/Documentation/admin-guide/thunderbolt.rst +++ b/Documentation/admin-guide/thunderbolt.rst @@ -1,6 +1,25 @@ -============= - Thunderbolt -============= +====================== + Thunderbolt and USB4 +====================== +USB4 is the public spec of Thunderbolt 3 with some differences at the +register level among other things. There are two different +implementations available: firmware connection manager and software +connection manager. Typically PCs come with a firmware connection +manager for Thunderbolt 3 and early USB4 capable systems. Apple systems +on the other hand use software connection manager and the future USB4 +compliant PCs follow the suit. + +The Linux Thunderbolt driver supports both and can detect at runtime +which connection manager implementation is to be used. To be on the safe +side the software connection manager in Linux also advertises security +level ``user`` which means PCIe tunneling is disabled by default. The +documentation below applies to both implementations with the exception +that the software connection manager only supports ``user`` security +level and is expected to be accompanied with an IOMMU based DMA +protection. + +Security levels and how to use them +----------------------------------- The interface presented here is not meant for end users. Instead there should be a userspace tool that handles all the low-level details, keeps a database of the authorized devices and prompts users for new connections. @@ -18,8 +37,6 @@ This will authorize all devices automatically when they appear. However, keep in mind that this bypasses the security levels and makes the system vulnerable to DMA attacks. -Security levels and how to use them ------------------------------------ Starting with Intel Falcon Ridge Thunderbolt controller there are 4 security levels available. Intel Titan Ridge added one more security level (usbonly).
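With the ``user`` security level that the software connection manager advertises, PCIe tunnels are only set up once userspace authorizes the device. A minimal sketch of what such a userspace tool does, assuming a device named 0-1 under the Thunderbolt sysfs bus directory (the device name is only an example):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const char *path = "/sys/bus/thunderbolt/devices/0-1/authorized";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror(path);
		return 1;
	}
	if (write(fd, "1", 1) != 1)	/* authorize the device */
		perror("write");
	close(fd);
	return 0;
}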
The reason for these is the fact that the connected devices can From patchwork Tue Oct 1 11:38:30 2019 Content-Type: text/plain; charset="utf-8" MIME-Version: 1.0 Content-Transfer-Encoding: 7bit X-Patchwork-Submitter: Mika Westerberg X-Patchwork-Id: 11168525 Return-Path: Received: from mail.kernel.org (pdx-korg-mail-1.web.codeaurora.org [172.30.200.123]) by pdx-korg-patchwork-2.web.codeaurora.org (Postfix) with ESMTP id CE1CB14DB for ; Tue, 1 Oct 2019 11:39:19 +0000 (UTC) Received: from vger.kernel.org (vger.kernel.org [209.132.180.67]) by mail.kernel.org (Postfix) with ESMTP id B648721906 for ; Tue, 1 Oct 2019 11:39:19 +0000 (UTC) Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1732877AbfJALjC (ORCPT ); Tue, 1 Oct 2019 07:39:02 -0400 Received: from mga12.intel.com ([192.55.52.136]:17385 "EHLO mga12.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1732685AbfJALin (ORCPT ); Tue, 1 Oct 2019 07:38:43 -0400 X-Amp-Result: SKIPPED(no attachment in message) X-Amp-File-Uploaded: False Received: from fmsmga002.fm.intel.com ([10.253.24.26]) by fmsmga106.fm.intel.com with ESMTP/TLS/DHE-RSA-AES256-GCM-SHA384; 01 Oct 2019 04:38:42 -0700 X-ExtLoop1: 1 X-IronPort-AV: E=Sophos;i="5.64,571,1559545200"; d="scan'208";a="220968935" Received: from black.fi.intel.com ([10.237.72.28]) by fmsmga002.fm.intel.com with ESMTP; 01 Oct 2019 04:38:39 -0700 Received: by black.fi.intel.com (Postfix, from userid 1001) id D4A8577A; Tue, 1 Oct 2019 14:38:31 +0300 (EEST) From: Mika Westerberg To: linux-usb@vger.kernel.org Cc: Andreas Noever , Michael Jamet , Mika Westerberg , Yehezkel Bernat , Rajmohan Mani , Nicholas Johnson , Lukas Wunner , Greg Kroah-Hartman , Alan Stern , Mario.Limonciello@dell.com, Anthony Wong , linux-kernel@vger.kernel.org Subject: [RFC PATCH 22/22] thunderbolt: Do not start firmware unless asked by the user Date: Tue, 1 Oct 2019 14:38:30 +0300 Message-Id: <20191001113830.13028-23-mika.westerberg@linux.intel.com> X-Mailer: git-send-email 2.23.0 In-Reply-To: <20191001113830.13028-1-mika.westerberg@linux.intel.com> References: <20191001113830.13028-1-mika.westerberg@linux.intel.com> MIME-Version: 1.0 Sender: linux-usb-owner@vger.kernel.org Precedence: bulk List-ID: X-Mailing-List: linux-usb@vger.kernel.org Since the software connection manager can now do pretty much the same things as the firmware, there is no point starting the firmware by default. Instead we can just continue using the software connection manager. Make it possible for the user to switch between the two by adding a module parameter (start_icm) which is false by default. Having this ability to enable the firmware may be useful at least when debugging possible issues with the software connection manager implementation.
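The decision the patch below adds to icm_ar_is_supported() reduces to a simple check of the firmware status register against the new module parameter; roughly like this sketch (register access is faked and the bit value is made up, only REG_FW_STS_ICM_EN's role is taken from the patch):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FW_STS_ICM_EN 0x1u	/* stand-in for REG_FW_STS_ICM_EN */

static bool icm_should_be_used(uint32_t fw_sts, bool start_icm)
{
	/* ICM not running and the user did not ask for it: use the SW CM */
	if (!(fw_sts & FW_STS_ICM_EN) && !start_icm)
		return false;
	return true;
}

int main(void)
{
	printf("%d %d %d\n",
	       icm_should_be_used(0, false),		/* 0: software CM */
	       icm_should_be_used(FW_STS_ICM_EN, false),	/* 1: ICM already running */
	       icm_should_be_used(0, true));		/* 1: user asked for ICM */
	return 0;
}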
Signed-off-by: Mika Westerberg --- drivers/thunderbolt/icm.c | 14 +++++++++++--- drivers/thunderbolt/tb.c | 4 ---- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index 9c9c6ea2b790..c4a2de0f2a44 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -11,6 +11,7 @@ #include #include +#include #include #include #include @@ -43,6 +44,10 @@ #define ICM_APPROVE_TIMEOUT 10000 /* ms */ #define ICM_MAX_LINK 4 +static bool start_icm; +module_param(start_icm, bool, 0444); +MODULE_PARM_DESC(start_icm, "start ICM firmware if it is not running (default: false)"); + /** * struct icm - Internal connection manager private data * @request_lock: Makes sure only one message is send to ICM at time @@ -1353,13 +1358,16 @@ static bool icm_ar_is_supported(struct tb *tb) { struct pci_dev *upstream_port; struct icm *icm = tb_priv(tb); + u32 val; /* * Starting from Alpine Ridge we can use ICM on Apple machines * as well. We just need to reset and re-enable it first. + * However, only start it if explicitly asked by the user. */ - if (!x86_apple_machine) - return true; + val = ioread32(tb->nhi->iobase + REG_FW_STS); + if (!(val & REG_FW_STS_ICM_EN) && !start_icm) + return false; /* * Find the upstream PCIe port in case we need to do reset @@ -2224,7 +2232,7 @@ struct tb *icm_probe(struct tb_nhi *nhi) case PCI_DEVICE_ID_INTEL_ICL_NHI0: case PCI_DEVICE_ID_INTEL_ICL_NHI1: - icm->is_supported = icm_ar_is_supported; + icm->is_supported = icm_fr_is_supported; icm->driver_ready = icm_icl_driver_ready; icm->set_uuid = icm_icl_set_uuid; icm->device_connected = icm_icl_device_connected; diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index 6c468ba96e9a..aebf2c10aa85 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -9,7 +9,6 @@ #include #include #include -#include #include "tb.h" #include "tb_regs.h" @@ -1117,9 +1116,6 @@ struct tb *tb_probe(struct tb_nhi *nhi) struct tb_cm *tcm; struct tb *tb; - if (!x86_apple_machine) - return NULL; - tb = tb_domain_alloc(nhi, sizeof(*tcm)); if (!tb) return NULL;
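Since start_icm is a regular module parameter, it can be set at load time with "modprobe thunderbolt start_icm=1", or as "thunderbolt.start_icm=1" on the kernel command line when the driver is built in. With the default of false the software connection manager keeps handling the topology even on systems where the ICM firmware is present but not running.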