diff mbox series

[net-next,v18,02/13] rtase: Implement the .ndo_open function

Message ID 20240508123945.201524-3-justinlai0215@realtek.com (mailing list archive)
State Superseded
Delegated to: Netdev Maintainers
Headers show
Series Add Realtek automotive PCIe driver | expand

Checks

Context Check Description
netdev/series_format success Posting correctly formatted
netdev/tree_selection success Clearly marked for net-next, async
netdev/ynl success Generated files up to date; no warnings/errors; no diff in generated;
netdev/fixes_present success Fixes tag not required for -next series
netdev/header_inline success No static functions without inline keyword in header files
netdev/build_32bit success Errors and warnings before: 8 this patch: 8
netdev/build_tools success No tools touched, skip
netdev/cc_maintainers success CCed 5 of 5 maintainers
netdev/build_clang success Errors and warnings before: 8 this patch: 8
netdev/verify_signedoff success Signed-off-by tag matches author and committer
netdev/deprecated_api success None detected
netdev/check_selftest success No net selftest shell script
netdev/verify_fixes success No Fixes tag
netdev/build_allmodconfig_warn success Errors and warnings before: 8 this patch: 8
netdev/checkpatch success total: 0 errors, 0 warnings, 0 checks, 444 lines checked
netdev/build_clang_rust success No Rust files in patch. Skipping build
netdev/kdoc success Errors and warnings before: 0 this patch: 0
netdev/source_inline success Was 0 now: 0

Commit Message

Justin Lai May 8, 2024, 12:39 p.m. UTC
Implement the .ndo_open function to set default hardware settings and
initialize the descriptor rings and interrupts. When requesting IRQs,
the first interrupt vector has to handle more events than the other
vectors, so its setup differs from theirs and is handled separately.

Signed-off-by: Justin Lai <justinlai0215@realtek.com>
---
 .../net/ethernet/realtek/rtase/rtase_main.c   | 420 ++++++++++++++++++
 1 file changed, 420 insertions(+)

Comments

Ratheesh Kannoth May 9, 2024, 6:57 a.m. UTC | #1
On 2024-05-08 at 18:09:34, Justin Lai (justinlai0215@realtek.com) wrote:
>
> +static int rtase_alloc_desc(struct rtase_private *tp)
> +{
> +	struct pci_dev *pdev = tp->pdev;
> +	u32 i;
> +
> +	/* rx and tx descriptors needs 256 bytes alignment.
> +	 * dma_alloc_coherent provides more.
> +	 */
> +	for (i = 0; i < tp->func_tx_queue_num; i++) {
> +		tp->tx_ring[i].desc =
> +				dma_alloc_coherent(&pdev->dev,
> +						   RTASE_TX_RING_DESC_SIZE,
> +						   &tp->tx_ring[i].phy_addr,
> +						   GFP_KERNEL);
> +		if (!tp->tx_ring[i].desc)
You have handled errors gracefully everywhere else. Why not here?
> +			return -ENOMEM;
> +	}
> +
> +	for (i = 0; i < tp->func_rx_queue_num; i++) {
> +		tp->rx_ring[i].desc =
> +				dma_alloc_coherent(&pdev->dev,
> +						   RTASE_RX_RING_DESC_SIZE,
> +						   &tp->rx_ring[i].phy_addr,
> +						   GFP_KERNEL);
> +		if (!tp->rx_ring[i].desc)
> +			return -ENOMEM;
> +	}
> +
> +	return 0;
> +}
> +
> +static void rtase_free_desc(struct rtase_private *tp)
> +{
> +	struct pci_dev *pdev = tp->pdev;
> +	u32 i;
> +
> +	for (i = 0; i < tp->func_tx_queue_num; i++) {
> +		if (!tp->tx_ring[i].desc)
> +			continue;
> +
> +		dma_free_coherent(&pdev->dev, RTASE_TX_RING_DESC_SIZE,
> +				  tp->tx_ring[i].desc,
> +				  tp->tx_ring[i].phy_addr);
> +		tp->tx_ring[i].desc = NULL;
> +	}
> +
> +	for (i = 0; i < tp->func_rx_queue_num; i++) {
> +		if (!tp->rx_ring[i].desc)
> +			continue;
> +
> +		dma_free_coherent(&pdev->dev, RTASE_RX_RING_DESC_SIZE,
> +				  tp->rx_ring[i].desc,
> +				  tp->rx_ring[i].phy_addr);
> +		tp->rx_ring[i].desc = NULL;
> +	}
> +}
> +
> +static void rtase_mark_to_asic(union rtase_rx_desc *desc, u32 rx_buf_sz)
> +{
> +	u32 eor = le32_to_cpu(desc->desc_cmd.opts1) & RTASE_RING_END;
> +
> +	desc->desc_status.opts2 = 0;
Should desc->desc_cmd.addr be written before desc->desc_status.opts2? Just a question
whether the dma_wmb() below suffices for both?
> +	/* force memory writes to complete before releasing descriptor */
> +	dma_wmb();
> +	WRITE_ONCE(desc->desc_cmd.opts1,
> +		   cpu_to_le32(RTASE_DESC_OWN | eor | rx_buf_sz));
> +}
> +
> +static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx)
> +{
> +	struct rtase_ring *ring = &tp->tx_ring[idx];
> +	struct rtase_tx_desc *desc;
> +	u32 i;
> +
> +	memset(ring->desc, 0x0, RTASE_TX_RING_DESC_SIZE);
> +	memset(ring->skbuff, 0x0, sizeof(ring->skbuff));
> +	ring->cur_idx = 0;
> +	ring->dirty_idx = 0;
> +	ring->index = idx;
> +
> +	for (i = 0; i < RTASE_NUM_DESC; i++) {
> +		ring->mis.len[i] = 0;
> +		if ((RTASE_NUM_DESC - 1) == i) {
> +			desc = ring->desc + sizeof(struct rtase_tx_desc) * i;
> +			desc->opts1 = cpu_to_le32(RTASE_RING_END);
> +		}
> +	}
> +
> +	ring->ring_handler = tx_handler;
> +	if (idx < 4) {
> +		ring->ivec = &tp->int_vector[idx];
> +		list_add_tail(&ring->ring_entry,
> +			      &tp->int_vector[idx].ring_list);
> +	} else {
> +		ring->ivec = &tp->int_vector[0];
> +		list_add_tail(&ring->ring_entry, &tp->int_vector[0].ring_list);
> +	}
> +}
> +
> +static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping,
> +			      u32 rx_buf_sz)
> +{
> +	desc->desc_cmd.addr = cpu_to_le64(mapping);
> +	/* make sure the physical address has been updated */
> +	wmb();
Why not dma_wmb()?
> +	rtase_mark_to_asic(desc, rx_buf_sz);
> +}
> +
> +static void rtase_make_unusable_by_asic(union rtase_rx_desc *desc)
> +{
> +	desc->desc_cmd.addr = cpu_to_le64(RTK_MAGIC_NUMBER);
> +	desc->desc_cmd.opts1 &= ~cpu_to_le32(RTASE_DESC_OWN | RSVD_MASK);
> +}
> +
> +static int rtase_alloc_rx_skb(const struct rtase_ring *ring,
> +			      struct sk_buff **p_sk_buff,
> +			      union rtase_rx_desc *desc,
> +			      dma_addr_t *rx_phy_addr, u8 in_intr)
> +{
> +	struct rtase_int_vector *ivec = ring->ivec;
> +	const struct rtase_private *tp = ivec->tp;
> +	struct sk_buff *skb = NULL;
> +	dma_addr_t mapping;
> +	struct page *page;
> +	void *buf_addr;
> +	int ret = 0;
> +
> +	page = page_pool_dev_alloc_pages(tp->page_pool);
> +	if (!page) {
> +		netdev_err(tp->dev, "failed to alloc page\n");
> +		goto err_out;
> +	}
> +
> +	buf_addr = page_address(page);
> +	mapping = page_pool_get_dma_addr(page);
> +
> +	skb = build_skb(buf_addr, PAGE_SIZE);
> +	if (!skb) {
> +		page_pool_put_full_page(tp->page_pool, page, true);
> +		netdev_err(tp->dev, "failed to build skb\n");
> +		goto err_out;
> +	}
Did you mark the skb for recycle? Hmm ... did I miss the code?

> +
> +	*p_sk_buff = skb;
> +	*rx_phy_addr = mapping;
> +	rtase_map_to_asic(desc, mapping, tp->rx_buf_sz);
> +
> +	return ret;
> +
> +err_out:
> +	if (skb)
> +		dev_kfree_skb(skb);
> +
> +	ret = -ENOMEM;
> +	rtase_make_unusable_by_asic(desc);
> +
> +	return ret;
> +}
> +
> +static u32 rtase_rx_ring_fill(struct rtase_ring *ring, u32 ring_start,
> +			      u32 ring_end, u8 in_intr)
> +{
> +	union rtase_rx_desc *desc_base = ring->desc;
> +	u32 cur;
> +
> +	for (cur = ring_start; ring_end - cur > 0; cur++) {
> +		u32 i = cur % RTASE_NUM_DESC;
> +		union rtase_rx_desc *desc = desc_base + i;
> +		int ret;
> +
> +		if (ring->skbuff[i])
> +			continue;
> +
> +		ret = rtase_alloc_rx_skb(ring, &ring->skbuff[i], desc,
> +					 &ring->mis.data_phy_addr[i],
> +					 in_intr);
> +		if (ret)
> +			break;
> +	}
> +
> +	return cur - ring_start;
> +}
> +
> +static void rtase_mark_as_last_descriptor(union rtase_rx_desc *desc)
> +{
> +	desc->desc_cmd.opts1 |= cpu_to_le32(RTASE_RING_END);
> +}
> +
> +static void rtase_rx_ring_clear(struct rtase_ring *ring)
> +{
> +	union rtase_rx_desc *desc;
> +	u32 i;
> +
> +	for (i = 0; i < RTASE_NUM_DESC; i++) {
> +		desc = ring->desc + sizeof(union rtase_rx_desc) * i;
> +
> +		if (!ring->skbuff[i])
> +			continue;
> +
> +		skb_mark_for_recycle(ring->skbuff[i]);
> +
> +		dev_kfree_skb(ring->skbuff[i]);
> +
> +		ring->skbuff[i] = NULL;
> +
> +		rtase_make_unusable_by_asic(desc);
> +	}
> +}
> +
> +static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
> +{
> +	struct rtase_ring *ring = &tp->rx_ring[idx];
> +	u16 i;
> +
> +	memset(ring->desc, 0x0, RTASE_RX_RING_DESC_SIZE);
> +	memset(ring->skbuff, 0x0, sizeof(ring->skbuff));
> +	ring->cur_idx = 0;
> +	ring->dirty_idx = 0;
> +	ring->index = idx;
> +
> +	for (i = 0; i < RTASE_NUM_DESC; i++)
> +		ring->mis.data_phy_addr[i] = 0;
> +
> +	ring->ring_handler = rx_handler;
> +	ring->ivec = &tp->int_vector[idx];
> +	list_add_tail(&ring->ring_entry, &tp->int_vector[idx].ring_list);
> +}
> +
> +static void rtase_rx_clear(struct rtase_private *tp)
> +{
> +	u32 i;
> +
> +	for (i = 0; i < tp->func_rx_queue_num; i++)
> +		rtase_rx_ring_clear(&tp->rx_ring[i]);
> +
> +	page_pool_destroy(tp->page_pool);
> +	tp->page_pool = NULL;
> +}
> +
> +static int rtase_init_ring(const struct net_device *dev)
> +{
> +	struct rtase_private *tp = netdev_priv(dev);
> +	struct page_pool_params pp_params = { 0 };
> +	struct page_pool *page_pool;
> +	u32 num;
> +	u16 i;
> +
> +	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
> +	pp_params.order = 0;
> +	pp_params.pool_size = RTASE_NUM_DESC * tp->func_rx_queue_num;
> +	pp_params.nid = dev_to_node(&tp->pdev->dev);
> +	pp_params.dev = &tp->pdev->dev;
> +	pp_params.dma_dir = DMA_FROM_DEVICE;
> +	pp_params.max_len = PAGE_SIZE;
> +	pp_params.offset = 0;
> +
> +	page_pool = page_pool_create(&pp_params);
> +	if (IS_ERR(page_pool)) {
> +		netdev_err(tp->dev, "failed to create page pool\n");
> +		return -ENOMEM;
> +	}
> +
> +	tp->page_pool = page_pool;
> +
> +	for (i = 0; i < tp->func_tx_queue_num; i++)
> +		rtase_tx_desc_init(tp, i);
> +
> +	for (i = 0; i < tp->func_rx_queue_num; i++) {
> +		rtase_rx_desc_init(tp, i);
> +		num = rtase_rx_ring_fill(&tp->rx_ring[i], 0,
> +					 RTASE_NUM_DESC, 0);
> +		if (num != RTASE_NUM_DESC)
> +			goto err_out;
> +
> +		rtase_mark_as_last_descriptor(tp->rx_ring[i].desc +
> +					      sizeof(union rtase_rx_desc) *
> +					      (RTASE_NUM_DESC - 1));
> +	}
> +
> +	return 0;
> +
> +err_out:
> +	rtase_rx_clear(tp);
> +	return -ENOMEM;
> +}
> +
>  static void rtase_tally_counter_clear(const struct rtase_private *tp)
>  {
>  	u32 cmd = lower_32_bits(tp->tally_paddr);
> @@ -138,6 +424,130 @@ static void rtase_tally_counter_clear(const struct rtase_private *tp)
>  	rtase_w32(tp, RTASE_DTCCR0, cmd | RTASE_COUNTER_RESET);
>  }
>
> +static void rtase_nic_enable(const struct net_device *dev)
> +{
> +	const struct rtase_private *tp = netdev_priv(dev);
> +	u16 rcr = rtase_r16(tp, RTASE_RX_CONFIG_1);
> +	u8 val;
> +
> +	rtase_w16(tp, RTASE_RX_CONFIG_1, rcr & ~RTASE_PCIE_RELOAD_EN);
> +	rtase_w16(tp, RTASE_RX_CONFIG_1, rcr | RTASE_PCIE_RELOAD_EN);
> +
> +	val = rtase_r8(tp, RTASE_CHIP_CMD);
> +	rtase_w8(tp, RTASE_CHIP_CMD, val | RTASE_TE | RTASE_RE);
> +
> +	val = rtase_r8(tp, RTASE_MISC);
> +	rtase_w8(tp, RTASE_MISC, val & ~RTASE_RX_DV_GATE_EN);
> +}
> +
> +static void rtase_enable_hw_interrupt(const struct rtase_private *tp)
> +{
> +	const struct rtase_int_vector *ivec = &tp->int_vector[0];
> +	u32 i;
> +
> +	rtase_w32(tp, ivec->imr_addr, ivec->imr);
> +
> +	for (i = 1; i < tp->int_nums; i++) {
> +		ivec = &tp->int_vector[i];
> +		rtase_w16(tp, ivec->imr_addr, ivec->imr);
> +	}
> +}
> +
> +static void rtase_hw_start(const struct net_device *dev)
> +{
> +	const struct rtase_private *tp = netdev_priv(dev);
> +
> +	rtase_nic_enable(dev);
> +	rtase_enable_hw_interrupt(tp);
> +}
> +
> +static int rtase_open(struct net_device *dev)
> +{
> +	struct rtase_private *tp = netdev_priv(dev);
> +	const struct pci_dev *pdev = tp->pdev;
> +	struct rtase_int_vector *ivec;
> +	u16 i = 0, j;
> +	int ret;
> +
> +	ivec = &tp->int_vector[0];
> +	tp->rx_buf_sz = RTASE_RX_BUF_SIZE;
> +
> +	ret = rtase_alloc_desc(tp);
> +	if (ret)
> +		goto err_free_all_allocated_mem;
> +
> +	ret = rtase_init_ring(dev);
> +	if (ret)
> +		goto err_free_all_allocated_mem;
> +
> +	rtase_hw_config(dev);
> +
> +	if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) {
> +		ret = request_irq(ivec->irq, rtase_interrupt, 0,
> +				  dev->name, ivec);
> +		if (ret)
> +			goto err_free_all_allocated_irq;
> +
> +		/* request other interrupts to handle multiqueue */
> +		for (i = 1; i < tp->int_nums; i++) {
> +			ivec = &tp->int_vector[i];
> +			snprintf(ivec->name, sizeof(ivec->name), "%s_int%i",
> +				 tp->dev->name, i);
> +			ret = request_irq(ivec->irq, rtase_q_interrupt, 0,
> +					  ivec->name, ivec);
> +			if (ret)
> +				goto err_free_all_allocated_irq;
> +		}
> +	} else {
> +		ret = request_irq(pdev->irq, rtase_interrupt, 0, dev->name,
> +				  ivec);
> +		if (ret)
> +			goto err_free_all_allocated_mem;
> +	}
> +
> +	rtase_hw_start(dev);
> +
> +	for (i = 0; i < tp->int_nums; i++) {
> +		ivec = &tp->int_vector[i];
> +		napi_enable(&ivec->napi);
> +	}
> +
> +	netif_carrier_on(dev);
> +	netif_wake_queue(dev);
> +
> +	return 0;
> +
> +err_free_all_allocated_irq:
You are allocating from i = 1, but freeing from j = 0;
> +	for (j = 0; j < i; j++)
> +		free_irq(tp->int_vector[j].irq, &tp->int_vector[j]);
> +
> +err_free_all_allocated_mem:
> +	rtase_free_desc(tp);
> +
> +	return ret;
> +}
> +
> +static int rtase_close(struct net_device *dev)
> +{
> +	struct rtase_private *tp = netdev_priv(dev);
> +	const struct pci_dev *pdev = tp->pdev;
> +	u32 i;
> +
> +	rtase_down(dev);
> +
> +	if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) {
> +		for (i = 0; i < tp->int_nums; i++)
> +			free_irq(tp->int_vector[i].irq, &tp->int_vector[i]);
> +
> +	} else {
> +		free_irq(pdev->irq, &tp->int_vector[0]);
> +	}
> +
> +	rtase_free_desc(tp);
> +
> +	return 0;
> +}
> +
>  static void rtase_enable_eem_write(const struct rtase_private *tp)
>  {
>  	u8 val;
> @@ -170,6 +580,11 @@ static void rtase_rar_set(const struct rtase_private *tp, const u8 *addr)
>  	rtase_w16(tp, RTASE_LBK_CTRL, RTASE_LBK_ATLD | RTASE_LBK_CLR);
>  }
>
> +static const struct net_device_ops rtase_netdev_ops = {
> +	.ndo_open = rtase_open,
> +	.ndo_stop = rtase_close,
> +};
> +
>  static void rtase_get_mac_address(struct net_device *dev)
>  {
>  	struct rtase_private *tp = netdev_priv(dev);
> @@ -190,6 +605,11 @@ static void rtase_get_mac_address(struct net_device *dev)
>  	rtase_rar_set(tp, dev->dev_addr);
>  }
>
> +static void rtase_init_netdev_ops(struct net_device *dev)
> +{
> +	dev->netdev_ops = &rtase_netdev_ops;
> +}
> +
>  static void rtase_reset_interrupt(struct pci_dev *pdev,
>  				  const struct rtase_private *tp)
>  {
> --
> 2.34.1
>
Justin Lai May 9, 2024, 8:58 a.m. UTC | #2
> 
> On 2024-05-08 at 18:09:34, Justin Lai (justinlai0215@realtek.com) wrote:
> >
> > +static int rtase_alloc_desc(struct rtase_private *tp) {
> > +     struct pci_dev *pdev = tp->pdev;
> > +     u32 i;
> > +
> > +     /* rx and tx descriptors needs 256 bytes alignment.
> > +      * dma_alloc_coherent provides more.
> > +      */
> > +     for (i = 0; i < tp->func_tx_queue_num; i++) {
> > +             tp->tx_ring[i].desc =
> > +                             dma_alloc_coherent(&pdev->dev,
> > +                                                RTASE_TX_RING_DESC_SIZE,
> > +                                                &tp->tx_ring[i].phy_addr,
> > +                                                GFP_KERNEL);
> > +             if (!tp->tx_ring[i].desc)
> You have handled errors gracefully everywhere else. Why not here?

Could you clarify: are you referring to other places that have error
description messages, whereas this one does not?

> > +                     return -ENOMEM;
> > +     }
> > +
> > +     for (i = 0; i < tp->func_rx_queue_num; i++) {
> > +             tp->rx_ring[i].desc =
> > +                             dma_alloc_coherent(&pdev->dev,
> > +                                                RTASE_RX_RING_DESC_SIZE,
> > +                                                &tp->rx_ring[i].phy_addr,
> > +                                                GFP_KERNEL);
> > +             if (!tp->rx_ring[i].desc)
> > +                     return -ENOMEM;
> > +     }
> > +
> > +     return 0;
> > +}
> > +
> > +static void rtase_free_desc(struct rtase_private *tp) {
> > +     struct pci_dev *pdev = tp->pdev;
> > +     u32 i;
> > +
> > +     for (i = 0; i < tp->func_tx_queue_num; i++) {
> > +             if (!tp->tx_ring[i].desc)
> > +                     continue;
> > +
> > +             dma_free_coherent(&pdev->dev, RTASE_TX_RING_DESC_SIZE,
> > +                               tp->tx_ring[i].desc,
> > +                               tp->tx_ring[i].phy_addr);
> > +             tp->tx_ring[i].desc = NULL;
> > +     }
> > +
> > +     for (i = 0; i < tp->func_rx_queue_num; i++) {
> > +             if (!tp->rx_ring[i].desc)
> > +                     continue;
> > +
> > +             dma_free_coherent(&pdev->dev, RTASE_RX_RING_DESC_SIZE,
> > +                               tp->rx_ring[i].desc,
> > +                               tp->rx_ring[i].phy_addr);
> > +             tp->rx_ring[i].desc = NULL;
> > +     }
> > +}
> > +
> > +static void rtase_mark_to_asic(union rtase_rx_desc *desc, u32
> > +rx_buf_sz) {
> > +     u32 eor = le32_to_cpu(desc->desc_cmd.opts1) & RTASE_RING_END;
> > +
> > +     desc->desc_status.opts2 = 0;
> Should desc->desc_cmd.addr be written before desc->desc_status.opts2? Just a question
> whether the dma_wmb() below suffices for both?

Thank you for your suggestion; this seems feasible. I will revise it in
the next version.

> > +     /* force memory writes to complete before releasing descriptor */
> > +     dma_wmb();
> > +     WRITE_ONCE(desc->desc_cmd.opts1,
> > +                cpu_to_le32(RTASE_DESC_OWN | eor | rx_buf_sz)); }
> > +
> > +static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx) {
> > +     struct rtase_ring *ring = &tp->tx_ring[idx];
> > +     struct rtase_tx_desc *desc;
> > +     u32 i;
> > +
> > +     memset(ring->desc, 0x0, RTASE_TX_RING_DESC_SIZE);
> > +     memset(ring->skbuff, 0x0, sizeof(ring->skbuff));
> > +     ring->cur_idx = 0;
> > +     ring->dirty_idx = 0;
> > +     ring->index = idx;
> > +
> > +     for (i = 0; i < RTASE_NUM_DESC; i++) {
> > +             ring->mis.len[i] = 0;
> > +             if ((RTASE_NUM_DESC - 1) == i) {
> > +                     desc = ring->desc + sizeof(struct rtase_tx_desc) * i;
> > +                     desc->opts1 = cpu_to_le32(RTASE_RING_END);
> > +             }
> > +     }
> > +
> > +     ring->ring_handler = tx_handler;
> > +     if (idx < 4) {
> > +             ring->ivec = &tp->int_vector[idx];
> > +             list_add_tail(&ring->ring_entry,
> > +                           &tp->int_vector[idx].ring_list);
> > +     } else {
> > +             ring->ivec = &tp->int_vector[0];
> > +             list_add_tail(&ring->ring_entry, &tp->int_vector[0].ring_list);
> > +     }
> > +}
> > +
> > +static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping,
> > +                           u32 rx_buf_sz) {
> > +     desc->desc_cmd.addr = cpu_to_le64(mapping);
> > +     /* make sure the physical address has been updated */
> > +     wmb();
> Why not dma_wmb()?

dma_wmb() is already done in rtase_mark_to_asic(), so I will remove wmb().
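
For illustration only, a minimal sketch (not necessarily the final code) of
how the two helpers could be combined once the address write comes before
the status clear and the extra wmb() is dropped, so that a single dma_wmb()
orders both stores before ownership is handed to the hardware:

static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping,
			      u32 rx_buf_sz)
{
	u32 eor = le32_to_cpu(desc->desc_cmd.opts1) & RTASE_RING_END;

	desc->desc_cmd.addr = cpu_to_le64(mapping);
	desc->desc_status.opts2 = 0;

	/* force memory writes to complete before releasing descriptor */
	dma_wmb();
	WRITE_ONCE(desc->desc_cmd.opts1,
		   cpu_to_le32(RTASE_DESC_OWN | eor | rx_buf_sz));
}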

> > +     rtase_mark_to_asic(desc, rx_buf_sz); }
> > +
> > +static void rtase_make_unusable_by_asic(union rtase_rx_desc *desc) {
> > +     desc->desc_cmd.addr = cpu_to_le64(RTK_MAGIC_NUMBER);
> > +     desc->desc_cmd.opts1 &= ~cpu_to_le32(RTASE_DESC_OWN |
> > +RSVD_MASK); }
> > +
> > +static int rtase_alloc_rx_skb(const struct rtase_ring *ring,
> > +                           struct sk_buff **p_sk_buff,
> > +                           union rtase_rx_desc *desc,
> > +                           dma_addr_t *rx_phy_addr, u8 in_intr) {
> > +     struct rtase_int_vector *ivec = ring->ivec;
> > +     const struct rtase_private *tp = ivec->tp;
> > +     struct sk_buff *skb = NULL;
> > +     dma_addr_t mapping;
> > +     struct page *page;
> > +     void *buf_addr;
> > +     int ret = 0;
> > +
> > +     page = page_pool_dev_alloc_pages(tp->page_pool);
> > +     if (!page) {
> > +             netdev_err(tp->dev, "failed to alloc page\n");
> > +             goto err_out;
> > +     }
> > +
> > +     buf_addr = page_address(page);
> > +     mapping = page_pool_get_dma_addr(page);
> > +
> > +     skb = build_skb(buf_addr, PAGE_SIZE);
> > +     if (!skb) {
> > +             page_pool_put_full_page(tp->page_pool, page, true);
> > +             netdev_err(tp->dev, "failed to build skb\n");
> > +             goto err_out;
> > +     }
> Did you mark the skb for recycle? Hmm ... did I miss the code?
> 
We do this when the skb is used and before the skb resource is finally
released. Do you think it would be better to do it when allocating the
skb?
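
For reference, the call already present in this patch is on the teardown
path; a trimmed excerpt of rtase_rx_ring_clear() from the diff below:

	/* recycle the page pool page backing the skb before freeing it */
	skb_mark_for_recycle(ring->skbuff[i]);
	dev_kfree_skb(ring->skbuff[i]);
	ring->skbuff[i] = NULL;
	rtase_make_unusable_by_asic(desc);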

> > +
> > +     *p_sk_buff = skb;
> > +     *rx_phy_addr = mapping;
> > +     rtase_map_to_asic(desc, mapping, tp->rx_buf_sz);
> > +
> > +     return ret;
> > +
> > +err_out:
> > +     if (skb)
> > +             dev_kfree_skb(skb);
> > +
> > +     ret = -ENOMEM;
> > +     rtase_make_unusable_by_asic(desc);
> > +
> > +     return ret;
> > +}
> > +
> > +static int rtase_open(struct net_device *dev) {
> > +     struct rtase_private *tp = netdev_priv(dev);
> > +     const struct pci_dev *pdev = tp->pdev;
> > +     struct rtase_int_vector *ivec;
> > +     u16 i = 0, j;
> > +     int ret;
> > +
> > +     ivec = &tp->int_vector[0];
> > +     tp->rx_buf_sz = RTASE_RX_BUF_SIZE;
> > +
> > +     ret = rtase_alloc_desc(tp);
> > +     if (ret)
> > +             goto err_free_all_allocated_mem;
> > +
> > +     ret = rtase_init_ring(dev);
> > +     if (ret)
> > +             goto err_free_all_allocated_mem;
> > +
> > +     rtase_hw_config(dev);
> > +
> > +     if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) {
> > +             ret = request_irq(ivec->irq, rtase_interrupt, 0,
> > +                               dev->name, ivec);
> > +             if (ret)
> > +                     goto err_free_all_allocated_irq;
> > +
> > +             /* request other interrupts to handle multiqueue */
> > +             for (i = 1; i < tp->int_nums; i++) {
> > +                     ivec = &tp->int_vector[i];
> > +                     snprintf(ivec->name, sizeof(ivec->name), "%s_int%i",
> > +                              tp->dev->name, i);
> > +                     ret = request_irq(ivec->irq, rtase_q_interrupt, 0,
> > +                                       ivec->name, ivec);
> > +                     if (ret)
> > +                             goto err_free_all_allocated_irq;
> > +             }
> > +     } else {
> > +             ret = request_irq(pdev->irq, rtase_interrupt, 0, dev->name,
> > +                               ivec);
> > +             if (ret)
> > +                     goto err_free_all_allocated_mem;
> > +     }
> > +
> > +     rtase_hw_start(dev);
> > +
> > +     for (i = 0; i < tp->int_nums; i++) {
> > +             ivec = &tp->int_vector[i];
> > +             napi_enable(&ivec->napi);
> > +     }
> > +
> > +     netif_carrier_on(dev);
> > +     netif_wake_queue(dev);
> > +
> > +     return 0;
> > +
> > +err_free_all_allocated_irq:
> You are allocating from i = 1, but freeing from j = 0;

Hi Ratheesh,
I have done request_irq() once before the for loop,
so there should be no problem starting free from j=0 here.
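
An annotated excerpt of the flow in this patch, for reference (i is
initialised to 0, so a failure on vector 0 frees nothing):

	/* vector 0 is requested before the loop ... */
	ret = request_irq(ivec->irq, rtase_interrupt, 0, dev->name, ivec);
	if (ret)
		goto err_free_all_allocated_irq;

	/* ... and the loop requests vectors 1 .. int_nums - 1, so when
	 * request_irq() fails at index i, vectors 0 .. i - 1 are held
	 */
	for (i = 1; i < tp->int_nums; i++) {
		...
	}

err_free_all_allocated_irq:
	for (j = 0; j < i; j++)
		free_irq(tp->int_vector[j].irq, &tp->int_vector[j]);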

> > +     for (j = 0; j < i; j++)
> > +             free_irq(tp->int_vector[j].irq, &tp->int_vector[j]);
> > +
> > +err_free_all_allocated_mem:
> > +     rtase_free_desc(tp);
> > +
> > +     return ret;
> > +}
> > +
> >
Ratheesh Kannoth May 9, 2024, 9:09 a.m. UTC | #3
> From: Justin Lai <justinlai0215@realtek.com>
> Sent: Thursday, May 9, 2024 2:29 PM
> > > +
> > > +     /* rx and tx descriptors needs 256 bytes alignment.
> > > +      * dma_alloc_coherent provides more.
> > > +      */
> > > +     for (i = 0; i < tp->func_tx_queue_num; i++) {
> > > +             tp->tx_ring[i].desc =
> > > +                             dma_alloc_coherent(&pdev->dev,
> > > +                                                RTASE_TX_RING_DESC_SIZE,
> > > +                                                &tp->tx_ring[i].phy_addr,
> > > +                                                GFP_KERNEL);
> > > +             if (!tp->tx_ring[i].desc)
> > You have handled errors gracefully everywhere else. Why not here?
> 
> Could you clarify: are you referring to other places that have error
> description messages, whereas this one does not?
In other functions you free the allocated resources in case of failure, but here you return an error directly.


> > Did you mark the skb for recycle? Hmm ... did I miss the code?
> >
> We do this when the skb is used and before the skb resource is finally
> released. Do you think it would be better to do it when allocating the
> skb?
I think you added skb_mark_for_recycle() in the following patch. Sorry, I missed it; ignore my comment.

> 
> > > +
> > > +err_free_all_allocated_irq:
> > You are allocating from i = 1, but freeing from j = 0;
> 
> Hi Ratheesh,
> I have done request_irq() once before the for loop, so there should be no
> problem starting free from j=0 here.
Thanks for pointing out.
Justin Lai May 9, 2024, 9:59 a.m. UTC | #4
> 
> > From: Justin Lai <justinlai0215@realtek.com>
> > Sent: Thursday, May 9, 2024 2:29 PM
> > > > +
> > > > +     /* rx and tx descriptors needs 256 bytes alignment.
> > > > +      * dma_alloc_coherent provides more.
> > > > +      */
> > > > +     for (i = 0; i < tp->func_tx_queue_num; i++) {
> > > > +             tp->tx_ring[i].desc =
> > > > +                             dma_alloc_coherent(&pdev->dev,
> > > > +                                                RTASE_TX_RING_DESC_SIZE,
> > > > +                                                &tp->tx_ring[i].phy_addr,
> > > > +                                                GFP_KERNEL);
> > > > +             if (!tp->tx_ring[i].desc)
> > > You have handled errors gracefully everywhere else. Why not here?
> >
> > Could you clarify: are you referring to other places that have error
> > description messages, whereas this one does not?
> In other functions you free the allocated resources in case of failure,
> but here you return an error directly.
> 
After the error is returned, the corresponding error handling is done in
rtase_open().
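
For reference, that error path relies on rtase_free_desc() skipping rings
that were never allocated, so a partially successful rtase_alloc_desc() is
cleaned up safely; a trimmed excerpt from this patch:

err_free_all_allocated_mem:
	rtase_free_desc(tp);

	/* in rtase_free_desc(): rings without a descriptor are skipped */
	for (i = 0; i < tp->func_tx_queue_num; i++) {
		if (!tp->tx_ring[i].desc)
			continue;

		dma_free_coherent(&pdev->dev, RTASE_TX_RING_DESC_SIZE,
				  tp->tx_ring[i].desc,
				  tp->tx_ring[i].phy_addr);
		tp->tx_ring[i].desc = NULL;
	}
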
> 
> > > Did you mark the skb for recycle? Hmm ... did I miss the code?
> > >
> > We do this when the skb is used and before the skb resource is finally
> > released. Do you think it would be better to do it when allocating the
> > skb?
> I think you added skb_mark_for_recycle() in the following patch. Sorry, I
> missed it; ignore my comment.
> 

OK, thank you for your feedback.

> >
> > > > +
> > > > +err_free_all_allocated_irq:
> > > You are allocating from i = 1, but freeing from j = 0;
> >
> > Hi Ratheesh,
> > I have done request_irq() once before the for loop, so there should be
> > no problem starting free from j=0 here.
> Thanks for pointing out.

Thank you also for your review.
diff mbox series

Patch

diff --git a/drivers/net/ethernet/realtek/rtase/rtase_main.c b/drivers/net/ethernet/realtek/rtase/rtase_main.c
index 8bb3b96a57aa..8fcfb53fbfda 100644
--- a/drivers/net/ethernet/realtek/rtase/rtase_main.c
+++ b/drivers/net/ethernet/realtek/rtase/rtase_main.c
@@ -130,6 +130,292 @@  static u32 rtase_r32(const struct rtase_private *tp, u16 reg)
 	return readl(tp->mmio_addr + reg);
 }
 
+static int rtase_alloc_desc(struct rtase_private *tp)
+{
+	struct pci_dev *pdev = tp->pdev;
+	u32 i;
+
+	/* rx and tx descriptors needs 256 bytes alignment.
+	 * dma_alloc_coherent provides more.
+	 */
+	for (i = 0; i < tp->func_tx_queue_num; i++) {
+		tp->tx_ring[i].desc =
+				dma_alloc_coherent(&pdev->dev,
+						   RTASE_TX_RING_DESC_SIZE,
+						   &tp->tx_ring[i].phy_addr,
+						   GFP_KERNEL);
+		if (!tp->tx_ring[i].desc)
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < tp->func_rx_queue_num; i++) {
+		tp->rx_ring[i].desc =
+				dma_alloc_coherent(&pdev->dev,
+						   RTASE_RX_RING_DESC_SIZE,
+						   &tp->rx_ring[i].phy_addr,
+						   GFP_KERNEL);
+		if (!tp->rx_ring[i].desc)
+			return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void rtase_free_desc(struct rtase_private *tp)
+{
+	struct pci_dev *pdev = tp->pdev;
+	u32 i;
+
+	for (i = 0; i < tp->func_tx_queue_num; i++) {
+		if (!tp->tx_ring[i].desc)
+			continue;
+
+		dma_free_coherent(&pdev->dev, RTASE_TX_RING_DESC_SIZE,
+				  tp->tx_ring[i].desc,
+				  tp->tx_ring[i].phy_addr);
+		tp->tx_ring[i].desc = NULL;
+	}
+
+	for (i = 0; i < tp->func_rx_queue_num; i++) {
+		if (!tp->rx_ring[i].desc)
+			continue;
+
+		dma_free_coherent(&pdev->dev, RTASE_RX_RING_DESC_SIZE,
+				  tp->rx_ring[i].desc,
+				  tp->rx_ring[i].phy_addr);
+		tp->rx_ring[i].desc = NULL;
+	}
+}
+
+static void rtase_mark_to_asic(union rtase_rx_desc *desc, u32 rx_buf_sz)
+{
+	u32 eor = le32_to_cpu(desc->desc_cmd.opts1) & RTASE_RING_END;
+
+	desc->desc_status.opts2 = 0;
+	/* force memory writes to complete before releasing descriptor */
+	dma_wmb();
+	WRITE_ONCE(desc->desc_cmd.opts1,
+		   cpu_to_le32(RTASE_DESC_OWN | eor | rx_buf_sz));
+}
+
+static void rtase_tx_desc_init(struct rtase_private *tp, u16 idx)
+{
+	struct rtase_ring *ring = &tp->tx_ring[idx];
+	struct rtase_tx_desc *desc;
+	u32 i;
+
+	memset(ring->desc, 0x0, RTASE_TX_RING_DESC_SIZE);
+	memset(ring->skbuff, 0x0, sizeof(ring->skbuff));
+	ring->cur_idx = 0;
+	ring->dirty_idx = 0;
+	ring->index = idx;
+
+	for (i = 0; i < RTASE_NUM_DESC; i++) {
+		ring->mis.len[i] = 0;
+		if ((RTASE_NUM_DESC - 1) == i) {
+			desc = ring->desc + sizeof(struct rtase_tx_desc) * i;
+			desc->opts1 = cpu_to_le32(RTASE_RING_END);
+		}
+	}
+
+	ring->ring_handler = tx_handler;
+	if (idx < 4) {
+		ring->ivec = &tp->int_vector[idx];
+		list_add_tail(&ring->ring_entry,
+			      &tp->int_vector[idx].ring_list);
+	} else {
+		ring->ivec = &tp->int_vector[0];
+		list_add_tail(&ring->ring_entry, &tp->int_vector[0].ring_list);
+	}
+}
+
+static void rtase_map_to_asic(union rtase_rx_desc *desc, dma_addr_t mapping,
+			      u32 rx_buf_sz)
+{
+	desc->desc_cmd.addr = cpu_to_le64(mapping);
+	/* make sure the physical address has been updated */
+	wmb();
+	rtase_mark_to_asic(desc, rx_buf_sz);
+}
+
+static void rtase_make_unusable_by_asic(union rtase_rx_desc *desc)
+{
+	desc->desc_cmd.addr = cpu_to_le64(RTK_MAGIC_NUMBER);
+	desc->desc_cmd.opts1 &= ~cpu_to_le32(RTASE_DESC_OWN | RSVD_MASK);
+}
+
+static int rtase_alloc_rx_skb(const struct rtase_ring *ring,
+			      struct sk_buff **p_sk_buff,
+			      union rtase_rx_desc *desc,
+			      dma_addr_t *rx_phy_addr, u8 in_intr)
+{
+	struct rtase_int_vector *ivec = ring->ivec;
+	const struct rtase_private *tp = ivec->tp;
+	struct sk_buff *skb = NULL;
+	dma_addr_t mapping;
+	struct page *page;
+	void *buf_addr;
+	int ret = 0;
+
+	page = page_pool_dev_alloc_pages(tp->page_pool);
+	if (!page) {
+		netdev_err(tp->dev, "failed to alloc page\n");
+		goto err_out;
+	}
+
+	buf_addr = page_address(page);
+	mapping = page_pool_get_dma_addr(page);
+
+	skb = build_skb(buf_addr, PAGE_SIZE);
+	if (!skb) {
+		page_pool_put_full_page(tp->page_pool, page, true);
+		netdev_err(tp->dev, "failed to build skb\n");
+		goto err_out;
+	}
+
+	*p_sk_buff = skb;
+	*rx_phy_addr = mapping;
+	rtase_map_to_asic(desc, mapping, tp->rx_buf_sz);
+
+	return ret;
+
+err_out:
+	if (skb)
+		dev_kfree_skb(skb);
+
+	ret = -ENOMEM;
+	rtase_make_unusable_by_asic(desc);
+
+	return ret;
+}
+
+static u32 rtase_rx_ring_fill(struct rtase_ring *ring, u32 ring_start,
+			      u32 ring_end, u8 in_intr)
+{
+	union rtase_rx_desc *desc_base = ring->desc;
+	u32 cur;
+
+	for (cur = ring_start; ring_end - cur > 0; cur++) {
+		u32 i = cur % RTASE_NUM_DESC;
+		union rtase_rx_desc *desc = desc_base + i;
+		int ret;
+
+		if (ring->skbuff[i])
+			continue;
+
+		ret = rtase_alloc_rx_skb(ring, &ring->skbuff[i], desc,
+					 &ring->mis.data_phy_addr[i],
+					 in_intr);
+		if (ret)
+			break;
+	}
+
+	return cur - ring_start;
+}
+
+static void rtase_mark_as_last_descriptor(union rtase_rx_desc *desc)
+{
+	desc->desc_cmd.opts1 |= cpu_to_le32(RTASE_RING_END);
+}
+
+static void rtase_rx_ring_clear(struct rtase_ring *ring)
+{
+	union rtase_rx_desc *desc;
+	u32 i;
+
+	for (i = 0; i < RTASE_NUM_DESC; i++) {
+		desc = ring->desc + sizeof(union rtase_rx_desc) * i;
+
+		if (!ring->skbuff[i])
+			continue;
+
+		skb_mark_for_recycle(ring->skbuff[i]);
+
+		dev_kfree_skb(ring->skbuff[i]);
+
+		ring->skbuff[i] = NULL;
+
+		rtase_make_unusable_by_asic(desc);
+	}
+}
+
+static void rtase_rx_desc_init(struct rtase_private *tp, u16 idx)
+{
+	struct rtase_ring *ring = &tp->rx_ring[idx];
+	u16 i;
+
+	memset(ring->desc, 0x0, RTASE_RX_RING_DESC_SIZE);
+	memset(ring->skbuff, 0x0, sizeof(ring->skbuff));
+	ring->cur_idx = 0;
+	ring->dirty_idx = 0;
+	ring->index = idx;
+
+	for (i = 0; i < RTASE_NUM_DESC; i++)
+		ring->mis.data_phy_addr[i] = 0;
+
+	ring->ring_handler = rx_handler;
+	ring->ivec = &tp->int_vector[idx];
+	list_add_tail(&ring->ring_entry, &tp->int_vector[idx].ring_list);
+}
+
+static void rtase_rx_clear(struct rtase_private *tp)
+{
+	u32 i;
+
+	for (i = 0; i < tp->func_rx_queue_num; i++)
+		rtase_rx_ring_clear(&tp->rx_ring[i]);
+
+	page_pool_destroy(tp->page_pool);
+	tp->page_pool = NULL;
+}
+
+static int rtase_init_ring(const struct net_device *dev)
+{
+	struct rtase_private *tp = netdev_priv(dev);
+	struct page_pool_params pp_params = { 0 };
+	struct page_pool *page_pool;
+	u32 num;
+	u16 i;
+
+	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
+	pp_params.order = 0;
+	pp_params.pool_size = RTASE_NUM_DESC * tp->func_rx_queue_num;
+	pp_params.nid = dev_to_node(&tp->pdev->dev);
+	pp_params.dev = &tp->pdev->dev;
+	pp_params.dma_dir = DMA_FROM_DEVICE;
+	pp_params.max_len = PAGE_SIZE;
+	pp_params.offset = 0;
+
+	page_pool = page_pool_create(&pp_params);
+	if (IS_ERR(page_pool)) {
+		netdev_err(tp->dev, "failed to create page pool\n");
+		return -ENOMEM;
+	}
+
+	tp->page_pool = page_pool;
+
+	for (i = 0; i < tp->func_tx_queue_num; i++)
+		rtase_tx_desc_init(tp, i);
+
+	for (i = 0; i < tp->func_rx_queue_num; i++) {
+		rtase_rx_desc_init(tp, i);
+		num = rtase_rx_ring_fill(&tp->rx_ring[i], 0,
+					 RTASE_NUM_DESC, 0);
+		if (num != RTASE_NUM_DESC)
+			goto err_out;
+
+		rtase_mark_as_last_descriptor(tp->rx_ring[i].desc +
+					      sizeof(union rtase_rx_desc) *
+					      (RTASE_NUM_DESC - 1));
+	}
+
+	return 0;
+
+err_out:
+	rtase_rx_clear(tp);
+	return -ENOMEM;
+}
+
 static void rtase_tally_counter_clear(const struct rtase_private *tp)
 {
 	u32 cmd = lower_32_bits(tp->tally_paddr);
@@ -138,6 +424,130 @@  static void rtase_tally_counter_clear(const struct rtase_private *tp)
 	rtase_w32(tp, RTASE_DTCCR0, cmd | RTASE_COUNTER_RESET);
 }
 
+static void rtase_nic_enable(const struct net_device *dev)
+{
+	const struct rtase_private *tp = netdev_priv(dev);
+	u16 rcr = rtase_r16(tp, RTASE_RX_CONFIG_1);
+	u8 val;
+
+	rtase_w16(tp, RTASE_RX_CONFIG_1, rcr & ~RTASE_PCIE_RELOAD_EN);
+	rtase_w16(tp, RTASE_RX_CONFIG_1, rcr | RTASE_PCIE_RELOAD_EN);
+
+	val = rtase_r8(tp, RTASE_CHIP_CMD);
+	rtase_w8(tp, RTASE_CHIP_CMD, val | RTASE_TE | RTASE_RE);
+
+	val = rtase_r8(tp, RTASE_MISC);
+	rtase_w8(tp, RTASE_MISC, val & ~RTASE_RX_DV_GATE_EN);
+}
+
+static void rtase_enable_hw_interrupt(const struct rtase_private *tp)
+{
+	const struct rtase_int_vector *ivec = &tp->int_vector[0];
+	u32 i;
+
+	rtase_w32(tp, ivec->imr_addr, ivec->imr);
+
+	for (i = 1; i < tp->int_nums; i++) {
+		ivec = &tp->int_vector[i];
+		rtase_w16(tp, ivec->imr_addr, ivec->imr);
+	}
+}
+
+static void rtase_hw_start(const struct net_device *dev)
+{
+	const struct rtase_private *tp = netdev_priv(dev);
+
+	rtase_nic_enable(dev);
+	rtase_enable_hw_interrupt(tp);
+}
+
+static int rtase_open(struct net_device *dev)
+{
+	struct rtase_private *tp = netdev_priv(dev);
+	const struct pci_dev *pdev = tp->pdev;
+	struct rtase_int_vector *ivec;
+	u16 i = 0, j;
+	int ret;
+
+	ivec = &tp->int_vector[0];
+	tp->rx_buf_sz = RTASE_RX_BUF_SIZE;
+
+	ret = rtase_alloc_desc(tp);
+	if (ret)
+		goto err_free_all_allocated_mem;
+
+	ret = rtase_init_ring(dev);
+	if (ret)
+		goto err_free_all_allocated_mem;
+
+	rtase_hw_config(dev);
+
+	if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) {
+		ret = request_irq(ivec->irq, rtase_interrupt, 0,
+				  dev->name, ivec);
+		if (ret)
+			goto err_free_all_allocated_irq;
+
+		/* request other interrupts to handle multiqueue */
+		for (i = 1; i < tp->int_nums; i++) {
+			ivec = &tp->int_vector[i];
+			snprintf(ivec->name, sizeof(ivec->name), "%s_int%i",
+				 tp->dev->name, i);
+			ret = request_irq(ivec->irq, rtase_q_interrupt, 0,
+					  ivec->name, ivec);
+			if (ret)
+				goto err_free_all_allocated_irq;
+		}
+	} else {
+		ret = request_irq(pdev->irq, rtase_interrupt, 0, dev->name,
+				  ivec);
+		if (ret)
+			goto err_free_all_allocated_mem;
+	}
+
+	rtase_hw_start(dev);
+
+	for (i = 0; i < tp->int_nums; i++) {
+		ivec = &tp->int_vector[i];
+		napi_enable(&ivec->napi);
+	}
+
+	netif_carrier_on(dev);
+	netif_wake_queue(dev);
+
+	return 0;
+
+err_free_all_allocated_irq:
+	for (j = 0; j < i; j++)
+		free_irq(tp->int_vector[j].irq, &tp->int_vector[j]);
+
+err_free_all_allocated_mem:
+	rtase_free_desc(tp);
+
+	return ret;
+}
+
+static int rtase_close(struct net_device *dev)
+{
+	struct rtase_private *tp = netdev_priv(dev);
+	const struct pci_dev *pdev = tp->pdev;
+	u32 i;
+
+	rtase_down(dev);
+
+	if (tp->sw_flag & RTASE_SWF_MSIX_ENABLED) {
+		for (i = 0; i < tp->int_nums; i++)
+			free_irq(tp->int_vector[i].irq, &tp->int_vector[i]);
+
+	} else {
+		free_irq(pdev->irq, &tp->int_vector[0]);
+	}
+
+	rtase_free_desc(tp);
+
+	return 0;
+}
+
 static void rtase_enable_eem_write(const struct rtase_private *tp)
 {
 	u8 val;
@@ -170,6 +580,11 @@  static void rtase_rar_set(const struct rtase_private *tp, const u8 *addr)
 	rtase_w16(tp, RTASE_LBK_CTRL, RTASE_LBK_ATLD | RTASE_LBK_CLR);
 }
 
+static const struct net_device_ops rtase_netdev_ops = {
+	.ndo_open = rtase_open,
+	.ndo_stop = rtase_close,
+};
+
 static void rtase_get_mac_address(struct net_device *dev)
 {
 	struct rtase_private *tp = netdev_priv(dev);
@@ -190,6 +605,11 @@  static void rtase_get_mac_address(struct net_device *dev)
 	rtase_rar_set(tp, dev->dev_addr);
 }
 
+static void rtase_init_netdev_ops(struct net_device *dev)
+{
+	dev->netdev_ops = &rtase_netdev_ops;
+}
+
 static void rtase_reset_interrupt(struct pci_dev *pdev,
 				  const struct rtase_private *tp)
 {