From patchwork Thu Aug 20 08:46:15 2015
From: Taku Izumi <izumi.taku@jp.fujitsu.com>
To: netdev@vger.kernel.org, davem@davemloft.net
Cc: platform-driver-x86@vger.kernel.org, dvhart@infradead.org,
 rkhan@redhat.com, alexander.h.duyck@redhat.com, linux-acpi@vger.kernel.org,
 joe@perches.com, sergei.shtylyov@cogentembedded.com,
 stephen@networkplumber.org, yasu.isimatu@gmail.com,
 Taku Izumi <izumi.taku@jp.fujitsu.com>
Subject: [PATCH v2.2 11/22] fjes: NAPI polling function
Date: Thu, 20 Aug 2015 17:46:15 +0900
Message-Id: <1440060386-13189-11-git-send-email-izumi.taku@jp.fujitsu.com>
X-Mailer: git-send-email 1.9.1
In-Reply-To: <1440060386-13189-1-git-send-email-izumi.taku@jp.fujitsu.com>
References: <1440060306-13040-1-git-send-email-izumi.taku@jp.fujitsu.com>
 <1440060386-13189-1-git-send-email-izumi.taku@jp.fujitsu.com>

This patch adds the NAPI polling function and receive-related work.
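For readers unfamiliar with NAPI, the generic contract the driver follows is
sketched below. This is an illustrative outline only, not driver code; the
names my_adapter, my_intr, my_poll and rx_frame_pending() are placeholders:

  /* registration, typically done once at driver setup (weight 64) */
  netif_napi_add(netdev, &my_adapter->napi, my_poll, 64);

  /* hard interrupt: mask further RX interrupts and defer work to NAPI */
  static irqreturn_t my_intr(int irq, void *data)
  {
  	struct my_adapter *adapter = data;

  	/* mask the RX interrupt source here */
  	napi_schedule(&adapter->napi);
  	return IRQ_HANDLED;
  }

  /* softirq context: consume at most "budget" frames per invocation */
  static int my_poll(struct napi_struct *napi, int budget)
  {
  	int work_done = 0;

  	while (work_done < budget && rx_frame_pending()) {
  		/* build an skb, eth_type_trans(), netif_receive_skb() */
  		work_done++;
  	}

  	if (work_done < budget) {
  		napi_complete(napi);
  		/* re-enable the RX interrupt source here */
  	}

  	return work_done;
  }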
Signed-off-by: Taku Izumi <izumi.taku@jp.fujitsu.com>
---
 drivers/net/fjes/fjes_hw.c   |  40 ++++++++++
 drivers/net/fjes/fjes_hw.h   |   5 ++
 drivers/net/fjes/fjes_main.c | 172 ++++++++++++++++++++++++++++++++++++++++++-
 3 files changed, 215 insertions(+), 2 deletions(-)

diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index 4965791..ae7b7cd 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -825,6 +825,46 @@ bool fjes_hw_check_vlan_id(struct epbuf_handler *epbh, u16 vlan_id)
 	return ret;
 }
 
+bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *epbh)
+{
+	union ep_buffer_info *info = epbh->info;
+
+	if (info->v1i.count_max == 0)
+		return true;
+
+	return EP_RING_EMPTY(info->v1i.head, info->v1i.tail,
+			     info->v1i.count_max);
+}
+
+void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *epbh,
+				       size_t *psize)
+{
+	union ep_buffer_info *info = epbh->info;
+	struct esmem_frame *ring_frame;
+	void *frame;
+
+	ring_frame = (struct esmem_frame *)&(epbh->ring[EP_RING_INDEX
+							(info->v1i.head,
+							 info->v1i.count_max) *
+							info->v1i.frame_max]);
+
+	*psize = (size_t)ring_frame->frame_size;
+
+	frame = ring_frame->frame_data;
+
+	return frame;
+}
+
+void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *epbh)
+{
+	union ep_buffer_info *info = epbh->info;
+
+	if (fjes_hw_epbuf_rx_is_empty(epbh))
+		return;
+
+	EP_RING_INDEX_INC(epbh->info->v1i.head, info->v1i.count_max);
+}
+
 int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *epbh,
 			      void *frame, size_t size)
 {
diff --git a/drivers/net/fjes/fjes_hw.h b/drivers/net/fjes/fjes_hw.h
index 2b49421..0b43bc3 100644
--- a/drivers/net/fjes/fjes_hw.h
+++ b/drivers/net/fjes/fjes_hw.h
@@ -69,6 +69,8 @@ struct fjes_hw;
 		((_num) = EP_RING_INDEX((_num) + 1, (_max)))
 #define EP_RING_FULL(_head, _tail, _max) \
 	(0 == EP_RING_INDEX(((_tail) - (_head)), (_max)))
+#define EP_RING_EMPTY(_head, _tail, _max) \
+	(1 == EP_RING_INDEX(((_tail) - (_head)), (_max)))
 
 #define FJES_MTU_TO_BUFFER_SIZE(mtu) \
 	(ETH_HLEN + VLAN_HLEN + (mtu) + ETH_FCS_LEN)
@@ -320,6 +322,9 @@ int fjes_hw_epid_is_shared(struct fjes_device_shared_info *, int);
 bool fjes_hw_check_epbuf_version(struct epbuf_handler *, u32);
 bool fjes_hw_check_mtu(struct epbuf_handler *, u32);
 bool fjes_hw_check_vlan_id(struct epbuf_handler *, u16);
+bool fjes_hw_epbuf_rx_is_empty(struct epbuf_handler *);
+void *fjes_hw_epbuf_rx_curpkt_get_addr(struct epbuf_handler *, size_t *);
+void fjes_hw_epbuf_rx_curpkt_drop(struct epbuf_handler *);
 int fjes_hw_epbuf_tx_pkt_send(struct epbuf_handler *, void *, size_t);
 
 #endif /* FJES_HW_H_ */
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 8cc687e..186197d 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -66,6 +66,9 @@ static int fjes_remove(struct platform_device *);
 static int fjes_sw_init(struct fjes_adapter *);
 static void fjes_netdev_setup(struct net_device *);
 
+static void fjes_rx_irq(struct fjes_adapter *, int);
+static int fjes_poll(struct napi_struct *, int);
+
 static const struct acpi_device_id fjes_acpi_ids[] = {
 	{"PNP0C02", 0},
 	{"", 0},
@@ -235,6 +238,8 @@ static int fjes_open(struct net_device *netdev)
 	hw->txrx_stop_req_bit = 0;
 	hw->epstop_req_bit = 0;
 
+	napi_enable(&adapter->napi);
+
 	fjes_hw_capture_interrupt_status(hw);
 
 	result = fjes_request_irq(adapter);
@@ -250,6 +255,7 @@ static int fjes_open(struct net_device *netdev)
 
 err_req_irq:
 	fjes_free_irq(adapter);
+	napi_disable(&adapter->napi);
 
 err_setup_res:
 	fjes_free_resources(adapter);
@@ -268,6 +274,8 @@ static int fjes_close(struct net_device *netdev)
 
 	fjes_hw_raise_epstop(hw);
 
+	napi_disable(&adapter->napi);
+
 	for (epidx = 0; epidx < hw->max_epid; epidx++) {
 		if (epidx == hw->my_epid)
 			continue;
@@ -703,14 +711,168 @@ static irqreturn_t fjes_intr(int irq, void *data)
 
 	icr = fjes_hw_capture_interrupt_status(hw);
 
-	if (icr & REG_IS_MASK_IS_ASSERT)
+	if (icr & REG_IS_MASK_IS_ASSERT) {
+		if (icr & REG_ICTL_MASK_RX_DATA)
+			fjes_rx_irq(adapter, icr & REG_IS_MASK_EPID);
+
 		ret = IRQ_HANDLED;
-	else
+	} else {
 		ret = IRQ_NONE;
+	}
 
 	return ret;
 }
 
+static int fjes_rxframe_search_exist(struct fjes_adapter *adapter,
+				     int start_epid)
+{
+	struct fjes_hw *hw = &adapter->hw;
+	int cur_epid;
+	int max_epid;
+	int i;
+	enum ep_partner_status pstatus;
+
+	max_epid = hw->max_epid;
+	start_epid = (start_epid + 1 + max_epid) % max_epid;
+
+	for (i = 0; i < max_epid; i++) {
+		cur_epid = (start_epid + i) % max_epid;
+		if (cur_epid == hw->my_epid)
+			continue;
+
+		pstatus = fjes_hw_get_partner_ep_status(hw, cur_epid);
+		if (pstatus == EP_PARTNER_SHARED) {
+			if (!fjes_hw_epbuf_rx_is_empty(
+				&hw->ep_shm_info[cur_epid].rx))
+				return cur_epid;
+		}
+	}
+	return -1;
+}
+
+static void *fjes_rxframe_get(struct fjes_adapter *adapter, size_t *psize,
+			      int *cur_epid)
+{
+	void *frame;
+
+	*cur_epid = fjes_rxframe_search_exist(adapter, *cur_epid);
+	if (*cur_epid < 0)
+		return NULL;
+
+	frame =
+	    fjes_hw_epbuf_rx_curpkt_get_addr(
+		&adapter->hw.ep_shm_info[*cur_epid].rx, psize);
+
+	return frame;
+}
+
+static void fjes_rxframe_release(struct fjes_adapter *adapter, int cur_epid)
+{
+	fjes_hw_epbuf_rx_curpkt_drop(&adapter->hw.ep_shm_info[cur_epid].rx);
+}
+
+static void fjes_rx_irq(struct fjes_adapter *adapter, int src_epid)
+{
+	struct fjes_hw *hw = &adapter->hw;
+
+	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, true);
+
+	adapter->unset_rx_last = true;
+	napi_schedule(&adapter->napi);
+}
+
+static int fjes_poll(struct napi_struct *napi, int budget)
+{
+	struct fjes_adapter *adapter =
+			container_of(napi, struct fjes_adapter, napi);
+	struct fjes_hw *hw = &adapter->hw;
+	struct net_device *netdev = napi->dev;
+	int work_done = 0;
+	struct sk_buff *skb;
+	void *frame;
+	size_t frame_len;
+	int cur_epid = 0;
+	int epidx = 0;
+
+	for (epidx = 0; epidx < hw->max_epid; epidx++) {
+		if (epidx == hw->my_epid)
+			continue;
+
+		adapter->hw.ep_shm_info[epidx].tx.info->v1i.rx_status |=
+			FJES_RX_POLL_WORK;
+	}
+
+	while (work_done < budget) {
+		prefetch(&adapter->hw);
+		frame = fjes_rxframe_get(adapter, &frame_len, &cur_epid);
+
+		if (frame) {
+			skb = napi_alloc_skb(napi, frame_len);
+			if (!skb) {
+				adapter->stats64.rx_dropped += 1;
+				hw->ep_shm_info[cur_epid].net_stats
+					.rx_dropped += 1;
+				adapter->stats64.rx_errors += 1;
+				hw->ep_shm_info[cur_epid].net_stats
+					.rx_errors += 1;
+			} else {
+				memcpy(skb_put(skb, frame_len),
+				       frame, frame_len);
+				skb->protocol = eth_type_trans(skb, netdev);
+				skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+				netif_receive_skb(skb);
+
+				work_done++;
+
+				adapter->stats64.rx_packets += 1;
+				hw->ep_shm_info[cur_epid].net_stats
+					.rx_packets += 1;
+				adapter->stats64.rx_bytes += frame_len;
+				hw->ep_shm_info[cur_epid].net_stats
+					.rx_bytes += frame_len;
+
+				if (is_multicast_ether_addr(
+					((struct ethhdr *)frame)->h_dest)) {
+					adapter->stats64.multicast += 1;
+					hw->ep_shm_info[cur_epid].net_stats
+						.multicast += 1;
+				}
+			}
+
+			fjes_rxframe_release(adapter, cur_epid);
+			adapter->unset_rx_last = true;
+		} else {
+			break;
+		}
+	}
+
+	if (work_done < budget) {
+		napi_complete(napi);
+
+		if (adapter->unset_rx_last) {
+			adapter->rx_last_jiffies = jiffies;
+			adapter->unset_rx_last = false;
+		}
+
+		if (((long)jiffies - (long)adapter->rx_last_jiffies) < 3) {
+			napi_reschedule(napi);
+		} else {
+			for (epidx = 0; epidx < hw->max_epid; epidx++) {
+				if (epidx == hw->my_epid)
+					continue;
+				adapter->hw.ep_shm_info[epidx]
+					.tx.info->v1i.rx_status &=
+					~FJES_RX_POLL_WORK;
+			}
+
+			fjes_hw_set_irqmask(hw, REG_ICTL_MASK_RX_DATA, false);
+		}
+	}
+
+	return work_done;
+}
+
 /* fjes_probe - Device Initialization Routine */
 static int fjes_probe(struct platform_device *plat_dev)
 {
@@ -800,6 +962,8 @@ static int fjes_remove(struct platform_device *plat_dev)
 
 	fjes_hw_exit(hw);
 
+	netif_napi_del(&adapter->napi);
+
 	free_netdev(netdev);
 
 	return 0;
@@ -807,6 +971,10 @@ static int fjes_remove(struct platform_device *plat_dev)
 
 static int fjes_sw_init(struct fjes_adapter *adapter)
 {
+	struct net_device *netdev = adapter->netdev;
+
+	netif_napi_add(netdev, &adapter->napi, fjes_poll, 64);
+
 	return 0;
 }