--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -1919,7 +1919,7 @@ static int mtk_poll_rx(struct napi_struct *napi, int budget,
reason = FIELD_GET(MTK_RXD4_PPE_CPU_REASON, trxd.rxd4);
if (reason == MTK_PPE_CPU_REASON_HIT_UNBIND_RATE_REACHED)
- mtk_ppe_check_skb(eth->ppe, skb, hash);
+ mtk_ppe_check_skb(eth->ppe[0], skb, hash);
if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) {
@@ -2983,15 +2983,18 @@ static int mtk_open(struct net_device *dev)
/* we run 2 netdevs on the same dma ring so we only bring it up once */
if (!refcount_read(ð->dma_refcnt)) {
const struct mtk_soc_data *soc = eth->soc;
- u32 gdm_config = MTK_GDMA_TO_PDMA;
+ u32 gdm_config;
+ int i;
err = mtk_start_dma(eth);
if (err)
return err;
- if (soc->offload_version && mtk_ppe_start(eth->ppe) == 0)
- gdm_config = soc->reg_map->gdma_to_ppe;
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_start(eth->ppe[i]);
+ gdm_config = soc->offload_version ? soc->reg_map->gdma_to_ppe
+ : MTK_GDMA_TO_PDMA;
mtk_gdm_config(eth, gdm_config);
napi_enable(ð->tx_napi);
@@ -3035,6 +3038,7 @@ static int mtk_stop(struct net_device *dev)
{
struct mtk_mac *mac = netdev_priv(dev);
struct mtk_eth *eth = mac->hw;
+ int i;
phylink_stop(mac->phylink);
@@ -3062,8 +3066,8 @@ static int mtk_stop(struct net_device *dev)
mtk_dma_free(eth);
- if (eth->soc->offload_version)
- mtk_ppe_stop(eth->ppe);
+ for (i = 0; i < ARRAY_SIZE(eth->ppe); i++)
+ mtk_ppe_stop(eth->ppe[i]);
return 0;
}
@@ -4103,12 +4107,19 @@ static int mtk_probe(struct platform_device *pdev)
}
if (eth->soc->offload_version) {
- u32 ppe_addr = eth->soc->reg_map->ppe_base;
-
- eth->ppe = mtk_ppe_init(eth, eth->base + ppe_addr, 2);
- if (!eth->ppe) {
- err = -ENOMEM;
- goto err_free_dev;
+ u32 num_ppe;
+
+ num_ppe = MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2) ? 2 : 1;
+ num_ppe = min_t(u32, ARRAY_SIZE(eth->ppe), num_ppe);
+ for (i = 0; i < num_ppe; i++) {
+ u32 ppe_addr = eth->soc->reg_map->ppe_base + i * 0x400;
+
+ eth->ppe[i] = mtk_ppe_init(eth, eth->base + ppe_addr,
+ eth->soc->offload_version, i);
+ if (!eth->ppe[i]) {
+ err = -ENOMEM;
+ goto err_free_dev;
+ }
}
err = mtk_eth_offload_init(eth);
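As a side note, here is a minimal stand-alone sketch of the address math the probe loop above depends on: each PPE instance is assumed to occupy a 0x400-byte register window, so PPE1 sits one window above PPE0 relative to reg_map->ppe_base. The base offset used below is only an illustrative placeholder, not a value taken from this patch.

#include <stdio.h>

/* Not part of the patch: illustrates the "ppe_base + i * 0x400" stride used in
 * mtk_probe() above.  The base offset is an assumed example, not taken from
 * the SoC register map in this patch.
 */
int main(void)
{
	unsigned int ppe_base = 0x2200;	/* hypothetical reg_map->ppe_base value */
	unsigned int num_ppe = 2;	/* two instances on NETSYS_V2 SoCs */
	unsigned int i;

	for (i = 0; i < num_ppe; i++)
		printf("PPE%u register window: eth->base + 0x%x\n",
		       i, ppe_base + i * 0x400);
	return 0;
}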
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h
@@ -1114,7 +1114,7 @@ struct mtk_eth {
int ip_align;
- struct mtk_ppe *ppe;
+ struct mtk_ppe *ppe[2];
struct rhashtable flow_table;
struct bpf_prog __rcu *prog;
--- a/drivers/net/ethernet/mediatek/mtk_ppe.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.c
@@ -682,7 +682,7 @@ int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry)
}
struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
- int version)
+ int version, int index)
{
const struct mtk_soc_data *soc = eth->soc;
struct device *dev = eth->dev;
@@ -717,7 +717,7 @@ struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
if (!ppe->foe_flow)
return NULL;
- mtk_ppe_debugfs_init(ppe);
+ mtk_ppe_debugfs_init(ppe, index);
return ppe;
}
@@ -738,10 +738,13 @@ static void mtk_ppe_init_foe_table(struct mtk_ppe *ppe)
ppe->foe_table[i + skip[k]].ib1 |= MTK_FOE_IB1_STATIC;
}
-int mtk_ppe_start(struct mtk_ppe *ppe)
+void mtk_ppe_start(struct mtk_ppe *ppe)
{
u32 val;
+ if (!ppe)
+ return;
+
mtk_ppe_init_foe_table(ppe);
ppe_w32(ppe, MTK_PPE_TB_BASE, ppe->foe_phys);
@@ -809,8 +812,6 @@ int mtk_ppe_start(struct mtk_ppe *ppe)
ppe_w32(ppe, MTK_PPE_GLO_CFG, val);
ppe_w32(ppe, MTK_PPE_DEFAULT_CPU_PORT, 0);
-
- return 0;
}
int mtk_ppe_stop(struct mtk_ppe *ppe)
@@ -818,6 +819,9 @@ int mtk_ppe_stop(struct mtk_ppe *ppe)
u32 val;
int i;
+ if (!ppe)
+ return 0;
+
for (i = 0; i < MTK_PPE_ENTRIES; i++)
ppe->foe_table[i].ib1 = FIELD_PREP(MTK_FOE_IB1_STATE,
MTK_FOE_STATE_INVALID);
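The switch to a void, NULL-tolerant mtk_ppe_start() (and the matching early return in mtk_ppe_stop()) is what lets mtk_open()/mtk_stop() iterate the whole eth->ppe[] array without checking how many instances were actually probed. A stand-alone sketch of that pattern, using hypothetical names rather than the driver's types:

#include <stdio.h>
#include <stddef.h>

/* Not part of the patch: stand-alone sketch of the NULL-tolerant start pattern
 * used above.  All names here are hypothetical.
 */
struct demo_ppe { int id; };

static void demo_ppe_start(struct demo_ppe *ppe)
{
	if (!ppe)	/* unprobed slot: ignore, as mtk_ppe_start() now does */
		return;
	printf("starting ppe%d\n", ppe->id);
}

int main(void)
{
	struct demo_ppe ppe0 = { .id = 0 };
	struct demo_ppe *ppe[2] = { &ppe0, NULL };	/* only one instance probed */
	size_t i;

	for (i = 0; i < sizeof(ppe) / sizeof(ppe[0]); i++)
		demo_ppe_start(ppe[i]);
	return 0;
}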
--- a/drivers/net/ethernet/mediatek/mtk_ppe.h
+++ b/drivers/net/ethernet/mediatek/mtk_ppe.h
@@ -247,6 +247,7 @@ struct mtk_flow_entry {
};
u8 type;
s8 wed_index;
+ u8 ppe_index;
u16 hash;
union {
struct mtk_foe_entry data;
@@ -265,6 +266,7 @@ struct mtk_ppe {
struct device *dev;
void __iomem *base;
int version;
+ char dirname[5];
struct mtk_foe_entry *foe_table;
dma_addr_t foe_phys;
@@ -277,8 +279,9 @@ struct mtk_ppe {
void *acct_table;
};
-struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base, int version);
-int mtk_ppe_start(struct mtk_ppe *ppe);
+struct mtk_ppe *mtk_ppe_init(struct mtk_eth *eth, void __iomem *base,
+ int version, int index);
+void mtk_ppe_start(struct mtk_ppe *ppe);
int mtk_ppe_stop(struct mtk_ppe *ppe);
void __mtk_ppe_check_skb(struct mtk_ppe *ppe, struct sk_buff *skb, u16 hash);
@@ -320,6 +323,6 @@ int mtk_foe_entry_set_wdma(struct mtk_foe_entry *entry, int wdma_idx, int txq,
int mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
void mtk_foe_entry_clear(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
int mtk_foe_entry_idle_time(struct mtk_ppe *ppe, struct mtk_flow_entry *entry);
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe);
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index);
#endif
--- a/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_debugfs.c
@@ -187,7 +187,7 @@ mtk_ppe_debugfs_foe_open_bind(struct inode *inode, struct file *file)
inode->i_private);
}
-int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
+int mtk_ppe_debugfs_init(struct mtk_ppe *ppe, int index)
{
static const struct file_operations fops_all = {
.open = mtk_ppe_debugfs_foe_open_all,
@@ -195,17 +195,17 @@ int mtk_ppe_debugfs_init(struct mtk_ppe *ppe)
.llseek = seq_lseek,
.release = single_release,
};
-
static const struct file_operations fops_bind = {
.open = mtk_ppe_debugfs_foe_open_bind,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
-
struct dentry *root;
- root = debugfs_create_dir("mtk_ppe", NULL);
+ snprintf(ppe->dirname, sizeof(ppe->dirname), "ppe%d", index);
+
+ root = debugfs_create_dir(ppe->dirname, NULL);
debugfs_create_file("entries", S_IRUGO, root, ppe, &fops_all);
debugfs_create_file("bind", S_IRUGO, root, ppe, &fops_bind);
--- a/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
+++ b/drivers/net/ethernet/mediatek/mtk_ppe_offload.c
@@ -434,7 +434,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
memcpy(&entry->data, &foe, sizeof(entry->data));
entry->wed_index = wed_index;
- err = mtk_foe_entry_commit(eth->ppe, entry);
+ err = mtk_foe_entry_commit(eth->ppe[entry->ppe_index], entry);
if (err < 0)
goto free;
@@ -446,7 +446,7 @@ mtk_flow_offload_replace(struct mtk_eth *eth, struct flow_cls_offload *f)
return 0;
clear:
- mtk_foe_entry_clear(eth->ppe, entry);
+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
free:
kfree(entry);
if (wed_index >= 0)
@@ -464,7 +464,7 @@ mtk_flow_offload_destroy(struct mtk_eth *eth, struct flow_cls_offload *f)
if (!entry)
return -ENOENT;
- mtk_foe_entry_clear(eth->ppe, entry);
+ mtk_foe_entry_clear(eth->ppe[entry->ppe_index], entry);
rhashtable_remove_fast(ð->flow_table, &entry->node,
mtk_flow_ht_params);
if (entry->wed_index >= 0)
@@ -485,7 +485,7 @@ mtk_flow_offload_stats(struct mtk_eth *eth, struct flow_cls_offload *f)
if (!entry)
return -ENOENT;
- idle = mtk_foe_entry_idle_time(eth->ppe, entry);
+ idle = mtk_foe_entry_idle_time(eth->ppe[entry->ppe_index], entry);
f->stats.lastused = jiffies - idle * HZ;
return 0;
@@ -537,7 +537,7 @@ mtk_eth_setup_tc_block(struct net_device *dev, struct flow_block_offload *f)
struct flow_block_cb *block_cb;
flow_setup_cb_t *cb;
- if (!eth->ppe || !eth->ppe->foe_table)
+ if (!eth->soc->offload_version)
return -EOPNOTSUPP;
if (f->binder_type != FLOW_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
@@ -589,8 +589,5 @@ int mtk_eth_setup_tc(struct net_device *dev, enum tc_setup_type type,
int mtk_eth_offload_init(struct mtk_eth *eth)
{
- if (!eth->ppe || !eth->ppe->foe_table)
- return 0;
-
return rhashtable_init(ð->flow_table, &mtk_flow_ht_params);
}
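Finally, the flow-offload paths above now select the owning PPE through entry->ppe_index, so commit, clear and stats all land on the same instance the flow was installed on. A small sketch of that dispatch, with hypothetical names; in this patch the index is simply whatever the entry was initialised to (zero for a zero-allocated entry).

#include <stdio.h>

/* Not part of the patch: sketch of the per-entry dispatch used in
 * mtk_ppe_offload.c above.  All names are hypothetical.
 */
struct demo_ppe { int id; };
struct demo_flow_entry { unsigned char ppe_index; };

static void demo_entry_clear(struct demo_ppe *ppe[], struct demo_flow_entry *e)
{
	printf("clearing flow on ppe%d\n", ppe[e->ppe_index]->id);
}

int main(void)
{
	struct demo_ppe ppe0 = { .id = 0 }, ppe1 = { .id = 1 };
	struct demo_ppe *ppe[] = { &ppe0, &ppe1 };
	struct demo_flow_entry entry = { .ppe_index = 0 };

	demo_entry_clear(ppe, &entry);
	return 0;
}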