@@ -3919,101 +3919,6 @@ void ieee80211_txq_schedule_start(struct ieee80211_hw *hw, u8 ac)
}
EXPORT_SYMBOL(ieee80211_txq_schedule_start);
-void __ieee80211_subif_start_xmit(struct sk_buff *skb,
- struct net_device *dev,
- u32 info_flags,
- u32 ctrl_flags,
- u64 *cookie)
-{
- struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
- struct ieee80211_local *local = sdata->local;
- struct sta_info *sta;
- struct sk_buff *next;
-
- if (unlikely(skb->len < ETH_HLEN)) {
- kfree_skb(skb);
- return;
- }
-
- rcu_read_lock();
-
- if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
- goto out_free;
-
- if (IS_ERR(sta))
- sta = NULL;
-
- if (local->ops->wake_tx_queue) {
- u16 queue = __ieee80211_select_queue(sdata, sta, skb);
- skb_set_queue_mapping(skb, queue);
- skb_get_hash(skb);
- }
-
- if (sta) {
- struct ieee80211_fast_tx *fast_tx;
-
- sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
-
- fast_tx = rcu_dereference(sta->fast_tx);
-
- if (fast_tx &&
- ieee80211_xmit_fast(sdata, sta, fast_tx, skb))
- goto out;
- }
-
- if (skb_is_gso(skb)) {
- struct sk_buff *segs;
-
- segs = skb_gso_segment(skb, 0);
- if (IS_ERR(segs)) {
- goto out_free;
- } else if (segs) {
- consume_skb(skb);
- skb = segs;
- }
- } else {
- /* we cannot process non-linear frames on this path */
- if (skb_linearize(skb)) {
- kfree_skb(skb);
- goto out;
- }
-
- /* the frame could be fragmented, software-encrypted, and other
- * things so we cannot really handle checksum offload with it -
- * fix it up in software before we handle anything else.
- */
- if (skb->ip_summed == CHECKSUM_PARTIAL) {
- skb_set_transport_header(skb,
- skb_checksum_start_offset(skb));
- if (skb_checksum_help(skb))
- goto out_free;
- }
- }
-
- skb_list_walk_safe(skb, skb, next) {
- skb_mark_not_on_list(skb);
-
- if (skb->protocol == sdata->control_port_protocol)
- ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
-
- skb = ieee80211_build_hdr(sdata, skb, info_flags,
- sta, ctrl_flags, cookie);
- if (IS_ERR(skb)) {
- kfree_skb_list(next);
- goto out;
- }
-
- ieee80211_tx_stats(dev, skb->len);
-
- ieee80211_xmit(sdata, sta, skb);
- }
- goto out;
- out_free:
- kfree_skb(skb);
- out:
- rcu_read_unlock();
-}
-
static int ieee80211_change_da(struct sk_buff *skb, struct sta_info *sta)
{
struct ethhdr *eth;
@@ -4267,6 +4172,112 @@ static void ieee80211_8023_xmit(struct ieee80211_sub_if_data *sdata,
kfree_skb(skb);
}
+void __ieee80211_subif_start_xmit(struct sk_buff *skb,
+ struct net_device *dev,
+ u32 info_flags,
+ u32 ctrl_flags,
+ u64 *cookie)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ struct sta_info *sta;
+ struct sk_buff *next;
+ struct ieee80211_sub_if_data *ap_sdata;
+
+ if (unlikely(skb->len < ETH_HLEN)) {
+ kfree_skb(skb);
+ return;
+ }
+
+ rcu_read_lock();
+
+ if (ieee80211_lookup_ra_sta(sdata, skb, &sta))
+ goto out_free;
+
+ if (IS_ERR(sta))
+ sta = NULL;
+
+ if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+ ap_sdata = container_of(sdata->bss,
+ struct ieee80211_sub_if_data, u.ap);
+ if (ap_sdata->hw_80211_encap && !is_multicast_ether_addr(skb->data)) {
+ ieee80211_8023_xmit(sdata, dev, sta, skb);
+ rcu_read_unlock();
+ return;
+ }
+ }
+
+ if (local->ops->wake_tx_queue) {
+ u16 queue = __ieee80211_select_queue(sdata, sta, skb);
+ skb_set_queue_mapping(skb, queue);
+ skb_get_hash(skb);
+ }
+
+ if (sta) {
+ struct ieee80211_fast_tx *fast_tx;
+
+ sk_pacing_shift_update(skb->sk, sdata->local->hw.tx_sk_pacing_shift);
+
+ fast_tx = rcu_dereference(sta->fast_tx);
+
+ if (fast_tx &&
+ ieee80211_xmit_fast(sdata, sta, fast_tx, skb))
+ goto out;
+ }
+
+ if (skb_is_gso(skb)) {
+ struct sk_buff *segs;
+
+ segs = skb_gso_segment(skb, 0);
+ if (IS_ERR(segs)) {
+ goto out_free;
+ } else if (segs) {
+ consume_skb(skb);
+ skb = segs;
+ }
+ } else {
+ /* we cannot process non-linear frames on this path */
+ if (skb_linearize(skb)) {
+ kfree_skb(skb);
+ goto out;
+ }
+
+ /* the frame could be fragmented, software-encrypted, and other
+ * things so we cannot really handle checksum offload with it -
+ * fix it up in software before we handle anything else.
+ */
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ skb_set_transport_header(skb,
+ skb_checksum_start_offset(skb));
+ if (skb_checksum_help(skb))
+ goto out_free;
+ }
+ }
+
+ skb_list_walk_safe(skb, skb, next) {
+ skb_mark_not_on_list(skb);
+
+ if (skb->protocol == sdata->control_port_protocol)
+ ctrl_flags |= IEEE80211_TX_CTRL_SKIP_MPATH_LOOKUP;
+
+ skb = ieee80211_build_hdr(sdata, skb, info_flags,
+ sta, ctrl_flags, cookie);
+ if (IS_ERR(skb)) {
+ kfree_skb_list(next);
+ goto out;
+ }
+
+ ieee80211_tx_stats(dev, skb->len);
+
+ ieee80211_xmit(sdata, sta, skb);
+ }
+ goto out;
+ out_free:
+ kfree_skb(skb);
+ out:
+ rcu_read_unlock();
+}
+
netdev_tx_t ieee80211_subif_start_xmit_8023(struct sk_buff *skb,
struct net_device *dev)
{
AP-VLAN multicast/broadcast packets are expected to be encrypted in software, so those packets should follow the 802.11 xmit path. AP-VLAN unicast packets can be encrypted in the driver/hardware, so redirect them from '__ieee80211_subif_start_xmit' to 'ieee80211_8023_xmit' when encapsulation offload is enabled on the AP interface. Signed-off-by: Seevalamuthu Mariappan <seevalam@codeaurora.org> --- net/mac80211/tx.c | 201 ++++++++++++++++++++++++++++-------------------- 1 file changed, 106 insertions(+), 95 deletions(-)