@@ -495,7 +495,7 @@ static int xfrm_output_one(struct sk_buff *skb, int err)
struct xfrm_state *x = dst->xfrm;
struct net *net = xs_net(x);
- if (err <= 0 || x->xso.type == XFRM_DEV_OFFLOAD_PACKET)
+ if (err <= 0)
goto resume;
do {
@@ -612,6 +612,50 @@ int xfrm_output_resume(struct sock *sk, struct sk_buff *skb, int err)
}
EXPORT_SYMBOL_GPL(xfrm_output_resume);
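+/* Direct xmit path for packet offload mode: the HW performs the ESP
+ * transformation and encapsulation, so the skb bypasses the software
+ * xfrm output path and goes straight to the offloading netdevice.
+ */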
+static int xfrm_dev_direct_output(struct sock *sk, struct xfrm_state *x,
+ struct sk_buff *skb)
+{
+ struct dst_entry *dst;
+ struct net *net = xs_net(x);
+ int err;
+
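+ /* Replace the xfrm bundle dst with its child, the plain route to the
+ * destination, so the packet is not transformed again in software.
+ */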
+ dst = skb_dst_pop(skb);
+ if (!dst) {
+ XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+ kfree_skb(skb);
+ return -EHOSTUNREACH;
+ }
+ skb_dst_set(skb, dst);
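+ /* Drop any conntrack reference; the skb enters NF_INET_LOCAL_OUT fresh. */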
+ nf_reset_ct(skb);
+
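+ /* ->local_out is __ip_local_out()/__ip6_local_out(); a return value
+ * of 1 means the netfilter hooks passed and the skb may be sent.
+ */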
+ err = skb_dst(skb)->ops->local_out(net, sk, skb);
+ if (unlikely(err != 1)) {
+ kfree_skb(skb);
+ return err;
+ }
+
+ /* In transport mode the network destination is directly
+ * reachable, while in tunnel mode the inner packet's network
+ * may not be. In packet offload mode the HW is responsible
+ * for building the hard header, so hand the skb straight to
+ * the netdevice.
+ */
+ skb->dev = x->xso.dev;
+ __skb_push(skb, skb->dev->hard_header_len);
+ return dev_queue_xmit(skb);
+}
+
static int xfrm_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
{
return xfrm_output_resume(sk, skb, 1);
@@ -735,7 +769,8 @@ int xfrm_output(struct sock *sk, struct sk_buff *skb)
return -EHOSTUNREACH;
}
- return xfrm_output_resume(sk, skb, 0);
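+ /* HW performs the whole ESP transformation; take the direct xmit path. */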
+ return xfrm_dev_direct_output(sk, x, skb);
}
secpath_reset(skb);