diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -60,7 +60,7 @@ struct ovs_frag_data {
static DEFINE_PER_CPU(struct ovs_frag_data, ovs_frag_data_storage);
#define OVS_RECURSION_LIMIT 5
-#define NR_FLOW_KEYS 5
+#define NR_FLOW_KEYS 10
#define DEFERRED_ACTION_FIFO_SIZE 10
struct action_fifo {
@@ -85,7 +85,7 @@ static struct action_fifo __percpu *action_fifos;
* ovs_flow_key_alloc provides a per-CPU sw_flow_key allocator. keys must be
* freed in the reverse order that they were allocated in (i.e., a stack).
*/
-static struct sw_flow_key *ovs_flow_key_alloc(void)
+struct sw_flow_key *ovs_flow_key_alloc(void)
{
struct flow_key_stack *keys = this_cpu_ptr(flow_key_stack);
int level = this_cpu_read(flow_keys_allocated);
@@ -98,7 +98,7 @@ static struct sw_flow_key *ovs_flow_key_alloc(void)
return &keys->key[level];
}
-static void ovs_flow_key_free(struct sw_flow_key *key)
+void ovs_flow_key_free(struct sw_flow_key *key)
{
struct flow_key_stack *keys = this_cpu_ptr(flow_key_stack);
int level = this_cpu_read(flow_keys_allocated);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -285,6 +285,9 @@ void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
void ovs_flow_stats_clear(struct sw_flow *);
u64 ovs_flow_used_time(unsigned long flow_jiffies);
+struct sw_flow_key *ovs_flow_key_alloc(void);
+void ovs_flow_key_free(struct sw_flow_key *key);
+
int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key);
int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key);
int ovs_flow_key_extract(const struct ip_tunnel_info *tun_info,
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -494,7 +494,7 @@ u32 ovs_vport_find_upcall_portid(const struct vport *vport,
int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
const struct ip_tunnel_info *tun_info)
{
- struct sw_flow_key key;
+ struct sw_flow_key *key;
int error;
OVS_CB(skb)->input_vport = vport;
@@ -509,14 +509,27 @@ int ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
tun_info = NULL;
}
- /* Extract flow from 'skb' into 'key'. */
- error = ovs_flow_key_extract(tun_info, skb, &key);
- if (unlikely(error)) {
- kfree_skb(skb);
- return error;
+ key = ovs_flow_key_alloc();
+ if (unlikely(!key)) {
+ error = -ENOMEM;
+ goto err_skb;
}
- ovs_dp_process_packet(skb, &key);
+
+ /* Extract flow from 'skb' into 'key'. */
+ error = ovs_flow_key_extract(tun_info, skb, key);
+ if (unlikely(error))
+ goto err_key;
+
+ ovs_dp_process_packet(skb, key);
+ ovs_flow_key_free(key);
+
return 0;
+
+err_key:
+ ovs_flow_key_free(key);
+err_skb:
+ kfree_skb(skb);
+ return error;
}
static int packet_length(const struct sk_buff *skb,
Rather than allocate the flow key on the stack in ovs_vport_receive(), use
the per-CPU flow key allocator introduced with the previous change. The
number of keys is increased because ovs_vport_receive() can be in the
recursion path too. This brings ovs_vport_receive() stack usage from 544
bytes down to 64 bytes on ppc64le.

Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
---
 net/openvswitch/actions.c |  6 +++---
 net/openvswitch/flow.h    |  3 +++
 net/openvswitch/vport.c   | 27 ++++++++++++++++++++-------
 3 files changed, 26 insertions(+), 10 deletions(-)
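
For readers unfamiliar with the pattern, here is a standalone userspace
sketch (not part of the patch) of the LIFO discipline that
ovs_flow_key_alloc()/ovs_flow_key_free() rely on. flow_key_alloc(),
flow_key_free(), NR_KEYS and the _Thread_local stack are hypothetical
stand-ins for the kernel's sw_flow_key and per-CPU storage.

/*
 * Userspace sketch illustrating a fixed-depth, per-thread key stack:
 * allocations nest, and frees must come in reverse order.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_KEYS 10                      /* mirrors NR_FLOW_KEYS = 10 */

struct flow_key { unsigned char data[464]; };   /* size illustrative only */

static _Thread_local struct flow_key key_stack[NR_KEYS];
static _Thread_local int keys_allocated;

static struct flow_key *flow_key_alloc(void)
{
	/* Fail instead of overflowing the fixed-depth stack. */
	if (keys_allocated >= NR_KEYS)
		return NULL;
	return &key_stack[keys_allocated++];
}

static void flow_key_free(struct flow_key *key)
{
	/* Keys must be freed in the reverse order of allocation. */
	if (keys_allocated == 0 || key != &key_stack[keys_allocated - 1]) {
		fprintf(stderr, "out-of-order free\n");
		abort();
	}
	keys_allocated--;
}

int main(void)
{
	/* Same shape as the reworked ovs_vport_receive(): alloc, use, free. */
	struct flow_key *key = flow_key_alloc();

	if (!key)
		return 1;       /* would be -ENOMEM in the kernel */
	/* ... extract the flow into *key and process the packet here ... */
	flow_key_free(key);
	return 0;
}

As in the reworked ovs_vport_receive(), a depleted stack is treated as an
allocation failure rather than being papered over with an on-stack key.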