
[1/1] IB/core: create struct ib_port_cache

Message ID CAMGffEn7QsNVhDcV10Qwc-U7em3L47eOgc0CS1oRxXSFAfLGkQ@mail.gmail.com (mailing list archive)
State Superseded

Commit Message

Jinpu Wang Jan. 4, 2017, 2:01 p.m. UTC
Hi Folks,

This is a follow up for http://www.spinics.net/lists/linux-rdma/msg44536.html.
As Jason suggested, I'm moving the 4 per-port arrays into a separate
ib_port_cache structure.

Please apply after the first 4 patches.


From 66e011a67bcae22ed1f5c180b5b40c085ed10fd3 Mon Sep 17 00:00:00 2001
From: Jack Wang <jinpu.wang@profitbricks.com>
Date: Wed, 4 Jan 2017 14:09:05 +0100
Subject: [PATCH 1/1] IB/core: create struct ib_port_cache

As Jason suggested, we have 4 per-port arrays;
it's better to have a separate structure to represent them.

Signed-off-by: Jack Wang <jinpu.wang@profitbricks.com>
---
 drivers/infiniband/core/cache.c | 74 ++++++++++++++++++++---------------------
 include/rdma/ib_verbs.h         | 10 ++++--
 2 files changed, 44 insertions(+), 40 deletions(-)

Comments

Jason Gunthorpe Jan. 4, 2017, 6:36 p.m. UTC | #1
On Wed, Jan 04, 2017 at 03:01:38PM +0100, Jinpu Wang wrote:

> -struct ib_cache {
> -	rwlock_t                lock;
> -	struct ib_event_handler event_handler;
> +struct ib_port_cache {
>  	struct ib_pkey_cache  **pkey_cache;
>  	struct ib_gid_table   **gid_cache;
>  	u8                     *lmc_cache;
>  	enum ib_port_state     *port_state_cache;
>  };
>  
> +struct ib_cache {
> +	rwlock_t                lock;
> +	struct ib_event_handler event_handler;
> +	struct ib_port_cache    port_cache;
> +};

That isn't really what I meant...

struct ib_port_cache {
   enum ib_port_state port_state;
   struct ib_pkey_cache *pkey;
   struct ib_gid_table *gid;
   u8 lmc;
};

struct ib_cache {
   struct ib_port_cache *ports;
};

Jason
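
For illustration, here is a minimal C sketch of the layout Jason describes:
one ib_port_cache entry per port and a single ports array inside ib_cache,
with the existing lock and event_handler fields assumed to stay where they
are today. The accessor helper name is illustrative and not taken from the
posted patch.

/* One cache entry per port, replacing the four parallel per-port arrays. */
struct ib_port_cache {
	enum ib_port_state    port_state;
	struct ib_pkey_cache *pkey;
	struct ib_gid_table  *gid;
	u8                    lmc;
};

struct ib_cache {
	rwlock_t                lock;		/* unchanged from the current struct */
	struct ib_event_handler event_handler;	/* unchanged from the current struct */
	struct ib_port_cache   *ports;		/* indexed by port - rdma_start_port() */
};

/* Hypothetical accessor: index the ports array once, then read plain fields. */
static inline struct ib_port_cache *ib_port_cache_get(struct ib_device *device,
						       u8 port_num)
{
	return &device->cache.ports[port_num - rdma_start_port(device)];
}

With this layout the long "device->cache.port_cache.xxx_cache[port - ...]"
expressions in the patch below would shrink to a single array index followed
by plain field accesses.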
Jinpu Wang Jan. 5, 2017, 8:52 a.m. UTC | #2
On Wed, Jan 4, 2017 at 7:36 PM, Jason Gunthorpe
<jgunthorpe@obsidianresearch.com> wrote:
> On Wed, Jan 04, 2017 at 03:01:38PM +0100, Jinpu Wang wrote:
>
>> -struct ib_cache {
>> -     rwlock_t                lock;
>> -     struct ib_event_handler event_handler;
>> +struct ib_port_cache {
>>       struct ib_pkey_cache  **pkey_cache;
>>       struct ib_gid_table   **gid_cache;
>>       u8                     *lmc_cache;
>>       enum ib_port_state     *port_state_cache;
>>  };
>>
>> +struct ib_cache {
>> +     rwlock_t                lock;
>> +     struct ib_event_handler event_handler;
>> +     struct ib_port_cache    port_cache;
>> +};
>
> That isn't really what I meant...
>
> struct ib_port_cache {
>    enum ib_port_state port_state;
>    struct ib_pkey_cache *pkey;
>    struct ib_gid_table *gid;
>    u8 lmc;
> };
>
> struct ib_cache {
>    struct ib_port_cache *ports;
> };
>
> Jason

Ah, okay, I will rework this patch and resubmit.

Thanks Jason

Patch

From 66e011a67bcae22ed1f5c180b5b40c085ed10fd3 Mon Sep 17 00:00:00 2001
From: Jack Wang <jinpu.wang@profitbricks.com>
Date: Wed, 4 Jan 2017 14:09:05 +0100
Subject: [PATCH 5/5] IB/core: create struct ib_port_cache

As Jason suggested, we have 4 per-port arrays;
it's better to have a separate structure to represent them.

Signed-off-by: Jack Wang <jinpu.wang@profitbricks.com>
---
 drivers/infiniband/core/cache.c | 74 ++++++++++++++++++++---------------------
 include/rdma/ib_verbs.h         | 10 ++++--
 2 files changed, 44 insertions(+), 40 deletions(-)

diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index f91886b..740229b 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -314,7 +314,7 @@  static void make_default_gid(struct  net_device *dev, union ib_gid *gid)
 int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
 		     union ib_gid *gid, struct ib_gid_attr *attr)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+	struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
 	struct ib_gid_table *table;
 	int ix;
 	int ret = 0;
@@ -369,7 +369,7 @@  int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
 int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
 		     union ib_gid *gid, struct ib_gid_attr *attr)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+	struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
 	struct ib_gid_table *table;
 	int ix;
 
@@ -399,7 +399,7 @@  int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
 int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 				     struct net_device *ndev)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+	struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
 	struct ib_gid_table *table;
 	int ix;
 	bool deleted = false;
@@ -428,7 +428,7 @@  int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
 static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
 			      union ib_gid *gid, struct ib_gid_attr *attr)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+	struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
 	struct ib_gid_table *table;
 
 	table = ports_table[port - rdma_start_port(ib_dev)];
@@ -455,7 +455,7 @@  static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
 				    unsigned long mask,
 				    u8 *port, u16 *index)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+	struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
 	struct ib_gid_table *table;
 	u8 p;
 	int local_index;
@@ -503,7 +503,7 @@  int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
 			       u16 *index)
 {
 	int local_index;
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+	struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
 	struct ib_gid_table *table;
 	unsigned long mask = GID_ATTR_FIND_MASK_GID |
 			     GID_ATTR_FIND_MASK_GID_TYPE;
@@ -562,7 +562,7 @@  static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
 				       void *context,
 				       u16 *index)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+	struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
 	struct ib_gid_table *table;
 	unsigned int i;
 	unsigned long flags;
@@ -668,7 +668,7 @@  void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
 				  unsigned long gid_type_mask,
 				  enum ib_cache_gid_default_mode mode)
 {
-	struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+	struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
 	union ib_gid gid;
 	struct ib_gid_attr gid_attr;
 	struct ib_gid_attr zattr_type = zattr;
@@ -791,7 +791,7 @@  static int _gid_table_setup_one(struct ib_device *ib_dev)
 			goto rollback_table_setup;
 	}
 
-	ib_dev->cache.gid_cache = table;
+	ib_dev->cache.port_cache.gid_cache = table;
 	return 0;
 
 rollback_table_setup:
@@ -807,7 +807,7 @@  static int _gid_table_setup_one(struct ib_device *ib_dev)
 
 static void gid_table_release_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table **table = ib_dev->cache.gid_cache;
+	struct ib_gid_table **table = ib_dev->cache.port_cache.gid_cache;
 	u8 port;
 
 	if (!table)
@@ -817,12 +817,12 @@  static void gid_table_release_one(struct ib_device *ib_dev)
 		release_gid_table(table[port]);
 
 	kfree(table);
-	ib_dev->cache.gid_cache = NULL;
+	ib_dev->cache.port_cache.gid_cache = NULL;
 }
 
 static void gid_table_cleanup_one(struct ib_device *ib_dev)
 {
-	struct ib_gid_table **table = ib_dev->cache.gid_cache;
+	struct ib_gid_table **table = ib_dev->cache.port_cache.gid_cache;
 	u8 port;
 
 	if (!table)
@@ -860,7 +860,7 @@  int ib_get_cached_gid(struct ib_device *device,
 {
 	int res;
 	unsigned long flags;
-	struct ib_gid_table **ports_table = device->cache.gid_cache;
+	struct ib_gid_table **ports_table = device->cache.port_cache.gid_cache;
 	struct ib_gid_table *table = ports_table[port_num - rdma_start_port(device)];
 
 	if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
@@ -917,7 +917,7 @@  int ib_get_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+	cache = device->cache.port_cache.pkey_cache[port_num - rdma_start_port(device)];
 
 	if (index < 0 || index >= cache->table_len)
 		ret = -EINVAL;
@@ -946,7 +946,7 @@  int ib_find_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+	cache = device->cache.port_cache.pkey_cache[port_num - rdma_start_port(device)];
 
 	*index = -1;
 
@@ -986,7 +986,7 @@  int ib_find_exact_cached_pkey(struct ib_device *device,
 
 	read_lock_irqsave(&device->cache.lock, flags);
 
-	cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+	cache = device->cache.port_cache.pkey_cache[port_num - rdma_start_port(device)];
 
 	*index = -1;
 
@@ -1014,7 +1014,7 @@  int ib_get_cached_lmc(struct ib_device *device,
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
+	*lmc = device->cache.port_cache.lmc_cache[port_num - rdma_start_port(device)];
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -1032,7 +1032,7 @@  int ib_get_cached_port_state(struct ib_device   *device,
 		return -EINVAL;
 
 	read_lock_irqsave(&device->cache.lock, flags);
-	*port_state = device->cache.port_state_cache[port_num - rdma_start_port(device)];
+	*port_state = device->cache.port_cache.port_state_cache[port_num - rdma_start_port(device)];
 	read_unlock_irqrestore(&device->cache.lock, flags);
 
 	return ret;
@@ -1051,7 +1051,7 @@  static void ib_cache_update(struct ib_device *device,
 	int                        i;
 	int                        ret;
 	struct ib_gid_table	  *table;
-	struct ib_gid_table	 **ports_table = device->cache.gid_cache;
+	struct ib_gid_table	 **ports_table = device->cache.port_cache.gid_cache;
 	bool			   use_roce_gid_table =
 					rdma_cap_roce_gid_table(device, port);
 
@@ -1110,9 +1110,9 @@  static void ib_cache_update(struct ib_device *device,
 
 	write_lock_irq(&device->cache.lock);
 
-	old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
+	old_pkey_cache = device->cache.port_cache.pkey_cache[port - rdma_start_port(device)];
 
-	device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
+	device->cache.port_cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
 	if (!use_roce_gid_table) {
 		write_lock(&table->rwlock);
 		for (i = 0; i < gid_cache->table_len; i++) {
@@ -1122,8 +1122,8 @@  static void ib_cache_update(struct ib_device *device,
 		write_unlock(&table->rwlock);
 	}
 
-	device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
-	device->cache.port_state_cache[port - rdma_start_port(device)] =
+	device->cache.port_cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
+	device->cache.port_cache.port_state_cache[port - rdma_start_port(device)] =
 		tprops->state;
 
 	write_unlock_irq(&device->cache.lock);
@@ -1177,19 +1177,19 @@  int ib_cache_setup_one(struct ib_device *device)
 
 	rwlock_init(&device->cache.lock);
 
-	device->cache.pkey_cache =
-		kzalloc(sizeof *device->cache.pkey_cache *
+	device->cache.port_cache.pkey_cache =
+		kzalloc(sizeof *device->cache.port_cache.pkey_cache *
 			(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
-	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
+	device->cache.port_cache.lmc_cache = kmalloc(sizeof *device->cache.port_cache.lmc_cache *
 					  (rdma_end_port(device) -
 					   rdma_start_port(device) + 1),
 					  GFP_KERNEL);
-	device->cache.port_state_cache = kmalloc(sizeof *device->cache.port_state_cache *
+	device->cache.port_cache.port_state_cache = kmalloc(sizeof *device->cache.port_cache.port_state_cache *
 					  (rdma_end_port(device) -
 					   rdma_start_port(device) + 1),
 					  GFP_KERNEL);
-	if (!device->cache.pkey_cache || !device->cache.port_state_cache ||
-	    !device->cache.lmc_cache) {
+	if (!device->cache.port_cache.pkey_cache || !device->cache.port_cache.port_state_cache ||
+	    !device->cache.port_cache.lmc_cache) {
 		err = -ENOMEM;
 		goto free;
 	}
@@ -1212,9 +1212,9 @@  int ib_cache_setup_one(struct ib_device *device)
 err:
 	gid_table_cleanup_one(device);
 free:
-	kfree(device->cache.pkey_cache);
-	kfree(device->cache.lmc_cache);
-	kfree(device->cache.port_state_cache);
+	kfree(device->cache.port_cache.pkey_cache);
+	kfree(device->cache.port_cache.lmc_cache);
+	kfree(device->cache.port_cache.port_state_cache);
 	return err;
 }
 
@@ -1228,15 +1228,15 @@  void ib_cache_release_one(struct ib_device *device)
 	 * all the device's resources when the cache could no
 	 * longer be accessed.
 	 */
-	if (device->cache.pkey_cache)
+	if (device->cache.port_cache.pkey_cache)
 		for (p = 0;
 		     p <= rdma_end_port(device) - rdma_start_port(device); ++p)
-			kfree(device->cache.pkey_cache[p]);
+			kfree(device->cache.port_cache.pkey_cache[p]);
 
 	gid_table_release_one(device);
-	kfree(device->cache.pkey_cache);
-	kfree(device->cache.lmc_cache);
-	kfree(device->cache.port_state_cache);
+	kfree(device->cache.port_cache.pkey_cache);
+	kfree(device->cache.port_cache.lmc_cache);
+	kfree(device->cache.port_cache.port_state_cache);
 }
 
 void ib_cache_cleanup_one(struct ib_device *device)
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index fafa988..4443770 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1775,15 +1775,19 @@  enum ib_mad_result {
 
 #define IB_DEVICE_NAME_MAX 64
 
-struct ib_cache {
-	rwlock_t                lock;
-	struct ib_event_handler event_handler;
+struct ib_port_cache {
 	struct ib_pkey_cache  **pkey_cache;
 	struct ib_gid_table   **gid_cache;
 	u8                     *lmc_cache;
 	enum ib_port_state     *port_state_cache;
 };
 
+struct ib_cache {
+	rwlock_t                lock;
+	struct ib_event_handler event_handler;
+	struct ib_port_cache    port_cache;
+};
+
 struct ib_dma_mapping_ops {
 	int		(*mapping_error)(struct ib_device *dev,
 					 u64 dma_addr);
-- 
2.7.4
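
As a further illustration of what the array-of-structs layout would mean for
the setup and teardown paths in the diff above: the three separate
kzalloc/kmalloc calls in ib_cache_setup_one() and the matching kfree calls
would collapse into a single per-port array allocation. A minimal sketch,
assuming the layout Jason proposed; the helper names are illustrative and not
part of the posted series.

/* Setup: one allocation covers pkey, gid, lmc and port_state for every port. */
static int ib_cache_alloc_ports(struct ib_device *device)
{
	u32 nports = rdma_end_port(device) - rdma_start_port(device) + 1;

	device->cache.ports = kcalloc(nports, sizeof(*device->cache.ports),
				      GFP_KERNEL);
	return device->cache.ports ? 0 : -ENOMEM;
}

/* Teardown: free each port's pkey table, then the single ports array. */
static void ib_cache_free_ports(struct ib_device *device)
{
	u32 p, nports = rdma_end_port(device) - rdma_start_port(device) + 1;

	if (!device->cache.ports)
		return;
	for (p = 0; p < nports; ++p)
		kfree(device->cache.ports[p].pkey);
	kfree(device->cache.ports);
	device->cache.ports = NULL;
}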