From 66e011a67bcae22ed1f5c180b5b40c085ed10fd3 Mon Sep 17 00:00:00 2001
From: Jack Wang <jinpu.wang@profitbricks.com>
Date: Wed, 4 Jan 2017 14:09:05 +0100
Subject: [PATCH 5/5] IB/core: create struct ib_port_cache

As Jason suggested, we have four elements kept in per-port arrays;
it is better to have a separate structure to represent them.

Signed-off-by: Jack Wang <jinpu.wang@profitbricks.com>
---
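The conversion is mechanical: every access that previously reached one
of the four parallel per-port arrays on struct ib_cache now goes
through the embedded port_cache member. A minimal before/after sketch
of the access pattern, using the existing LMC lookup in
ib_get_cached_lmc() as the example:

	/* before: four parallel per-port arrays hang directly off ib_cache */
	*lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];

	/* after: the same arrays, grouped under cache.port_cache */
	*lmc = device->cache.port_cache.lmc_cache[port_num - rdma_start_port(device)];
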
 drivers/infiniband/core/cache.c | 74 ++++++++++++++++++++---------------------
 include/rdma/ib_verbs.h         | 10 ++++--
 2 files changed, 44 insertions(+), 40 deletions(-)
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -314,7 +314,7 @@ static void make_default_gid(struct net_device *dev, union ib_gid *gid)
int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
union ib_gid *gid, struct ib_gid_attr *attr)
{
- struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
struct ib_gid_table *table;
int ix;
int ret = 0;
@@ -369,7 +369,7 @@ int ib_cache_gid_add(struct ib_device *ib_dev, u8 port,
int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
union ib_gid *gid, struct ib_gid_attr *attr)
{
- struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
struct ib_gid_table *table;
int ix;
@@ -399,7 +399,7 @@ int ib_cache_gid_del(struct ib_device *ib_dev, u8 port,
int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
struct net_device *ndev)
{
- struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
struct ib_gid_table *table;
int ix;
bool deleted = false;
@@ -428,7 +428,7 @@ int ib_cache_gid_del_all_netdev_gids(struct ib_device *ib_dev, u8 port,
static int __ib_cache_gid_get(struct ib_device *ib_dev, u8 port, int index,
union ib_gid *gid, struct ib_gid_attr *attr)
{
- struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
struct ib_gid_table *table;
table = ports_table[port - rdma_start_port(ib_dev)];
@@ -455,7 +455,7 @@ static int _ib_cache_gid_table_find(struct ib_device *ib_dev,
unsigned long mask,
u8 *port, u16 *index)
{
- struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
struct ib_gid_table *table;
u8 p;
int local_index;
@@ -503,7 +503,7 @@ int ib_find_cached_gid_by_port(struct ib_device *ib_dev,
u16 *index)
{
int local_index;
- struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
struct ib_gid_table *table;
unsigned long mask = GID_ATTR_FIND_MASK_GID |
GID_ATTR_FIND_MASK_GID_TYPE;
@@ -562,7 +562,7 @@ static int ib_cache_gid_find_by_filter(struct ib_device *ib_dev,
void *context,
u16 *index)
{
- struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
struct ib_gid_table *table;
unsigned int i;
unsigned long flags;
@@ -668,7 +668,7 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
unsigned long gid_type_mask,
enum ib_cache_gid_default_mode mode)
{
- struct ib_gid_table **ports_table = ib_dev->cache.gid_cache;
+ struct ib_gid_table **ports_table = ib_dev->cache.port_cache.gid_cache;
union ib_gid gid;
struct ib_gid_attr gid_attr;
struct ib_gid_attr zattr_type = zattr;
@@ -791,7 +791,7 @@ static int _gid_table_setup_one(struct ib_device *ib_dev)
goto rollback_table_setup;
}
- ib_dev->cache.gid_cache = table;
+ ib_dev->cache.port_cache.gid_cache = table;
return 0;
rollback_table_setup:
@@ -807,7 +807,7 @@ static int _gid_table_setup_one(struct ib_device *ib_dev)
static void gid_table_release_one(struct ib_device *ib_dev)
{
- struct ib_gid_table **table = ib_dev->cache.gid_cache;
+ struct ib_gid_table **table = ib_dev->cache.port_cache.gid_cache;
u8 port;
if (!table)
@@ -817,12 +817,12 @@ static void gid_table_release_one(struct ib_device *ib_dev)
release_gid_table(table[port]);
kfree(table);
- ib_dev->cache.gid_cache = NULL;
+ ib_dev->cache.port_cache.gid_cache = NULL;
}
static void gid_table_cleanup_one(struct ib_device *ib_dev)
{
- struct ib_gid_table **table = ib_dev->cache.gid_cache;
+ struct ib_gid_table **table = ib_dev->cache.port_cache.gid_cache;
u8 port;
if (!table)
@@ -860,7 +860,7 @@ int ib_get_cached_gid(struct ib_device *device,
{
int res;
unsigned long flags;
- struct ib_gid_table **ports_table = device->cache.gid_cache;
+ struct ib_gid_table **ports_table = device->cache.port_cache.gid_cache;
struct ib_gid_table *table = ports_table[port_num - rdma_start_port(device)];
if (port_num < rdma_start_port(device) || port_num > rdma_end_port(device))
@@ -917,7 +917,7 @@ int ib_get_cached_pkey(struct ib_device *device,
read_lock_irqsave(&device->cache.lock, flags);
- cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+ cache = device->cache.port_cache.pkey_cache[port_num - rdma_start_port(device)];
if (index < 0 || index >= cache->table_len)
ret = -EINVAL;
@@ -946,7 +946,7 @@ int ib_find_cached_pkey(struct ib_device *device,
read_lock_irqsave(&device->cache.lock, flags);
- cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+ cache = device->cache.port_cache.pkey_cache[port_num - rdma_start_port(device)];
*index = -1;
@@ -986,7 +986,7 @@ int ib_find_exact_cached_pkey(struct ib_device *device,
read_lock_irqsave(&device->cache.lock, flags);
- cache = device->cache.pkey_cache[port_num - rdma_start_port(device)];
+ cache = device->cache.port_cache.pkey_cache[port_num - rdma_start_port(device)];
*index = -1;
@@ -1014,7 +1014,7 @@ int ib_get_cached_lmc(struct ib_device *device,
return -EINVAL;
read_lock_irqsave(&device->cache.lock, flags);
- *lmc = device->cache.lmc_cache[port_num - rdma_start_port(device)];
+ *lmc = device->cache.port_cache.lmc_cache[port_num - rdma_start_port(device)];
read_unlock_irqrestore(&device->cache.lock, flags);
return ret;
@@ -1032,7 +1032,7 @@ int ib_get_cached_port_state(struct ib_device *device,
return -EINVAL;
read_lock_irqsave(&device->cache.lock, flags);
- *port_state = device->cache.port_state_cache[port_num - rdma_start_port(device)];
+ *port_state = device->cache.port_cache.port_state_cache[port_num - rdma_start_port(device)];
read_unlock_irqrestore(&device->cache.lock, flags);
return ret;
@@ -1051,7 +1051,7 @@ static void ib_cache_update(struct ib_device *device,
int i;
int ret;
struct ib_gid_table *table;
- struct ib_gid_table **ports_table = device->cache.gid_cache;
+ struct ib_gid_table **ports_table = device->cache.port_cache.gid_cache;
bool use_roce_gid_table =
rdma_cap_roce_gid_table(device, port);
@@ -1110,9 +1110,9 @@ static void ib_cache_update(struct ib_device *device,
write_lock_irq(&device->cache.lock);
- old_pkey_cache = device->cache.pkey_cache[port - rdma_start_port(device)];
+ old_pkey_cache = device->cache.port_cache.pkey_cache[port - rdma_start_port(device)];
- device->cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
+ device->cache.port_cache.pkey_cache[port - rdma_start_port(device)] = pkey_cache;
if (!use_roce_gid_table) {
write_lock(&table->rwlock);
for (i = 0; i < gid_cache->table_len; i++) {
@@ -1122,8 +1122,8 @@ static void ib_cache_update(struct ib_device *device,
write_unlock(&table->rwlock);
}
- device->cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
- device->cache.port_state_cache[port - rdma_start_port(device)] =
+ device->cache.port_cache.lmc_cache[port - rdma_start_port(device)] = tprops->lmc;
+ device->cache.port_cache.port_state_cache[port - rdma_start_port(device)] =
tprops->state;
write_unlock_irq(&device->cache.lock);
@@ -1177,19 +1177,19 @@ int ib_cache_setup_one(struct ib_device *device)
rwlock_init(&device->cache.lock);
- device->cache.pkey_cache =
- kzalloc(sizeof *device->cache.pkey_cache *
+ device->cache.port_cache.pkey_cache =
+ kzalloc(sizeof *device->cache.port_cache.pkey_cache *
(rdma_end_port(device) - rdma_start_port(device) + 1), GFP_KERNEL);
- device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
+ device->cache.port_cache.lmc_cache = kmalloc(sizeof *device->cache.port_cache.lmc_cache *
(rdma_end_port(device) -
rdma_start_port(device) + 1),
GFP_KERNEL);
- device->cache.port_state_cache = kmalloc(sizeof *device->cache.port_state_cache *
+ device->cache.port_cache.port_state_cache = kmalloc(sizeof *device->cache.port_cache.port_state_cache *
(rdma_end_port(device) -
rdma_start_port(device) + 1),
GFP_KERNEL);
- if (!device->cache.pkey_cache || !device->cache.port_state_cache ||
- !device->cache.lmc_cache) {
+ if (!device->cache.port_cache.pkey_cache || !device->cache.port_cache.port_state_cache ||
+ !device->cache.port_cache.lmc_cache) {
err = -ENOMEM;
goto free;
}
@@ -1212,9 +1212,9 @@ int ib_cache_setup_one(struct ib_device *device)
err:
gid_table_cleanup_one(device);
free:
- kfree(device->cache.pkey_cache);
- kfree(device->cache.lmc_cache);
- kfree(device->cache.port_state_cache);
+ kfree(device->cache.port_cache.pkey_cache);
+ kfree(device->cache.port_cache.lmc_cache);
+ kfree(device->cache.port_cache.port_state_cache);
return err;
}
@@ -1228,15 +1228,15 @@ void ib_cache_release_one(struct ib_device *device)
* all the device's resources when the cache could no
* longer be accessed.
*/
- if (device->cache.pkey_cache)
+ if (device->cache.port_cache.pkey_cache)
for (p = 0;
p <= rdma_end_port(device) - rdma_start_port(device); ++p)
- kfree(device->cache.pkey_cache[p]);
+ kfree(device->cache.port_cache.pkey_cache[p]);
gid_table_release_one(device);
- kfree(device->cache.pkey_cache);
- kfree(device->cache.lmc_cache);
- kfree(device->cache.port_state_cache);
+ kfree(device->cache.port_cache.pkey_cache);
+ kfree(device->cache.port_cache.lmc_cache);
+ kfree(device->cache.port_cache.port_state_cache);
}
void ib_cache_cleanup_one(struct ib_device *device)
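A small aside on the allocation sites above: they keep the existing
"sizeof *ptr * nports" kzalloc/kmalloc idiom, only with the longer
port_cache spelling. If those lines grow too unwieldy, a follow-up
could switch to kcalloc(), shown here only as a sketch and not as
part of this patch:

	/* sketch: equivalent allocation with kcalloc(), not in this patch */
	device->cache.port_cache.pkey_cache =
		kcalloc(rdma_end_port(device) - rdma_start_port(device) + 1,
			sizeof(*device->cache.port_cache.pkey_cache), GFP_KERNEL);
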
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -1775,15 +1775,19 @@ enum ib_mad_result {
#define IB_DEVICE_NAME_MAX 64
-struct ib_cache {
- rwlock_t lock;
- struct ib_event_handler event_handler;
+struct ib_port_cache {
struct ib_pkey_cache **pkey_cache;
struct ib_gid_table **gid_cache;
u8 *lmc_cache;
enum ib_port_state *port_state_cache;
};
+struct ib_cache {
+ rwlock_t lock;
+ struct ib_event_handler event_handler;
+ struct ib_port_cache port_cache;
+};
+
struct ib_dma_mapping_ops {
int (*mapping_error)(struct ib_device *dev,
u64 dma_addr);
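
With this patch the lock and event handler stay in struct ib_cache,
while all per-port state sits behind the new port_cache member. One
possible follow-up, sketched below purely as an assumption and not
part of this patch, would be to invert the layout from one structure
of per-port arrays into one ib_port_cache instance per port, so that
a single indexed access covers all four elements:

	/* hypothetical follow-up layout, not what this patch applies */
	struct ib_port_cache {
		struct ib_pkey_cache *pkey;
		struct ib_gid_table  *gid;
		u8                    lmc;
		enum ib_port_state    port_state;
	};

	struct ib_cache {
		rwlock_t                lock;
		struct ib_event_handler event_handler;
		struct ib_port_cache   *ports;	/* one entry per port */
	};
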
--
2.7.4