@@ -31,7 +31,6 @@
static struct class *dca_class;
static struct idr dca_idr;
-static spinlock_t dca_idr_lock;
int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot)
{
@@ -55,23 +54,15 @@ int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev)
struct device *cd;
int ret;
- idr_preload(GFP_KERNEL);
- spin_lock(&dca_idr_lock);
-
- ret = idr_alloc(&dca_idr, dca, GFP_NOWAIT);
- if (ret >= 0)
- dca->id = ret;
-
- spin_unlock(&dca_idr_lock);
- idr_preload_end();
+ ret = idr_alloc(&dca_idr, dca, GFP_KERNEL);
if (ret < 0)
return ret;
+ dca->id = ret;
+
cd = device_create(dca_class, dev, MKDEV(0, 0), NULL, "dca%d", dca->id);
if (IS_ERR(cd)) {
- spin_lock(&dca_idr_lock);
idr_remove(&dca_idr, dca->id);
- spin_unlock(&dca_idr_lock);
return PTR_ERR(cd);
}
dca->cd = cd;
@@ -82,15 +73,12 @@ void dca_sysfs_remove_provider(struct dca_provider *dca)
{
device_unregister(dca->cd);
dca->cd = NULL;
- spin_lock(&dca_idr_lock);
idr_remove(&dca_idr, dca->id);
- spin_unlock(&dca_idr_lock);
}
int __init dca_sysfs_init(void)
{
idr_init(&dca_idr);
- spin_lock_init(&dca_idr_lock);
dca_class = class_create(THIS_MODULE, "dca");
if (IS_ERR(dca_class)) {
@@ -448,11 +448,9 @@ int qxl_surface_id_alloc(struct qxl_device *qdev,
int idr_ret;
int count = 0;
again:
- idr_preload(GFP_ATOMIC);
spin_lock(&qdev->surf_id_idr_lock);
- idr_ret = idr_alloc_range(&qdev->surf_id_idr, NULL, 1, 0, GFP_NOWAIT);
+ idr_ret = idr_alloc_range(&qdev->surf_id_idr, NULL, 1, 0, GFP_ATOMIC);
spin_unlock(&qdev->surf_id_idr_lock);
- idr_preload_end();
if (idr_ret < 0)
return idr_ret;
handle = idr_ret;
@@ -277,7 +277,6 @@ struct qxl_device {
uint64_t va_slot_mask;
struct idr release_idr;
- spinlock_t release_idr_lock;
struct mutex async_io_mutex;
unsigned int last_sent_io_cmd;
@@ -190,7 +190,6 @@ int qxl_device_init(struct qxl_device *qdev,
GFP_KERNEL);
idr_init(&qdev->release_idr);
- spin_lock_init(&qdev->release_idr_lock);
idr_init(&qdev->surf_id_idr);
spin_lock_init(&qdev->surf_id_idr_lock);
@@ -43,7 +43,6 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
struct qxl_release **ret)
{
struct qxl_release *release;
- int handle;
size_t size = sizeof(*release);
int idr_ret;
@@ -57,20 +56,16 @@ qxl_release_alloc(struct qxl_device *qdev, int type,
release->release_offset = 0;
release->surface_release_id = 0;
- idr_preload(GFP_KERNEL);
- spin_lock(&qdev->release_idr_lock);
- idr_ret = idr_alloc_range(&qdev->release_idr, release, 1, 0, GFP_NOWAIT);
- spin_unlock(&qdev->release_idr_lock);
- idr_preload_end();
- handle = idr_ret;
+ idr_ret = idr_alloc_range(&qdev->release_idr, release,
+ 1, 0, GFP_KERNEL);
if (idr_ret < 0)
goto release_fail;
*ret = release;
- QXL_INFO(qdev, "allocated release %lld\n", handle);
- release->id = handle;
+ QXL_INFO(qdev, "allocated release %lld\n", idr_ret);
+ release->id = idr_ret;
release_fail:
- return handle;
+ return idr_ret;
}
void
@@ -92,9 +87,7 @@ qxl_release_free(struct qxl_device *qdev,
qxl_fence_remove_release(&release->bos[i]->fence, release->id);
qxl_bo_unref(&release->bos[i]);
}
- spin_lock(&qdev->release_idr_lock);
idr_remove(&qdev->release_idr, release->id);
- spin_unlock(&qdev->release_idr_lock);
kfree(release);
}
@@ -266,9 +259,7 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev,
{
struct qxl_release *release;
- spin_lock(&qdev->release_idr_lock);
release = idr_find(&qdev->release_idr, id);
- spin_unlock(&qdev->release_idr_lock);
if (!release) {
DRM_ERROR("failed to find id in release_idr\n");
return NULL;
@@ -264,7 +264,6 @@ struct c2_pd_table {
struct c2_qp_table {
struct idr idr;
- spinlock_t lock;
};
struct c2_element {
@@ -380,36 +380,21 @@ static int destroy_qp(struct c2_dev *c2dev, struct c2_qp *qp)
static int c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
{
- int ret;
-
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&c2dev->qp_table.lock);
-
- ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_NOWAIT);
+ int ret = idr_alloc_cyclic(&c2dev->qp_table.idr, qp, 0, 0, GFP_KERNEL);
if (ret >= 0)
qp->qpn = ret;
- spin_unlock_irq(&c2dev->qp_table.lock);
- idr_preload_end();
return ret < 0 ? ret : 0;
}
static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
{
- spin_lock_irq(&c2dev->qp_table.lock);
idr_remove(&c2dev->qp_table.idr, qpn);
- spin_unlock_irq(&c2dev->qp_table.lock);
}
struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn)
{
- unsigned long flags;
- struct c2_qp *qp;
-
- spin_lock_irqsave(&c2dev->qp_table.lock, flags);
- qp = idr_find(&c2dev->qp_table.idr, qpn);
- spin_unlock_irqrestore(&c2dev->qp_table.lock, flags);
- return qp;
+ return idr_find(&c2dev->qp_table.idr, qpn);
}
int c2_alloc_qp(struct c2_dev *c2dev,
@@ -1014,7 +999,6 @@ out:
void c2_init_qp_table(struct c2_dev *c2dev)
{
- spin_lock_init(&c2dev->qp_table.lock);
idr_init(&c2dev->qp_table.idr);
}
@@ -1775,9 +1775,7 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
*---------------------------------------------------------------*/
static void free_minor(int minor)
{
- spin_lock(&_minor_lock);
idr_remove(&_minor_idr, minor);
- spin_unlock(&_minor_lock);
}
/*
@@ -1790,13 +1788,8 @@ static int specific_minor(int minor)
if (minor >= (1 << MINORBITS))
return -EINVAL;
- idr_preload(GFP_KERNEL);
- spin_lock(&_minor_lock);
-
- r = idr_alloc_range(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
-
- spin_unlock(&_minor_lock);
- idr_preload_end();
+ r = idr_alloc_range(&_minor_idr, MINOR_ALLOCED, minor,
+ minor + 1, GFP_KERNEL);
if (r < 0)
return r == -ENOSPC ? -EBUSY : r;
return 0;
@@ -1806,13 +1799,8 @@ static int next_free_minor(int *minor)
{
int r;
- idr_preload(GFP_KERNEL);
- spin_lock(&_minor_lock);
-
- r = idr_alloc_range(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
-
- spin_unlock(&_minor_lock);
- idr_preload_end();
+ r = idr_alloc_range(&_minor_idr, MINOR_ALLOCED,
+ 0, 1 << MINORBITS, GFP_KERNEL);
if (r < 0)
return r;
*minor = r;
@@ -1921,9 +1909,7 @@ static struct mapped_device *alloc_dev(int minor)
md->flush_bio.bi_rw = WRITE_FLUSH;
/* Populate the mapping, nobody knows we exist yet */
- spin_lock(&_minor_lock);
old_md = idr_replace(&_minor_idr, md, minor);
- spin_unlock(&_minor_lock);
BUG_ON(old_md != MINOR_ALLOCED);
@@ -26,7 +26,6 @@ module_param(cmd_retries, uint, 0644);
static struct workqueue_struct *workqueue;
static DEFINE_IDR(memstick_host_idr);
-static DEFINE_SPINLOCK(memstick_host_lock);
static int memstick_dev_match(struct memstick_dev *card,
struct memstick_device_id *id)
@@ -512,25 +511,17 @@ int memstick_add_host(struct memstick_host *host)
{
int rc;
- idr_preload(GFP_KERNEL);
- spin_lock(&memstick_host_lock);
-
- rc = idr_alloc(&memstick_host_idr, host, GFP_NOWAIT);
- if (rc >= 0)
- host->id = rc;
-
- spin_unlock(&memstick_host_lock);
- idr_preload_end();
+ rc = idr_alloc(&memstick_host_idr, host, GFP_KERNEL);
if (rc < 0)
return rc;
+ host->id = rc;
+
dev_set_name(&host->dev, "memstick%u", host->id);
rc = device_add(&host->dev);
if (rc) {
- spin_lock(&memstick_host_lock);
idr_remove(&memstick_host_idr, host->id);
- spin_unlock(&memstick_host_lock);
return rc;
}
@@ -554,9 +545,7 @@ void memstick_remove_host(struct memstick_host *host)
host->set_param(host, MEMSTICK_POWER, MEMSTICK_POWER_OFF);
mutex_unlock(&host->lock);
- spin_lock(&memstick_host_lock);
idr_remove(&memstick_host_idr, host->id);
- spin_unlock(&memstick_host_lock);
device_del(&host->dev);
}
EXPORT_SYMBOL(memstick_remove_host);
@@ -40,7 +40,6 @@ module_param(msi_en, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(msi_en, "Enable MSI");
static DEFINE_IDR(rtsx_pci_idr);
-static DEFINE_SPINLOCK(rtsx_pci_lock);
static struct mfd_cell rtsx_pcr_cells[] = {
[RTSX_SD_CARD] = {
@@ -1096,16 +1095,12 @@ static int rtsx_pci_probe(struct pci_dev *pcidev,
}
handle->pcr = pcr;
- idr_preload(GFP_KERNEL);
- spin_lock(&rtsx_pci_lock);
- ret = idr_alloc(&rtsx_pci_idr, pcr, GFP_NOWAIT);
- if (ret >= 0)
- pcr->id = ret;
- spin_unlock(&rtsx_pci_lock);
- idr_preload_end();
+ ret = idr_alloc(&rtsx_pci_idr, pcr, GFP_KERNEL);
if (ret < 0)
goto free_handle;
+ pcr->id = ret;
+
pcr->pci = pcidev;
dev_set_drvdata(&pcidev->dev, handle);
@@ -1211,9 +1206,7 @@ static void rtsx_pci_remove(struct pci_dev *pcidev)
pci_release_regions(pcidev);
pci_disable_device(pcidev);
- spin_lock(&rtsx_pci_lock);
idr_remove(&rtsx_pci_idr, pcr->id);
- spin_unlock(&rtsx_pci_lock);
kfree(pcr->slots);
kfree(pcr);
@@ -27,7 +27,6 @@
#define DRIVER_NAME "c2port"
#define DRIVER_VERSION "0.51.0"
-static DEFINE_SPINLOCK(c2port_idr_lock);
static DEFINE_IDR(c2port_idr);
/*
@@ -897,11 +896,7 @@ struct c2port_device *c2port_device_register(char *name,
if (unlikely(!c2dev))
return ERR_PTR(-ENOMEM);
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&c2port_idr_lock);
- ret = idr_alloc(&c2port_idr, c2dev, GFP_NOWAIT);
- spin_unlock_irq(&c2port_idr_lock);
- idr_preload_end();
+ ret = idr_alloc(&c2port_idr, c2dev, GFP_KERNEL);
if (ret < 0)
goto error_idr_alloc;
@@ -941,9 +936,7 @@ error_device_create_bin_file:
device_destroy(c2port_class, 0);
error_device_create:
- spin_lock_irq(&c2port_idr_lock);
idr_remove(&c2port_idr, c2dev->id);
- spin_unlock_irq(&c2port_idr_lock);
error_idr_alloc:
kfree(c2dev);
@@ -960,9 +953,7 @@ void c2port_device_unregister(struct c2port_device *c2dev)
dev_info(c2dev->dev, "C2 port %s removed\n", c2dev->name);
device_remove_bin_file(c2dev->dev, &c2port_bin_attrs);
- spin_lock_irq(&c2port_idr_lock);
idr_remove(&c2port_idr, c2dev->id);
- spin_unlock_irq(&c2port_idr_lock);
device_destroy(c2port_class, c2dev->id);
@@ -20,7 +20,6 @@
static struct workqueue_struct *workqueue;
static DEFINE_IDR(tifm_adapter_idr);
-static DEFINE_SPINLOCK(tifm_adapter_lock);
static const char *tifm_media_type_name(unsigned char type, unsigned char nt)
{
@@ -196,22 +195,16 @@ int tifm_add_adapter(struct tifm_adapter *fm)
{
int rc;
- idr_preload(GFP_KERNEL);
- spin_lock(&tifm_adapter_lock);
- rc = idr_alloc(&tifm_adapter_idr, fm, GFP_NOWAIT);
- if (rc >= 0)
- fm->id = rc;
- spin_unlock(&tifm_adapter_lock);
- idr_preload_end();
+ rc = idr_alloc(&tifm_adapter_idr, fm, GFP_KERNEL);
if (rc < 0)
return rc;
+ fm->id = rc;
+
dev_set_name(&fm->dev, "tifm%u", fm->id);
rc = device_add(&fm->dev);
if (rc) {
- spin_lock(&tifm_adapter_lock);
idr_remove(&tifm_adapter_idr, fm->id);
- spin_unlock(&tifm_adapter_lock);
}
return rc;
@@ -228,9 +221,7 @@ void tifm_remove_adapter(struct tifm_adapter *fm)
device_unregister(&fm->sockets[cnt]->dev);
}
- spin_lock(&tifm_adapter_lock);
idr_remove(&tifm_adapter_idr, fm->id);
- spin_unlock(&tifm_adapter_lock);
device_del(&fm->dev);
}
EXPORT_SYMBOL(tifm_remove_adapter);
@@ -55,7 +55,6 @@ void mmc_unregister_host_class(void)
}
static DEFINE_IDR(mmc_host_idr);
-static DEFINE_SPINLOCK(mmc_host_lock);
#ifdef CONFIG_MMC_CLKGATE
static ssize_t clkgate_delay_show(struct device *dev,
@@ -435,16 +434,11 @@ struct mmc_host *mmc_alloc_host(int extra, struct device *dev)
/* scanning will be enabled when we're ready */
host->rescan_disable = 1;
- idr_preload(GFP_KERNEL);
- spin_lock(&mmc_host_lock);
- err = idr_alloc(&mmc_host_idr, host, GFP_NOWAIT);
- if (err >= 0)
- host->index = err;
- spin_unlock(&mmc_host_lock);
- idr_preload_end();
+ err = idr_alloc(&mmc_host_idr, host, GFP_KERNEL);
if (err < 0)
goto free;
+ host->index = err;
dev_set_name(&host->class_dev, "mmc%d", host->index);
host->parent = dev;
@@ -552,10 +546,7 @@ EXPORT_SYMBOL(mmc_remove_host);
*/
void mmc_free_host(struct mmc_host *host)
{
- spin_lock(&mmc_host_lock);
idr_remove(&mmc_host_idr, host->index);
- spin_unlock(&mmc_host_lock);
-
put_device(&host->class_dev);
}
@@ -115,7 +115,6 @@ typedef struct {
} scsi_changer;
static DEFINE_IDR(ch_index_idr);
-static DEFINE_SPINLOCK(ch_index_lock);
static const struct {
unsigned char sense;
@@ -582,15 +581,12 @@ ch_open(struct inode *inode, struct file *file)
int minor = iminor(inode);
mutex_lock(&ch_mutex);
- spin_lock(&ch_index_lock);
ch = idr_find(&ch_index_idr, minor);
if (NULL == ch || scsi_device_get(ch->device)) {
- spin_unlock(&ch_index_lock);
mutex_unlock(&ch_mutex);
return -ENXIO;
}
- spin_unlock(&ch_index_lock);
file->private_data = ch;
mutex_unlock(&ch_mutex);
@@ -905,12 +901,8 @@ static int ch_probe(struct device *dev)
if (NULL == ch)
return -ENOMEM;
- idr_preload(GFP_KERNEL);
- spin_lock(&ch_index_lock);
- ret = idr_alloc_range(&ch_index_idr, ch, 0, CH_MAX_DEVS + 1, GFP_NOWAIT);
- spin_unlock(&ch_index_lock);
- idr_preload_end();
-
+ ret = idr_alloc_range(&ch_index_idr, ch, 0,
+ CH_MAX_DEVS + 1, GFP_KERNEL);
if (ret < 0) {
if (ret == -ENOSPC)
ret = -ENODEV;
@@ -951,9 +943,7 @@ static int ch_remove(struct device *dev)
{
scsi_changer *ch = dev_get_drvdata(dev);
- spin_lock(&ch_index_lock);
idr_remove(&ch_index_idr, ch->minor);
- spin_unlock(&ch_index_lock);
device_destroy(ch_sysfs_class, MKDEV(SCSI_CHANGER_MAJOR,ch->minor));
kfree(ch->dt);
@@ -214,7 +214,6 @@ static void scsi_tape_release(struct kref *);
#define to_scsi_tape(obj) container_of(obj, struct scsi_tape, kref)
static DEFINE_MUTEX(st_ref_mutex);
-static DEFINE_SPINLOCK(st_index_lock);
static DEFINE_SPINLOCK(st_use_lock);
static DEFINE_IDR(st_index_idr);
@@ -235,7 +234,6 @@ static struct scsi_tape *scsi_tape_get(int dev)
struct scsi_tape *STp = NULL;
mutex_lock(&st_ref_mutex);
- spin_lock(&st_index_lock);
STp = idr_find(&st_index_idr, dev);
if (!STp) goto out;
@@ -254,7 +252,6 @@ out_put:
kref_put(&STp->kref, scsi_tape_release);
STp = NULL;
out:
- spin_unlock(&st_index_lock);
mutex_unlock(&st_ref_mutex);
return STp;
}
@@ -4182,11 +4179,7 @@ static int st_probe(struct device *dev)
tpnt->blksize_changed = 0;
mutex_init(&tpnt->lock);
- idr_preload(GFP_KERNEL);
- spin_lock(&st_index_lock);
- error = idr_alloc_range(&st_index_idr, tpnt, 0, ST_MAX_TAPES + 1, GFP_NOWAIT);
- spin_unlock(&st_index_lock);
- idr_preload_end();
+ error = idr_alloc_range(&st_index_idr, tpnt, 0, ST_MAX_TAPES + 1, GFP_KERNEL);
if (error < 0) {
pr_warn("st: idr allocation failed: %d\n", error);
goto out_put_queue;
@@ -4212,9 +4205,7 @@ static int st_probe(struct device *dev)
out_remove_devs:
remove_cdevs(tpnt);
- spin_lock(&st_index_lock);
idr_remove(&st_index_idr, tpnt->index);
- spin_unlock(&st_index_lock);
out_put_queue:
blk_put_queue(disk->queue);
out_put_disk:
@@ -4238,9 +4229,7 @@ static int st_remove(struct device *dev)
mutex_lock(&st_ref_mutex);
kref_put(&tpnt->kref, scsi_tape_release);
mutex_unlock(&st_ref_mutex);
- spin_lock(&st_index_lock);
idr_remove(&st_index_idr, index);
- spin_unlock(&st_index_lock);
return 0;
}
@@ -59,7 +59,6 @@ static DEFINE_SPINLOCK(np_lock);
static struct idr tiqn_idr;
struct idr sess_idr;
struct mutex auth_id_lock;
-spinlock_t sess_idr_lock;
struct iscsit_global *iscsit_global;
@@ -147,22 +146,17 @@ struct iscsi_tiqn *iscsit_add_tiqn(unsigned char *buf)
tiqn->tiqn_state = TIQN_STATE_ACTIVE;
- idr_preload(GFP_KERNEL);
- spin_lock(&tiqn_lock);
-
- ret = idr_alloc(&tiqn_idr, NULL, GFP_NOWAIT);
+ ret = idr_alloc(&tiqn_idr, NULL, GFP_KERNEL);
if (ret < 0) {
pr_err("idr_alloc() failed for tiqn->tiqn_index\n");
- spin_unlock(&tiqn_lock);
- idr_preload_end();
kfree(tiqn);
return ERR_PTR(ret);
}
tiqn->tiqn_index = ret;
- list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
+ spin_lock(&tiqn_lock);
+ list_add_tail(&tiqn->tiqn_list, &g_tiqn_list);
spin_unlock(&tiqn_lock);
- idr_preload_end();
pr_debug("CORE[0] - Added iSCSI Target IQN: %s\n", tiqn->tiqn);
@@ -201,8 +195,8 @@ void iscsit_del_tiqn(struct iscsi_tiqn *tiqn)
spin_lock(&tiqn_lock);
list_del(&tiqn->tiqn_list);
- idr_remove(&tiqn_idr, tiqn->tiqn_index);
spin_unlock(&tiqn_lock);
+ idr_remove(&tiqn_idr, tiqn->tiqn_index);
pr_debug("CORE[0] - Deleted iSCSI Target IQN: %s\n",
tiqn->tiqn);
@@ -519,7 +513,6 @@ static int __init iscsi_target_init_module(void)
return -1;
}
mutex_init(&auth_id_lock);
- spin_lock_init(&sess_idr_lock);
idr_init(&tiqn_idr);
idr_init(&sess_idr);
@@ -4417,9 +4410,7 @@ int iscsit_close_session(struct iscsi_session *sess)
pr_debug("Decremented number of active iSCSI Sessions on"
" iSCSI TPG: %hu to %u\n", tpg->tpgt, tpg->nsessions);
- spin_lock(&sess_idr_lock);
idr_remove(&sess_idr, sess->session_index);
- spin_unlock(&sess_idr_lock);
kfree(sess->sess_ops);
sess->sess_ops = NULL;
@@ -43,7 +43,6 @@ extern struct kmem_cache *lio_r2t_cache;
extern struct idr sess_idr;
extern struct mutex auth_id_lock;
-extern spinlock_t sess_idr_lock;
#endif /*** ISCSI_TARGET_H ***/
@@ -289,14 +289,7 @@ static int iscsi_login_zero_tsih_s1(
spin_lock_init(&sess->session_usage_lock);
spin_lock_init(&sess->ttt_lock);
- idr_preload(GFP_KERNEL);
- spin_lock_bh(&sess_idr_lock);
- ret = idr_alloc(&sess_idr, NULL, GFP_NOWAIT);
- if (ret >= 0)
- sess->session_index = ret;
- spin_unlock_bh(&sess_idr_lock);
- idr_preload_end();
-
+ ret = idr_alloc(&sess_idr, NULL, GFP_KERNEL);
if (ret < 0) {
pr_err("idr_alloc() for sess_idr failed\n");
iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
@@ -305,6 +298,7 @@ static int iscsi_login_zero_tsih_s1(
return -ENOMEM;
}
+ sess->session_index = ret;
sess->creation_time = get_jiffies_64();
spin_lock_init(&sess->session_stats_lock);
/*
@@ -1315,9 +1309,7 @@ new_sess_out:
if (conn->sess->se_sess)
transport_free_session(conn->sess->se_sess);
if (conn->sess->session_index != 0) {
- spin_lock_bh(&sess_idr_lock);
idr_remove(&sess_idr, conn->sess->session_index);
- spin_unlock_bh(&sess_idr_lock);
}
kfree(conn->sess->sess_ops);
kfree(conn->sess);
@@ -621,7 +621,6 @@ struct cgroup_subsys {
struct list_head sibling;
/* used when use_id == true */
struct idr idr;
- spinlock_t id_lock;
/* list of cftype_sets */
struct list_head cftsets;
@@ -603,7 +603,6 @@ extern struct proto sctpv6_prot;
void sctp_put_port(struct sock *sk);
extern struct idr sctp_assocs_id;
-extern spinlock_t sctp_assocs_id_lock;
/* Static inline functions. */
@@ -5143,9 +5143,7 @@ void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
rcu_assign_pointer(id->css, NULL);
rcu_assign_pointer(css->id, NULL);
- spin_lock(&ss->id_lock);
idr_remove(&ss->idr, id->id);
- spin_unlock(&ss->id_lock);
kfree_rcu(id, rcu_head);
}
EXPORT_SYMBOL_GPL(free_css_id);
@@ -5167,12 +5165,8 @@ static struct css_id *get_new_cssid(struct cgroup_subsys *ss, int depth)
if (!newid)
return ERR_PTR(-ENOMEM);
- idr_preload(GFP_KERNEL);
- spin_lock(&ss->id_lock);
/* Don't use 0. allocates an ID of 1-65535 */
- ret = idr_alloc_range(&ss->idr, newid, 1, CSS_ID_MAX + 1, GFP_NOWAIT);
- spin_unlock(&ss->id_lock);
- idr_preload_end();
+ ret = idr_alloc_range(&ss->idr, newid, 1, CSS_ID_MAX + 1, GFP_KERNEL);
/* Returns error when there are no free spaces for new ID.*/
if (ret < 0)
@@ -5192,7 +5186,6 @@ static int __init_or_module cgroup_init_idr(struct cgroup_subsys *ss,
{
struct css_id *newid;
- spin_lock_init(&ss->id_lock);
idr_init(&ss->idr);
newid = get_new_cssid(ss, 0);
@@ -1690,13 +1690,7 @@ static struct worker *create_worker(struct worker_pool *pool)
* ID is needed to determine kthread name. Allocate ID first
* without installing the pointer.
*/
- idr_preload(GFP_KERNEL);
- spin_lock_irq(&pool->lock);
-
- id = idr_alloc(&pool->worker_idr, NULL, GFP_NOWAIT);
-
- spin_unlock_irq(&pool->lock);
- idr_preload_end();
+ id = idr_alloc(&pool->worker_idr, NULL, GFP_KERNEL);
if (id < 0)
goto fail;
@@ -1737,18 +1731,13 @@ static struct worker *create_worker(struct worker_pool *pool)
worker->flags |= WORKER_UNBOUND;
/* successful, commit the pointer to idr */
- spin_lock_irq(&pool->lock);
idr_replace(&pool->worker_idr, worker, worker->id);
- spin_unlock_irq(&pool->lock);
return worker;
fail:
- if (id >= 0) {
- spin_lock_irq(&pool->lock);
+ if (id >= 0)
idr_remove(&pool->worker_idr, id);
- spin_unlock_irq(&pool->lock);
- }
kfree(worker);
return NULL;
}
@@ -41,7 +41,6 @@
*/
struct p9_idpool {
- spinlock_t lock;
struct idr pool;
};
@@ -58,7 +57,6 @@ struct p9_idpool *p9_idpool_create(void)
if (!p)
return ERR_PTR(-ENOMEM);
- spin_lock_init(&p->lock);
idr_init(&p->pool);
return p;
@@ -88,16 +86,9 @@ EXPORT_SYMBOL(p9_idpool_destroy);
int p9_idpool_get(struct p9_idpool *p)
{
int i;
- unsigned long flags;
-
- idr_preload(GFP_NOFS);
- spin_lock_irqsave(&p->lock, flags);
/* no need to store exactly p, we just need something non-null */
- i = idr_alloc(&p->pool, p, GFP_NOWAIT);
-
- spin_unlock_irqrestore(&p->lock, flags);
- idr_preload_end();
+ i = idr_alloc(&p->pool, p, GFP_NOFS);
if (i < 0)
return -1;
@@ -117,13 +108,9 @@ EXPORT_SYMBOL(p9_idpool_get);
void p9_idpool_put(int id, struct p9_idpool *p)
{
- unsigned long flags;
-
p9_debug(P9_DEBUG_MUX, " id %d pool %p\n", id, p);
- spin_lock_irqsave(&p->lock, flags);
idr_remove(&p->pool, id);
- spin_unlock_irqrestore(&p->lock, flags);
}
EXPORT_SYMBOL(p9_idpool_put);
@@ -467,11 +467,8 @@ static void sctp_association_destroy(struct sctp_association *asoc)
sctp_endpoint_put(asoc->ep);
sock_put(asoc->base.sk);
- if (asoc->assoc_id != 0) {
- spin_lock_bh(&sctp_assocs_id_lock);
+ if (asoc->assoc_id != 0)
idr_remove(&sctp_assocs_id, asoc->assoc_id);
- spin_unlock_bh(&sctp_assocs_id_lock);
- }
WARN_ON(atomic_read(&asoc->rmem_alloc));
@@ -1580,21 +1577,14 @@ int sctp_assoc_lookup_laddr(struct sctp_association *asoc,
/* Set an association id for a given association */
int sctp_assoc_set_id(struct sctp_association *asoc, gfp_t gfp)
{
- bool preload = gfp & __GFP_WAIT;
int ret;
/* If the id is already assigned, keep it. */
if (asoc->assoc_id)
return 0;
- if (preload)
- idr_preload(gfp);
- spin_lock_bh(&sctp_assocs_id_lock);
/* 0 is not a valid assoc_id, must be >= 1 */
- ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, GFP_NOWAIT);
- spin_unlock_bh(&sctp_assocs_id_lock);
- if (preload)
- idr_preload_end();
+ ret = idr_alloc_cyclic(&sctp_assocs_id, asoc, 1, 0, gfp);
if (ret < 0)
return ret;
@@ -71,7 +71,6 @@
struct sctp_globals sctp_globals __read_mostly;
struct idr sctp_assocs_id;
-DEFINE_SPINLOCK(sctp_assocs_id_lock);
static struct sctp_pf *sctp_pf_inet6_specific;
static struct sctp_pf *sctp_pf_inet_specific;
@@ -230,9 +230,7 @@ struct sctp_association *sctp_id2assoc(struct sock *sk, sctp_assoc_t id)
if (!id || (id == (sctp_assoc_t)-1))
return NULL;
- spin_lock_bh(&sctp_assocs_id_lock);
asoc = (struct sctp_association *)idr_find(&sctp_assocs_id, (int)id);
- spin_unlock_bh(&sctp_assocs_id_lock);
if (!asoc || (asoc->base.sk != sk) || asoc->base.dead)
return NULL;