Message ID | 20180613221417.GA22778@xldev-tmpl.dev.purestorage.com (mailing list archive) |
---|---|
State | Not Applicable |
Headers | show |
On Wed, Jun 13, 2018 at 04:14:18PM -0600, Anatoliy Glagolev wrote: > The existing implementation allows races between bsg_unregister and > bsg_open paths. bsg_unregister and request_queue cleanup and > deletion may start and complete right after bsg_get_device (in bsg_open path) > retrieves bsg_class_device and releases the mutex. Then bsg_open path > touches freed memory of bsg_class_device and request_queue. > > One possible fix is to hold the mutex all the way through bsg_get_device > instead of releasing it after bsg_class_device retrieval. This looks generally fine to me. Nitpicks below: > @@ -746,16 +745,18 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file) > */ > mutex_lock(&bsg_mutex); > bcd = idr_find(&bsg_minor_idr, iminor(inode)); > - mutex_unlock(&bsg_mutex); > > if (!bcd) > return ERR_PTR(-ENODEV); This needs to unlock the mutex. E.g. if (!bcd) { bd = ERR_PTR(-ENODEV); goto out_unlock; } > bd = __bsg_get_device(iminor(inode), bcd->queue); > + if (bd) { > + mutex_unlock(&bsg_mutex); > return bd; > + } > > bd = bsg_add_device(inode, bcd->queue, file); > + mutex_unlock(&bsg_mutex); > > return bd; I'd simply do: bd = __bsg_get_device(iminor(inode), bcd->queue); if (!bd) bd = bsg_add_device(inode, bcd->queue, file); out_unlock: mutex_unlock(&bsg_mutex); return bd;
diff --git a/block/bsg.c b/block/bsg.c index 132e657..10bc6a4 100644 --- a/block/bsg.c +++ b/block/bsg.c @@ -693,6 +693,8 @@ static struct bsg_device *bsg_add_device(struct inode *inode, struct bsg_device *bd; unsigned char buf[32]; + lockdep_assert_held(&bsg_mutex); + if (!blk_get_queue(rq)) return ERR_PTR(-ENXIO); @@ -707,14 +709,12 @@ static struct bsg_device *bsg_add_device(struct inode *inode, bsg_set_block(bd, file); atomic_set(&bd->ref_count, 1); - mutex_lock(&bsg_mutex); hlist_add_head(&bd->dev_list, bsg_dev_idx_hash(iminor(inode))); strncpy(bd->name, dev_name(rq->bsg_dev.class_dev), sizeof(bd->name) - 1); bsg_dbg(bd, "bound to <%s>, max queue %d\n", format_dev_t(buf, inode->i_rdev), bd->max_queue); - mutex_unlock(&bsg_mutex); return bd; } @@ -722,7 +722,7 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) { struct bsg_device *bd; - mutex_lock(&bsg_mutex); + lockdep_assert_held(&bsg_mutex); hlist_for_each_entry(bd, bsg_dev_idx_hash(minor), dev_list) { if (bd->queue == q) { @@ -732,7 +732,6 @@ static struct bsg_device *__bsg_get_device(int minor, struct request_queue *q) } bd = NULL; found: - mutex_unlock(&bsg_mutex); return bd; } @@ -746,16 +745,18 @@ static struct bsg_device *bsg_get_device(struct inode *inode, struct file *file) */ mutex_lock(&bsg_mutex); bcd = idr_find(&bsg_minor_idr, iminor(inode)); - mutex_unlock(&bsg_mutex); if (!bcd) return ERR_PTR(-ENODEV); bd = __bsg_get_device(iminor(inode), bcd->queue); - if (bd) + if (bd) { + mutex_unlock(&bsg_mutex); return bd; + } bd = bsg_add_device(inode, bcd->queue, file); + mutex_unlock(&bsg_mutex); return bd; }