@@ -67,13 +67,13 @@ struct rpmh_request {
* struct rpmh_ctrlr: our representation of the controller
*
* @cache: the list of cached requests
- * @cache_lock: synchronize access to the cache data
+ * @lock: synchronize access to the controller data
* @dirty: was the cache updated since flush
* @batch_cache: Cache sleep and wake requests sent as batch
*/
struct rpmh_ctrlr {
struct list_head cache;
- spinlock_t cache_lock;
+ spinlock_t lock;
bool dirty;
struct list_head batch_cache;
};
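The rename from cache_lock to lock matches the kernel-doc update above: the
single spinlock now guards all of the controller's mutable state (the cache
list, the dirty flag, and the batch_cache list), not just the request cache.
A minimal sketch of that invariant, using a hypothetical helper name
(rpmh_ctrlr_mark_dirty is illustrative, not part of the patch):

    /* Assumes ctrlr->lock protects every mutable field of rpmh_ctrlr. */
    static void rpmh_ctrlr_mark_dirty(struct rpmh_ctrlr *ctrlr)
    {
            lockdep_assert_held(&ctrlr->lock);
            ctrlr->dirty = true;
    }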
@@ -656,7 +656,7 @@ static int rpmh_rsc_probe(struct platform_device *pdev)
/* Enable the active TCS to send requests immediately */
write_tcs_reg(drv, RSC_DRV_IRQ_ENABLE, 0, drv->tcs[ACTIVE_TCS].mask);

- spin_lock_init(&drv->client.cache_lock);
+ spin_lock_init(&drv->client.lock);
INIT_LIST_HEAD(&drv->client.cache);
INIT_LIST_HEAD(&drv->client.batch_cache);

@@ -118,9 +118,8 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,
struct tcs_cmd *cmd)
{
struct cache_req *req;
- unsigned long flags;

- spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ spin_lock(&ctrlr->lock);
req = __find_req(ctrlr, cmd->addr);
if (req)
goto existing;
@@ -154,7 +153,7 @@ static struct cache_req *cache_rpm_request(struct rpmh_ctrlr *ctrlr,

ctrlr->dirty = true;
unlock:
- spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+ spin_unlock(&ctrlr->lock);

return req;
}
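Converting the irqsave/irqrestore variants to plain spin_lock()/spin_unlock()
is correct only under the assumption that ctrlr->lock is never acquired from
hard-IRQ context, i.e. every caller runs in process context or enters with
interrupts already disabled. If an interrupt handler could take the lock, the
plain form would allow the classic self-deadlock. A generic sketch with
made-up names (demo_lock and demo_irq_handler are illustrative, not from this
driver):

    #include <linux/spinlock.h>
    #include <linux/interrupt.h>

    static DEFINE_SPINLOCK(demo_lock);

    static irqreturn_t demo_irq_handler(int irq, void *data)
    {
            /* Deadlocks if it interrupts a holder on the same CPU. */
            spin_lock(&demo_lock);
            spin_unlock(&demo_lock);
            return IRQ_HANDLED;
    }

    static void demo_task_path(void)
    {
            unsigned long flags;

            /* Unsafe if demo_irq_handler() can fire on this CPU: */
            spin_lock(&demo_lock);
            spin_unlock(&demo_lock);

            /* Safe even then: interrupts stay off while the lock is held. */
            spin_lock_irqsave(&demo_lock, flags);
            spin_unlock_irqrestore(&demo_lock, flags);
    }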
@@ -283,23 +282,20 @@ EXPORT_SYMBOL(rpmh_write);

static void cache_batch(struct rpmh_ctrlr *ctrlr, struct batch_cache_req *req)
{
- unsigned long flags;
-
- spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ spin_lock(&ctrlr->lock);
list_add_tail(&req->list, &ctrlr->batch_cache);
- spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+ spin_unlock(&ctrlr->lock);
}

static int flush_batch(struct rpmh_ctrlr *ctrlr)
{
struct batch_cache_req *req;
const struct rpmh_request *rpm_msg;
- unsigned long flags;
int ret = 0;
int i;

/* Send Sleep/Wake requests to the controller, expect no response */
- spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ spin_lock(&ctrlr->lock);
list_for_each_entry(req, &ctrlr->batch_cache, list) {
for (i = 0; i < req->count; i++) {
rpm_msg = req->rpm_msgs + i;
@@ -309,7 +305,7 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr)
break;
}
}
- spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+ spin_unlock(&ctrlr->lock);

return ret;
}
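flush_batch() walks the batch cache under the plain lock and sends each
cached sleep/wake request without waiting for a response. For context, a
hedged sketch of how such entries are created, assuming the rpmh_write_batch()
API declared in include/soc/qcom/rpmh.h (addresses and data values here are
made up):

    #include <soc/qcom/rpmh.h>

    static int demo_cache_sleep_votes(struct device *dev)
    {
            struct tcs_cmd cmds[] = {
                    { .addr = 0x30000, .data = 0x1 }, /* hypothetical resource */
                    { .addr = 0x30010, .data = 0x0 },
            };
            u32 num_cmds[] = { ARRAY_SIZE(cmds), 0 }; /* one batch, zero-terminated */

            /* Sleep/wake batches are cached (cache_batch) and reach the
             * controller only when the cache is flushed (flush_batch). */
            return rpmh_write_batch(dev, RPMH_SLEEP_STATE, cmds, num_cmds);
    }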
@@ -317,13 +313,12 @@ static int flush_batch(struct rpmh_ctrlr *ctrlr)
static void invalidate_batch(struct rpmh_ctrlr *ctrlr)
{
struct batch_cache_req *req, *tmp;
- unsigned long flags;

- spin_lock_irqsave(&ctrlr->cache_lock, flags);
+ spin_lock(&ctrlr->lock);
list_for_each_entry_safe(req, tmp, &ctrlr->batch_cache, list)
kfree(req);
INIT_LIST_HEAD(&ctrlr->batch_cache);
- spin_unlock_irqrestore(&ctrlr->cache_lock, flags);
+ spin_unlock(&ctrlr->lock);
}

/**