@@ -648,11 +648,11 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 {
 	struct virtio_gpu_get_capset *cmd_p;
 	struct virtio_gpu_vbuffer *vbuf;
-	int max_size = vgdev->capsets[idx].max_size;
+	int max_size;
 	struct virtio_gpu_drv_cap_cache *cache_ent;
 	void *resp_buf;
 
-	if (idx > vgdev->num_capsets)
+	if (idx >= vgdev->num_capsets)
 		return -EINVAL;
 
 	if (version > vgdev->capsets[idx].max_version)
@@ -662,6 +662,7 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
 	if (!cache_ent)
 		return -ENOMEM;
 
+	max_size = vgdev->capsets[idx].max_size;
 	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
 	if (!cache_ent->caps_cache) {
 		kfree(cache_ent);
There are two problems with the bounds checking here.  First, we read
from "vgdev->capsets[idx].max_size" before checking whether "idx" is
within bounds.  Second, the bounds check is off by one, so we could
end up reading one element beyond the end of the vgdev->capsets[]
array.  This doesn't affect runtime because in the current code "idx"
is always valid.

Fixes: 62fb7a5e1096 ("virtio-gpu: add 3d/virgl support")
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
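For illustration, here is a minimal userspace sketch of the same pattern the
patch enforces: validate the index first (using ">=", since valid indexes run
from 0 to the array size minus one) and only then read the array element.  The
struct, array, and function names below are hypothetical and are not taken from
the driver.

	#include <stdio.h>

	#define NUM_CAPSETS 3

	struct capset {
		int max_size;
	};

	static struct capset capsets[NUM_CAPSETS] = {
		{ 64 }, { 128 }, { 256 },
	};

	/* Return the capset's max_size, or -1 for an invalid index. */
	static int get_max_size(int idx)
	{
		/*
		 * Bounds check before any read of capsets[idx].  Note the
		 * ">=" comparison: "idx > NUM_CAPSETS" would wrongly accept
		 * idx == NUM_CAPSETS, one element past the end.
		 */
		if (idx < 0 || idx >= NUM_CAPSETS)
			return -1;

		return capsets[idx].max_size;
	}

	int main(void)
	{
		printf("%d\n", get_max_size(2)); /* last valid index -> 256 */
		printf("%d\n", get_max_size(3)); /* out of bounds -> -1 */
		return 0;
	}

The ordering matters for the same reason as in the patch: even when the read of
an out-of-bounds element never happens at runtime today, doing the read before
the check makes the code depend on every caller passing a valid index.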