@@ -89,13 +89,14 @@ nvkm_chid_new(const struct nvkm_event_func *func, struct nvkm_subdev *subdev,
struct nvkm_chid *chid;
int id;
- if (!(chid = *pchid = kzalloc(struct_size(chid, used, nr), GFP_KERNEL)))
+ if (!(chid = *pchid = kzalloc(struct_size(chid, used, 2 * nr), GFP_KERNEL)))
return -ENOMEM;
kref_init(&chid->kref);
chid->nr = nr;
chid->mask = chid->nr - 1;
spin_lock_init(&chid->lock);
+ chid->reserved = chid->used + nr;
if (!(chid->data = kvzalloc(sizeof(*chid->data) * nr, GFP_KERNEL))) {
nvkm_chid_unref(pchid);
@@ -109,3 +110,49 @@ nvkm_chid_new(const struct nvkm_event_func *func, struct nvkm_subdev *subdev,
return nvkm_event_init(func, subdev, 1, nr, &chid->event);
}
+
+void
+nvkm_chid_reserved_free(struct nvkm_chid *chid, int first, int count)
+{
+ int id;
+
+ for (id = first; id < first + count; id++)
+ __clear_bit(id, chid->reserved);
+}
+
+int
+nvkm_chid_reserved_alloc(struct nvkm_chid *chid, int count)
+{
+ int id, start, end;
+
+ start = end = 0;
+
+ while (start != chid->nr) {
+ start = find_next_zero_bit(chid->reserved, chid->nr, end);
+ end = find_next_bit(chid->reserved, chid->nr, start);
+
+ if (end - start >= count) {
+ for (id = start; id < start + count; id++)
+ __set_bit(id, chid->reserved);
+ return start;
+ }
+ }
+
+ return -1;
+}
+
+void
+nvkm_chid_reserve(struct nvkm_chid *chid, int first, int count)
+{
+ int id;
+
+ if (WARN_ON(first + count - 1 >= chid->nr))
+ return;
+
+ for (id = 0; id < first; id++)
+ __set_bit(id, chid->reserved);
+ for (id = first + count; id < chid->nr; id++)
+ __set_bit(id, chid->reserved);
+ for (id = first; id < first + count; id++)
+ __set_bit(id, chid->used);
+}
@@ -13,6 +13,7 @@ struct nvkm_chid {
void **data;
spinlock_t lock;
+ unsigned long *reserved;
unsigned long used[];
};
@@ -22,4 +23,7 @@ struct nvkm_chid *nvkm_chid_ref(struct nvkm_chid *);
void nvkm_chid_unref(struct nvkm_chid **);
int nvkm_chid_get(struct nvkm_chid *, void *data);
void nvkm_chid_put(struct nvkm_chid *, int id, spinlock_t *data_lock);
+int nvkm_chid_reserved_alloc(struct nvkm_chid *chid, int count);
+void nvkm_chid_reserved_free(struct nvkm_chid *chid, int first, int count);
+void nvkm_chid_reserve(struct nvkm_chid *chid, int first, int count);
#endif
@@ -548,6 +548,9 @@ r535_fifo_runl_ctor(struct nvkm_fifo *fifo)
(ret = nvkm_chid_new(&nvkm_chan_event, subdev, chids, 0, chids, &fifo->chid)))
return ret;
+ if (nvkm_vgpu_mgr_is_supported(subdev->device))
+ nvkm_chid_reserve(fifo->chid, 512, 1536);
+
ctrl = nvkm_gsp_rm_ctrl_rd(&gsp->internal.device.subdevice,
NV2080_CTRL_CMD_FIFO_GET_DEVICE_INFO_TABLE, sizeof(*ctrl));
if (WARN_ON(IS_ERR(ctrl)))
Creating a vGPU requires a dedicated portion of the channels. As nvkm manages all the channels, the vGPU host needs to reserve channels from nvkm when vGPU is enabled, and allocate channels from the reserved pool when creating vGPUs.

Introduce a simple reserved channel allocator. When vGPU is enabled, reserve 1536 CHIDs for vGPUs and leave 512 CHIDs for nvkm.

Signed-off-by: Zhi Wang <zhiw@nvidia.com>
---
 .../gpu/drm/nouveau/nvkm/engine/fifo/chid.c | 49 ++++++++++++++++++-
 .../gpu/drm/nouveau/nvkm/engine/fifo/chid.h |  4 ++
 .../gpu/drm/nouveau/nvkm/engine/fifo/r535.c |  3 ++
 3 files changed, 55 insertions(+), 1 deletion(-)
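
For reference, a minimal sketch of how a vGPU host path might consume the new API. Everything named example_vgpu_* below is invented for illustration and is not part of this patch; only the nvkm_chid_* helpers and fifo->chid come from the series.

/*
 * Hypothetical usage sketch only -- not part of this patch.
 */
static int
example_vgpu_alloc_chids(struct nvkm_fifo *fifo, int count, int *first)
{
	/* Carve 'count' contiguous CHIDs out of the reserved region. */
	int start = nvkm_chid_reserved_alloc(fifo->chid, count);

	if (start < 0)
		return -ENOSPC;

	*first = start;
	return 0;
}

static void
example_vgpu_free_chids(struct nvkm_fifo *fifo, int first, int count)
{
	/* Hand the CHIDs back to the reserved pool on vGPU teardown. */
	nvkm_chid_reserved_free(fifo->chid, first, count);
}

With the r535_fifo_runl_ctor() setup above, such allocations would come out of CHIDs 512..2047, while nvkm keeps CHIDs 0..511 for its own channels.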