[3/4] drm/amdkfd: Encapsulate KQ functions in ops structure

Message ID 1421147958-15283-4-git-send-email-oded.gabbay@amd.com (mailing list archive)
State New, archived

Commit Message

Oded Gabbay Jan. 13, 2015, 11:19 a.m. UTC
This patch reorganizes the kernel_queue structure. It moves all of the
function pointers out of that structure and into a new structure called
kernel_queue_ops, and then embeds an instance of kernel_queue_ops inside
kernel_queue.

This reorganization prepares the KQ module to support more than one AMD APU
(currently only Kaveri is supported).

Signed-off-by: Oded Gabbay <oded.gabbay@amd.com>
---
 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c   | 24 +++++++++----------
 drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h   | 31 +++++++++++++++++++++++--
 drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 26 ++++++++++-----------
 3 files changed, 54 insertions(+), 27 deletions(-)
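
For quick orientation, here is a condensed before/after sketch of the layout
change (abbreviated; only members visible in the hunks below are shown, all
other members are elided):

/* Before: the function pointers live directly in struct kernel_queue. */
struct kernel_queue {
	/* interface */
	bool	(*initialize)(struct kernel_queue *kq, struct kfd_dev *dev,
			enum kfd_queue_type type, unsigned int queue_size);
	void	(*uninitialize)(struct kernel_queue *kq);
	/* ... remaining ops ... */

	/* data */
	struct kfd_dev		*dev;
	/* ... remaining data members ... */
};

/* After: the ops are grouped in their own structure and embedded as kq->ops,
 * so callers invoke kq->ops.initialize(), kq->ops.submit_packet(), etc.
 */
struct kernel_queue_ops {
	bool	(*initialize)(struct kernel_queue *kq, struct kfd_dev *dev,
			enum kfd_queue_type type, unsigned int queue_size);
	void	(*uninitialize)(struct kernel_queue *kq);
	/* ... remaining ops ... */
};

struct kernel_queue {
	struct kernel_queue_ops ops;

	/* data */
	struct kfd_dev		*dev;
	/* ... remaining data members ... */
};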

Patch

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index add0fb4..731635d 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -293,14 +293,14 @@  struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
 	if (!kq)
 		return NULL;
 
-	kq->initialize = initialize;
-	kq->uninitialize = uninitialize;
-	kq->acquire_packet_buffer = acquire_packet_buffer;
-	kq->submit_packet = submit_packet;
-	kq->sync_with_hw = sync_with_hw;
-	kq->rollback_packet = rollback_packet;
-
-	if (kq->initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
+	kq->ops.initialize = initialize;
+	kq->ops.uninitialize = uninitialize;
+	kq->ops.acquire_packet_buffer = acquire_packet_buffer;
+	kq->ops.submit_packet = submit_packet;
+	kq->ops.sync_with_hw = sync_with_hw;
+	kq->ops.rollback_packet = rollback_packet;
+
+	if (kq->ops.initialize(kq, dev, type, KFD_KERNEL_QUEUE_SIZE) == false) {
 		pr_err("kfd: failed to init kernel queue\n");
 		kfree(kq);
 		return NULL;
@@ -312,7 +312,7 @@  void kernel_queue_uninit(struct kernel_queue *kq)
 {
 	BUG_ON(!kq);
 
-	kq->uninitialize(kq);
+	kq->ops.uninitialize(kq);
 	kfree(kq);
 }
 
@@ -329,12 +329,12 @@  static __attribute__((unused)) void test_kq(struct kfd_dev *dev)
 	kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_HIQ);
 	BUG_ON(!kq);
 
-	retval = kq->acquire_packet_buffer(kq, 5, &buffer);
+	retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
 	BUG_ON(retval != 0);
 	for (i = 0; i < 5; i++)
 		buffer[i] = kq->nop_packet;
-	kq->submit_packet(kq);
-	kq->sync_with_hw(kq, 1000);
+	kq->ops.submit_packet(kq);
+	kq->ops.sync_with_hw(kq, 1000);
 
 	pr_debug("kfd: ending kernel queue test\n");
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index dcd2bdb..e01b77b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -28,8 +28,31 @@ 
 #include <linux/types.h>
 #include "kfd_priv.h"
 
-struct kernel_queue {
-	/* interface */
+/**
+ * struct kernel_queue_ops
+ *
+ * @initialize: Initialize a kernel queue, including allocations of GART memory
+ * needed for the queue.
+ *
+ * @uninitialize: Uninitialize a kernel queue and free all the memory it uses.
+ *
+ * @acquire_packet_buffer: Returns a pointer to the location in the kernel
+ * queue ring buffer where the calling function can write its packet. It is
+ * guaranteed that there is enough space for that packet. It also updates the
+ * pending write pointer to that location so that subsequent calls to
+ * acquire_packet_buffer will get a correct write pointer.
+ *
+ * @submit_packet: Update the write pointer and doorbell of a kernel queue.
+ *
+ * @sync_with_hw: Wait until the write pointer and the read pointer of a kernel
+ * queue are equal, which means the CP has read all the submitted packets.
+ *
+ * @rollback_packet: This routine is called if we failed to build an acquired
+ * packet for some reason. It just overwrites the pending wptr with the current
+ * one.
+ *
+ */
+struct kernel_queue_ops {
 	bool	(*initialize)(struct kernel_queue *kq, struct kfd_dev *dev,
 			enum kfd_queue_type type, unsigned int queue_size);
 	void	(*uninitialize)(struct kernel_queue *kq);
@@ -41,6 +64,10 @@  struct kernel_queue {
 	int	(*sync_with_hw)(struct kernel_queue *kq,
 				unsigned long timeout_ms);
 	void	(*rollback_packet)(struct kernel_queue *kq);
+};
+
+struct kernel_queue {
+	struct kernel_queue_ops ops;
 
 	/* data */
 	struct kfd_dev		*dev;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index 3cda952..5fb5c03 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -348,7 +348,7 @@  int pm_send_set_resources(struct packet_manager *pm,
 	pr_debug("kfd: In func %s\n", __func__);
 
 	mutex_lock(&pm->lock);
-	pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
+	pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
 					sizeof(*packet) / sizeof(uint32_t),
 			(unsigned int **)&packet);
 	if (packet == NULL) {
@@ -375,8 +375,8 @@  int pm_send_set_resources(struct packet_manager *pm,
 	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
 	packet->queue_mask_hi = upper_32_bits(res->queue_mask);
 
-	pm->priv_queue->submit_packet(pm->priv_queue);
-	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+	pm->priv_queue->ops.submit_packet(pm->priv_queue);
+	pm->priv_queue->ops.sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
 
 	mutex_unlock(&pm->lock);
 
@@ -402,7 +402,7 @@  int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 	packet_size_dwords = sizeof(struct pm4_runlist) / sizeof(uint32_t);
 	mutex_lock(&pm->lock);
 
-	retval = pm->priv_queue->acquire_packet_buffer(pm->priv_queue,
+	retval = pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
 					packet_size_dwords, &rl_buffer);
 	if (retval != 0)
 		goto fail_acquire_packet_buffer;
@@ -412,15 +412,15 @@  int pm_send_runlist(struct packet_manager *pm, struct list_head *dqm_queues)
 	if (retval != 0)
 		goto fail_create_runlist;
 
-	pm->priv_queue->submit_packet(pm->priv_queue);
-	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+	pm->priv_queue->ops.submit_packet(pm->priv_queue);
+	pm->priv_queue->ops.sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
 
 	mutex_unlock(&pm->lock);
 
 	return retval;
 
 fail_create_runlist:
-	pm->priv_queue->rollback_packet(pm->priv_queue);
+	pm->priv_queue->ops.rollback_packet(pm->priv_queue);
 fail_acquire_packet_buffer:
 	mutex_unlock(&pm->lock);
 fail_create_runlist_ib:
@@ -438,7 +438,7 @@  int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 	BUG_ON(!pm || !fence_address);
 
 	mutex_lock(&pm->lock);
-	retval = pm->priv_queue->acquire_packet_buffer(
+	retval = pm->priv_queue->ops.acquire_packet_buffer(
 			pm->priv_queue,
 			sizeof(struct pm4_query_status) / sizeof(uint32_t),
 			(unsigned int **)&packet);
@@ -459,8 +459,8 @@  int pm_send_query_status(struct packet_manager *pm, uint64_t fence_address,
 	packet->data_hi = upper_32_bits((uint64_t)fence_value);
 	packet->data_lo = lower_32_bits((uint64_t)fence_value);
 
-	pm->priv_queue->submit_packet(pm->priv_queue);
-	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+	pm->priv_queue->ops.submit_packet(pm->priv_queue);
+	pm->priv_queue->ops.sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
 	mutex_unlock(&pm->lock);
 
 	return 0;
@@ -482,7 +482,7 @@  int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 	BUG_ON(!pm);
 
 	mutex_lock(&pm->lock);
-	retval = pm->priv_queue->acquire_packet_buffer(
+	retval = pm->priv_queue->ops.acquire_packet_buffer(
 			pm->priv_queue,
 			sizeof(struct pm4_unmap_queues) / sizeof(uint32_t),
 			&buffer);
@@ -537,8 +537,8 @@  int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_queue_type type,
 		break;
 	};
 
-	pm->priv_queue->submit_packet(pm->priv_queue);
-	pm->priv_queue->sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
+	pm->priv_queue->ops.submit_packet(pm->priv_queue);
+	pm->priv_queue->ops.sync_with_hw(pm->priv_queue, KFD_HIQ_TIMEOUT);
 
 	mutex_unlock(&pm->lock);
 	return 0;
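
A usage note (not part of the patch): after this change, every caller reaches
the kernel queue functions through the embedded ops table. Below is a minimal
caller-side sketch modeled on test_kq() above; example_submit_nops() is a
hypothetical helper written for illustration, not code from the patch.

static int example_submit_nops(struct kernel_queue *kq)
{
	unsigned int *buffer;
	unsigned int i;
	int retval;

	/* Reserve room for five dwords in the kernel queue ring buffer. */
	retval = kq->ops.acquire_packet_buffer(kq, 5, &buffer);
	if (retval != 0)
		return retval;

	/* Fill the reserved space with NOP packets. */
	for (i = 0; i < 5; i++)
		buffer[i] = kq->nop_packet;

	/* Update the write pointer/doorbell and wait for the CP to catch up. */
	kq->ops.submit_packet(kq);
	return kq->ops.sync_with_hw(kq, 1000);
}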