
[15/34] drm/amdkfd: Convert event_idr to XArray

Message ID: 20190221184226.2149-31-willy@infradead.org (mailing list archive)
State: New, archived
Series: Convert DRM to XArray

Commit Message

Matthew Wilcox Feb. 21, 2019, 6:41 p.m. UTC
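Replace the per-process event IDR with an XArray for event ID allocation
and lookup. idr_alloc() becomes xa_alloc(), whose inclusive XA_LIMIT
bounds absorb the previous +1/-1 adjustments and the uint32_t cast,
idr_find() becomes xa_load(), and the idr_for_each_entry*() loops become
xa_for_each*() with an unsigned long index. Locking is unchanged; event
state is still serialized by p->event_mutex.
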
Signed-off-by: Matthew Wilcox <willy@infradead.org>
---
 drivers/gpu/drm/amd/amdkfd/kfd_events.c | 71 ++++++++++---------------
 drivers/gpu/drm/amd/amdkfd/kfd_priv.h   |  2 +-
 2 files changed, 30 insertions(+), 43 deletions(-)

Patch

diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index e9f0e0a1b41c..28adfb52d7ca 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -94,7 +94,7 @@ static struct kfd_signal_page *allocate_signal_page(struct kfd_process *p)
 static int allocate_event_notification_slot(struct kfd_process *p,
 					    struct kfd_event *ev)
 {
-	int id;
+	int err;
 
 	if (!p->signal_page) {
 		p->signal_page = allocate_signal_page(p);
@@ -110,13 +110,12 @@ static int allocate_event_notification_slot(struct kfd_process *p,
 	 * KFD_SIGNAL_EVENT_LIMIT. This also allows future increase
 	 * of the event limit without breaking user mode.
 	 */
-	id = idr_alloc(&p->event_idr, ev, 0, p->signal_mapped_size / 8,
-		       GFP_KERNEL);
-	if (id < 0)
-		return id;
+	err = xa_alloc(&p->events, &ev->event_id, ev,
+			XA_LIMIT(0, p->signal_mapped_size / 8 - 1), GFP_KERNEL);
+	if (err < 0)
+		return err;
 
-	ev->event_id = id;
-	page_slots(p->signal_page)[id] = UNSIGNALED_EVENT_SLOT;
+	page_slots(p->signal_page)[ev->event_id] = UNSIGNALED_EVENT_SLOT;
 
 	return 0;
 }
@@ -127,7 +126,7 @@ static int allocate_event_notification_slot(struct kfd_process *p,
  */
 static struct kfd_event *lookup_event_by_id(struct kfd_process *p, uint32_t id)
 {
-	return idr_find(&p->event_idr, id);
+	return xa_load(&p->events, id);
 }
 
 /**
@@ -162,7 +161,7 @@ static struct kfd_event *lookup_signaled_event_by_partial_id(
 		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
 			return NULL;
 
-		return idr_find(&p->event_idr, id);
+		return xa_load(&p->events, id);
 	}
 
 	/* General case for partial IDs: Iterate over all matching IDs
@@ -172,7 +171,7 @@ static struct kfd_event *lookup_signaled_event_by_partial_id(
 		if (page_slots(p->signal_page)[id] == UNSIGNALED_EVENT_SLOT)
 			continue;
 
-		ev = idr_find(&p->event_idr, id);
+		ev = xa_load(&p->events, id);
 	}
 
 	return ev;
@@ -211,26 +210,15 @@ static int create_signal_event(struct file *devkfd,
 
 static int create_other_event(struct kfd_process *p, struct kfd_event *ev)
 {
-	/* Cast KFD_LAST_NONSIGNAL_EVENT to uint32_t. This allows an
-	 * intentional integer overflow to -1 without a compiler
-	 * warning. idr_alloc treats a negative value as "maximum
-	 * signed integer".
-	 */
-	int id = idr_alloc(&p->event_idr, ev, KFD_FIRST_NONSIGNAL_EVENT_ID,
-			   (uint32_t)KFD_LAST_NONSIGNAL_EVENT_ID + 1,
-			   GFP_KERNEL);
-
-	if (id < 0)
-		return id;
-	ev->event_id = id;
-
-	return 0;
+	return xa_alloc(&p->events, &ev->event_id, ev,
+			XA_LIMIT(KFD_FIRST_NONSIGNAL_EVENT_ID,
+				KFD_LAST_NONSIGNAL_EVENT_ID), GFP_KERNEL);
 }
 
 void kfd_event_init_process(struct kfd_process *p)
 {
 	mutex_init(&p->event_mutex);
-	idr_init(&p->event_idr);
+	xa_init_flags(&p->events, XA_FLAGS_ALLOC);
 	p->signal_page = NULL;
 	p->signal_event_count = 0;
 }
@@ -248,18 +236,18 @@ static void destroy_event(struct kfd_process *p, struct kfd_event *ev)
 	    ev->type == KFD_EVENT_TYPE_DEBUG)
 		p->signal_event_count--;
 
-	idr_remove(&p->event_idr, ev->event_id);
+	xa_erase(&p->events, ev->event_id);
 	kfree(ev);
 }
 
 static void destroy_events(struct kfd_process *p)
 {
 	struct kfd_event *ev;
-	uint32_t id;
+	unsigned long id;
 
-	idr_for_each_entry(&p->event_idr, ev, id)
+	xa_for_each(&p->events, id, ev)
 		destroy_event(p, ev);
-	idr_destroy(&p->event_idr);
+	xa_destroy(&p->events);
 }
 
 /*
@@ -490,7 +478,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
 		 * exhaustive search of signaled events.
 		 */
 		uint64_t *slots = page_slots(p->signal_page);
-		uint32_t id;
+		unsigned long id;
 
 		if (valid_id_bits)
 			pr_debug_ratelimited("Partial ID invalid: %u (%u valid bits)\n",
@@ -498,9 +486,9 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
 
 		if (p->signal_event_count < KFD_SIGNAL_EVENT_LIMIT / 64) {
 			/* With relatively few events, it's faster to
-			 * iterate over the event IDR
+			 * iterate over the event array
 			 */
-			idr_for_each_entry(&p->event_idr, ev, id) {
+			xa_for_each(&p->events, id, ev) {
 				if (id >= KFD_SIGNAL_EVENT_LIMIT)
 					break;
 
@@ -510,7 +498,7 @@ void kfd_signal_event_interrupt(unsigned int pasid, uint32_t partial_id,
 		} else {
 			/* With relatively many events, it's faster to
 			 * iterate over the signal slots and lookup
-			 * only signaled events from the IDR.
+			 * only signaled events from the array.
 			 */
 			for (id = 0; id < KFD_SIGNAL_EVENT_LIMIT; id++)
 				if (slots[id] != UNSIGNALED_EVENT_SLOT) {
@@ -833,13 +821,12 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
 {
 	struct kfd_hsa_memory_exception_data *ev_data;
 	struct kfd_event *ev;
-	uint32_t id;
+	unsigned long id;
 	bool send_signal = true;
 
 	ev_data = (struct kfd_hsa_memory_exception_data *) event_data;
 
-	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
-	idr_for_each_entry_continue(&p->event_idr, ev, id)
+	xa_for_each_start(&p->events, id, ev, KFD_FIRST_NONSIGNAL_EVENT_ID)
 		if (ev->type == type) {
 			send_signal = false;
 			dev_dbg(kfd_device,
@@ -975,7 +962,7 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
 				struct kfd_vm_fault_info *info)
 {
 	struct kfd_event *ev;
-	uint32_t id;
+	unsigned long id;
 	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
 	struct kfd_hsa_memory_exception_data memory_exception_data;
 
@@ -997,8 +984,7 @@ void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
 	}
 	mutex_lock(&p->event_mutex);
 
-	id = KFD_FIRST_NONSIGNAL_EVENT_ID;
-	idr_for_each_entry_continue(&p->event_idr, ev, id)
+	xa_for_each_start(&p->events, id, ev, KFD_FIRST_NONSIGNAL_EVENT_ID)
 		if (ev->type == KFD_EVENT_TYPE_MEMORY) {
 			ev->memory_exception_data = memory_exception_data;
 			set_event(ev);
@@ -1014,7 +1000,8 @@ void kfd_signal_reset_event(struct kfd_dev *dev)
 	struct kfd_process *p;
 	struct kfd_event *ev;
 	unsigned int temp;
-	uint32_t id, idx;
+	unsigned long id;
+	int idx;
 
 	/* Whole gpu reset caused by GPU hang and memory is lost */
 	memset(&hw_exception_data, 0, sizeof(hw_exception_data));
@@ -1024,8 +1011,8 @@ void kfd_signal_reset_event(struct kfd_dev *dev)
 	idx = srcu_read_lock(&kfd_processes_srcu);
 	hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
 		mutex_lock(&p->event_mutex);
-		id = KFD_FIRST_NONSIGNAL_EVENT_ID;
-		idr_for_each_entry_continue(&p->event_idr, ev, id)
+		xa_for_each_start(&p->events, id, ev,
+				KFD_FIRST_NONSIGNAL_EVENT_ID)
 			if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
 				ev->hw_exception_data = hw_exception_data;
 				set_event(ev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 0689d4ccbbc0..9878abc6d847 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -657,7 +657,7 @@ struct kfd_process {
 	/* Event-related data */
 	struct mutex event_mutex;
 	/* Event ID allocator and lookup */
-	struct idr event_idr;
+	struct xarray events;
 	/* Event page */
 	struct kfd_signal_page *signal_page;
 	size_t signal_mapped_size;
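
The conversion hinges on two idiom changes. idr_alloc() takes an
exclusive upper bound, while xa_alloc() with XA_LIMIT() is inclusive on
both ends; that is why the signal-event limit becomes
p->signal_mapped_size / 8 - 1 and the + 1 after
KFD_LAST_NONSIGNAL_EVENT_ID disappears. The iteration loops likewise
move from idr_for_each_entry*() with a u32 index to xa_for_each*() with
an unsigned long index. A minimal sketch of the new idioms, outside the
patch (demo_xa, struct demo_ev and the demo_* functions are hypothetical
names, assuming the post-rework xa_alloc() API):

#include <linux/xarray.h>

struct demo_ev {
	u32 id;			/* xa_alloc() stores the allocated index here */
};

/* Marked XA_FLAGS_ALLOC, as xa_init_flags(&p->events, XA_FLAGS_ALLOC)
 * does in kfd_event_init_process(). */
static DEFINE_XARRAY_ALLOC(demo_xa);

static int demo_create(struct demo_ev *ev, u32 first, u32 last)
{
	/* Old: id = idr_alloc(&idr, ev, first, last + 1, GFP_KERNEL);
	 *      'end' is exclusive and the ID is the return value.
	 * New: both limits are inclusive, the ID comes back through the
	 *      second argument, and the return value is 0 or a negative
	 *      errno. */
	return xa_alloc(&demo_xa, &ev->id, ev, XA_LIMIT(first, last),
			GFP_KERNEL);
}

static void demo_walk(unsigned long start)
{
	struct demo_ev *ev;
	unsigned long id;	/* XArray indices are unsigned long */

	/* Old: id = start; idr_for_each_entry_continue(&idr, ev, id) ...
	 * New: starts at 'start' and visits entries in index order. */
	xa_for_each_start(&demo_xa, id, ev, start) {
		/* a single lookup is xa_load(&demo_xa, id) */
	}
}

Note that the XArray's internal spinlock only protects the array itself;
as in the IDR version, the patch keeps serializing the surrounding event
state under p->event_mutex.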