diff mbox

[RFC,6/7] lib/scatterlist: Add sg_trim_table

Message ID 1476349444-7331-7-git-send-email-tvrtko.ursulin@linux.intel.com (mailing list archive)
State New, archived
Headers show

Commit Message

Tvrtko Ursulin Oct. 13, 2016, 9:04 a.m. UTC
From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>

*** BROKEN *** RFC only *** BROKEN ***

In cases where it is hard to know the minimum number of entries
the table will need to hold at sg_alloc_table() time, and we end
up with a table with unused sg entries at its end, this function
will trim (free) the unused sg entry blocks and adjust
table->orig_nents down.

Signed-off-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
---
 include/linux/scatterlist.h |  2 ++
 lib/scatterlist.c           | 53 +++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 55 insertions(+)

Comments

Chris Wilson Oct. 13, 2016, 9:23 a.m. UTC | #1
On Thu, Oct 13, 2016 at 10:04:03AM +0100, Tvrtko Ursulin wrote:
> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
> 
> *** BROKEN *** RFC only *** BROKEN ***
> 
> In cases where it is hard to know to minimum numbers of ents
> table will need to hold at sg_alloc_time, and we end up with
> a table with unused sg entries at its end, this function will
> trim (free) the unused sg entry blocks and adjust the
> table->orig_nents down.

An alternative I had in mind, was to allocate the new chunks
iteratively, i.e. as we run out of room in i915_sg_add_page.
-Chris
Tvrtko Ursulin Oct. 13, 2016, 9:51 a.m. UTC | #2
On 13/10/2016 10:23, Chris Wilson wrote:
> On Thu, Oct 13, 2016 at 10:04:03AM +0100, Tvrtko Ursulin wrote:
>> From: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
>>
>> *** BROKEN *** RFC only *** BROKEN ***
>>
>> In cases where it is hard to know to minimum numbers of ents
>> table will need to hold at sg_alloc_time, and we end up with
>> a table with unused sg entries at its end, this function will
>> trim (free) the unused sg entry blocks and adjust the
>> table->orig_nents down.
> An alternative I had in mind, was to allocate the new chunks
> iteratively, i.e. as we run out of room in i915_sg_add_page.

Thought about it but did not like having to handle another source of 
failures. Hm, don't know. Will think about it.

Regards,

Tvrtko
diff mbox

Patch

diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index cb3c8fe6acd7..b344ecc8eddf 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -266,6 +266,8 @@  int sg_alloc_table_from_pages(struct sg_table *sgt,
 	unsigned long offset, unsigned long size,
 	gfp_t gfp_mask);
 
+void sg_trim_table(struct sg_table *);
+
 size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
 		      size_t buflen, off_t skip, bool to_buffer);
 
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 004fc70fc56a..748b1b9a197e 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -243,6 +243,59 @@  void __sg_free_table(struct sg_table *table, unsigned int max_ents,
 EXPORT_SYMBOL(__sg_free_table);
 
 /**
+ * sg_trim_table - Free unused sg table entries at the end of the table
+ * @table:	The sg table header to use
+ *
+ *  Description:
+ *    In cases where it is hard to know the minimum number of entries the
+ *    table will need to hold at sg_alloc_table() time, and the table ends
+ *    up with unused sg entries at its end, this function frees the wholly
+ *    unused sg entry blocks (allocation chunks) and adjusts
+ *    table->orig_nents down.
+ *
+ *    NOTE(review): posted as *** BROKEN *** RFC; see inline notes below
+ *    before relying on this.
+ *
+ **/
+void sg_trim_table(struct sg_table *table)
+{
+	struct scatterlist *sgl, *next, *prev = NULL;
+	unsigned int seen_nents = 0;
+	const unsigned int max_ents = SG_MAX_SINGLE_ALLOC;
+
+	/* Walk the chained table one allocation chunk at a time. */
+	sgl = table->sgl;
+	while (sgl) {
+		/*
+		 * NOTE(review): alloc_size is re-read from table->orig_nents
+		 * on every iteration, but orig_nents is only decremented for
+		 * chunks that get freed.  While chunks are being kept the
+		 * remaining count never shrinks, so a final short chunk can
+		 * still satisfy alloc_size > max_ents below and be treated
+		 * as chained -- presumably one reason this RFC is marked
+		 * BROKEN.  Compare with __sg_free_table(), which carries the
+		 * decremented count across iterations.
+		 */
+		unsigned int alloc_size = table->orig_nents;
+		unsigned int sg_size;
+
+		/*
+		 * If we have more than max_ents segments left,
+		 * then assign 'next' to the sg table after the current one.
+		 * sg_size is then one less than alloc size, since the last
+		 * element is the chain pointer.
+		 */
+		if (alloc_size > max_ents) {
+			next = sg_chain_ptr(&sgl[max_ents - 1]);
+			alloc_size = max_ents;
+			sg_size = alloc_size - 1;
+		} else {
+			sg_size = alloc_size;
+			next = NULL;
+		}
+
+		/*
+		 * A chunk whose first entry index is at or past the number
+		 * of used entries (table->nents) holds no used entries and
+		 * is freed wholesale; orig_nents shrinks accordingly.
+		 */
+		if (seen_nents >= table->nents) {
+			/*
+			 * NOTE(review): prev points at the *first* entry of
+			 * the last kept chunk, not at its last used entry,
+			 * so sg_mark_end() here marks the wrong element as
+			 * the list end; the kept chunk's chain entry is also
+			 * left pointing at the memory freed below.  Verify
+			 * both before use.
+			 */
+			if (prev)
+				sg_mark_end(prev);
+			prev = NULL;
+			table->orig_nents -= sg_size;
+			sg_kfree(sgl, alloc_size);
+		} else {
+			prev = sgl;
+		}
+
+		seen_nents += sg_size;
+		sgl = next;
+	}
+}
+EXPORT_SYMBOL(sg_trim_table);
+
+/**
  * sg_free_table - Free a previously allocated sg table
  * @table:	The mapped sg table header
  *