Message ID | 36d655a0ff45f4c86762358c7b6a7b58939313fb.1626605893.git.leonro@nvidia.com (mailing list archive) |
---|---|
State | New, archived |
Headers | show |
Series | SG fix together with update to RDMA umem | expand |
Looks good,
Reviewed-by: Christoph Hellwig <hch@lst.de>
On Sun, Jul 18, 2021 at 02:09:12PM +0300, Leon Romanovsky wrote: > @@ -386,12 +414,14 @@ static struct scatterlist *get_next_sg(struct sg_table *table, > return ERR_PTR(-ENOMEM); > sg_init_table(new_sg, alloc_size); > if (cur) { > + if (total_nents) > + *total_nents += alloc_size - 1; > __sg_chain(next_sg, new_sg); > - table->orig_nents += alloc_size - 1; > } else { > table->sgl = new_sg; > - table->orig_nents = alloc_size; > table->nents = 0; Why does this still touch nents? > @@ -515,6 +548,7 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, > cur_page = j; > } > sgt->nents += added_nents; > + sgt->orig_nents = sgt->nents; And here too? nents should only be set by the dma mapper, right? I'm also trying to understand why it is OK to pass in NULL for total_nents? Any situation where _sg_alloc_table_from_pages() returns with sgt->orig_nents != total_nents requires the use of sg_free_table_entries() It looks like there is some trouble here: for (i = 0; i < chunks; i++) { s = get_next_sg(sgt, s, chunks - i + left_pages, gfp_mask, total_nents); if (IS_ERR(s)) { This will update total_nents but after a few loops it can exit without synchronizing sgt->orig_nents - thus any caller handling an error return from __sg_alloc_table_from_pages() must not pass in NULL and must use sg_free_table_entries() So I would see two options: 1) Remove the possibility to return NULL and fix all callers to use sg_free_table_entries() on error 2) Once __sg_alloc_table_from_pages() fails the sg_table is corrupted and the user must call sg_free_table_entries(). ie forcibly store total_nents in the orig_nents and thus destroy the ability to continue to use the sg_table. This is what sg_alloc_table_from_pages() already has to do Further upon success of __sg_alloc_table_from_pages() it should be true that sgt->orig_nents == total_nents so the ib_umem change is confusing. 
total_nents should be removed from the struct and only the failure paths in the function calling __sg_alloc_table_from_pages() need a stack local variable and sg_free_table_entries() IMHO this API may have become unwieldy and complicated, I wonder if this is better: struct sg_append_table state; sg_append_init(&state, sgt, gfp_mask); while (..) ret = sg_append_pages(&state, pages, n_pages, ..) if (ret) sg_append_abort(&state); // Frees the sgt and puts it to NULL sg_append_complete(&state) Which allows sg_alloc_table_from_pages() to be written as struct sg_append_table state; sg_append_init(&state, sgt, gfp_mask); ret = sg_append_pages(&state,pages, n_pages, offset, size, UINT_MAX) if (ret) { sg_append_abort(&state); return ret; } sg_append_complete(&state); return 0; And then the API can manage all of this in some sane and understandable way. Jason
On Thu, Jul 22, 2021 at 10:00:40AM -0300, Jason Gunthorpe wrote: > this is better: > > struct sg_append_table state; > > sg_append_init(&state, sgt, gfp_mask); > > while (..) > ret = sg_append_pages(&state, pages, n_pages, ..) > if (ret) > sg_append_abort(&state); // Frees the sgt and puts it to NULL > sg_append_complete(&state) > > Which allows sg_alloc_table_from_pages() to be written as > > struct sg_append_table state; > sg_append_init(&state, sgt, gfp_mask); > ret = sg_append_pages(&state,pages, n_pages, offset, size, UINT_MAX) > if (ret) { > sg_append_abort(&state); > return ret; > } > sg_append_complete(&state); > return 0; > > And then the API can manage all of this in some sane and > understandable way. That would be a lot easier to use for sure. Not sure how invasive the changes would be, though.
On Thu, Jul 22, 2021 at 02:07:51PM +0100, Christoph Hellwig wrote: > On Thu, Jul 22, 2021 at 10:00:40AM -0300, Jason Gunthorpe wrote: > > this is better: > > > > struct sg_append_table state; > > > > sg_append_init(&state, sgt, gfp_mask); > > > > while (..) > > ret = sg_append_pages(&state, pages, n_pages, ..) > > if (ret) > > sg_append_abort(&state); // Frees the sgt and puts it to NULL > > sg_append_complete(&state) > > > > Which allows sg_alloc_table_from_pages() to be written as > > > > struct sg_append_table state; > > sg_append_init(&state, sgt, gfp_mask); > > ret = sg_append_pages(&state,pages, n_pages, offset, size, UINT_MAX) > > if (ret) { > > sg_append_abort(&state); > > return ret; > > } > > sg_append_complete(&state); > > return 0; > > > > And then the API can manage all of this in some sane and > > understandable way. > > That would be a lot easier to use for sure. Not sure how invasive the > changes would be, though. Looks pretty good, Maor can you try it? diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 1739d10a2c556e..6c8e761ab389e8 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -806,27 +806,27 @@ static const struct dma_buf_ops drm_gem_prime_dmabuf_ops = { struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, struct page **pages, unsigned int nr_pages) { - struct sg_table *sg; - struct scatterlist *sge; + struct sg_table *sgt; size_t max_segment = 0; + int ret; - sg = kmalloc(sizeof(struct sg_table), GFP_KERNEL); - if (!sg) + sgt = kmalloc(sizeof(struct sg_table), GFP_KERNEL); + if (!sgt) return ERR_PTR(-ENOMEM); if (dev) max_segment = dma_max_mapping_size(dev->dev); if (max_segment == 0) max_segment = UINT_MAX; - sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0, - nr_pages << PAGE_SHIFT, - max_segment, - NULL, 0, GFP_KERNEL, NULL); - if (IS_ERR(sge)) { - kfree(sg); - sg = ERR_CAST(sge); + + ret = sg_alloc_table_from_pages_segment(sgt, pages, nr_pages, 0, + nr_pages << 
PAGE_SHIFT, + max_segment, GFP_KERNEL); + if (ret) { + kfree(sgt); + return ERR_PTR(ret); } - return sg; + return sgt; } EXPORT_SYMBOL(drm_prime_pages_to_sg); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index c341d3e3386ccb..a2c5e1b30f425f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -131,6 +131,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) struct drm_i915_private *i915 = to_i915(obj->base.dev); const unsigned long num_pages = obj->base.size >> PAGE_SHIFT; unsigned int max_segment = i915_sg_segment_size(); + struct sg_append_table state; struct sg_table *st; unsigned int sg_page_sizes; struct scatterlist *sg; @@ -153,13 +154,11 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) spin_unlock(&i915->mm.notifier_lock); alloc_table: - sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0, - num_pages << PAGE_SHIFT, max_segment, - NULL, 0, GFP_KERNEL, NULL); - if (IS_ERR(sg)) { - ret = PTR_ERR(sg); + ret = sg_alloc_table_from_pages_segment(st, pvec, num_pages, 0, + num_pages << PAGE_SHIFT, + max_segment, GFP_KERNEL); + if (ret) goto err; - } ret = i915_gem_gtt_prepare_pages(obj, st); if (ret) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 2ad889272b0a15..56214bcc6c5280 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -386,15 +386,12 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) if (unlikely(ret != 0)) return ret; - sg = __sg_alloc_table_from_pages(&vmw_tt->sgt, vsgt->pages, - vsgt->num_pages, 0, - (unsigned long) vsgt->num_pages << PAGE_SHIFT, - dma_get_max_seg_size(dev_priv->drm.dev), - NULL, 0, GFP_KERNEL, NULL); - if (IS_ERR(sg)) { - ret = PTR_ERR(sg); + ret = sg_alloc_table_from_pages_segment( + vmw_tt->sgt, vsgt->pages, vsgt->num_pages, 0, + (unsigned 
long)vsgt->num_pages << PAGE_SHIFT, + dma_get_max_seg_size(dev_priv->drm.dev), GFP_KERNEL); + if (ret) goto out_sg_alloc_fail; - } if (vsgt->num_pages > vmw_tt->sgt.orig_nents) { uint64_t over_alloc = diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 5cc41ae962ec1d..53de1ec77326be 100644 --- a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -556,6 +556,25 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, } EXPORT_SYMBOL(__sg_alloc_table_from_pages); +int sg_alloc_table_from_pages_segment(struct sg_table *sgt, struct page **pages, + unsigned int n_pages, unsigned int offset, + unsigned long size, + unsigned int max_segment, gfp_t gfp_mask) +{ + struct sg_append_table state; + int ret; + + sg_append_init(&state, sgt, max_segment, gfp_mask); + ret = sg_append_pages(&state, pages, n_pages, offset, size); + if (ret) { + sg_append_abort(&state); + return ret; + } + sg_append_complete(&state); + return 0; +} +EXPORT_SYMBOL(sg_alloc_table_from_pages_segment); + /** * sg_alloc_table_from_pages - Allocate and initialize an sg table from * an array of pages @@ -580,8 +599,8 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, gfp_t gfp_mask) { - return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages, - offset, size, UINT_MAX, NULL, 0, gfp_mask, NULL)); + return sg_alloc_table_from_pages_segment(sgt, pages, n_pages, offset, + size, UINT_MAX, gfp_mask); } EXPORT_SYMBOL(sg_alloc_table_from_pages);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c index 2a54f86856af..1739d10a2c55 100644 --- a/drivers/gpu/drm/drm_prime.c +++ b/drivers/gpu/drm/drm_prime.c @@ -821,7 +821,7 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev, sge = __sg_alloc_table_from_pages(sg, pages, nr_pages, 0, nr_pages << PAGE_SHIFT, max_segment, - NULL, 0, GFP_KERNEL); + NULL, 0, GFP_KERNEL, NULL); if (IS_ERR(sge)) { kfree(sg); sg = ERR_CAST(sge); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 7487bab11f0b..c341d3e3386c 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -155,7 +155,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj) alloc_table: sg = __sg_alloc_table_from_pages(st, pvec, num_pages, 0, num_pages << PAGE_SHIFT, max_segment, - NULL, 0, GFP_KERNEL); + NULL, 0, GFP_KERNEL, NULL); if (IS_ERR(sg)) { ret = PTR_ERR(sg); goto err; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 0488042fb287..2ad889272b0a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -390,7 +390,7 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) vsgt->num_pages, 0, (unsigned long) vsgt->num_pages << PAGE_SHIFT, dma_get_max_seg_size(dev_priv->drm.dev), - NULL, 0, GFP_KERNEL); + NULL, 0, GFP_KERNEL, NULL); if (IS_ERR(sg)) { ret = PTR_ERR(sg); goto out_sg_alloc_fail; diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c index 0eb40025075f..cf4197363346 100644 --- a/drivers/infiniband/core/umem.c +++ b/drivers/infiniband/core/umem.c @@ -59,7 +59,7 @@ static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int d unpin_user_page_range_dirty_lock(sg_page(sg), DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty); - sg_free_table(&umem->sg_head); + sg_free_table_entries(&umem->sg_head, 
umem->total_nents); } /** @@ -229,8 +229,7 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr, sg = __sg_alloc_table_from_pages(&umem->sg_head, page_list, ret, 0, ret << PAGE_SHIFT, ib_dma_max_seg_size(device), sg, npages, - GFP_KERNEL); - umem->sg_nents = umem->sg_head.nents; + GFP_KERNEL, &umem->total_nents); if (IS_ERR(sg)) { unpin_user_pages_dirty_lock(page_list, ret, 0); ret = PTR_ERR(sg); diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h index ecf87484814f..c796c165d5ee 100644 --- a/include/linux/scatterlist.h +++ b/include/linux/scatterlist.h @@ -282,6 +282,7 @@ typedef void (sg_free_fn)(struct scatterlist *, unsigned int); void __sg_free_table(struct sg_table *, unsigned int, unsigned int, sg_free_fn *); void sg_free_table(struct sg_table *); +void sg_free_table_entries(struct sg_table *sgt, unsigned int num_entries); int __sg_alloc_table(struct sg_table *, unsigned int, unsigned int, struct scatterlist *, unsigned int, gfp_t, sg_alloc_fn *); int sg_alloc_table(struct sg_table *, unsigned int, gfp_t); @@ -289,7 +290,7 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, unsigned int max_segment, struct scatterlist *prv, unsigned int left_pages, - gfp_t gfp_mask); + gfp_t gfp_mask, unsigned int *total_nents); int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, gfp_t gfp_mask); diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h index 676c57f5ca80..5a65212efc2e 100644 --- a/include/rdma/ib_umem.h +++ b/include/rdma/ib_umem.h @@ -27,8 +27,7 @@ struct ib_umem { u32 is_dmabuf : 1; struct work_struct work; struct sg_table sg_head; - int nmap; - unsigned int sg_nents; + unsigned int total_nents; }; struct ib_umem_dmabuf { diff --git a/lib/scatterlist.c b/lib/scatterlist.c index 27efa6178153..5cc41ae962ec 100644 --- 
a/lib/scatterlist.c +++ b/lib/scatterlist.c @@ -175,22 +175,11 @@ static void sg_kfree(struct scatterlist *sg, unsigned int nents) kfree(sg); } -/** - * __sg_free_table - Free a previously mapped sg table - * @table: The sg table header to use - * @max_ents: The maximum number of entries per single scatterlist - * @nents_first_chunk: Number of entries int the (preallocated) first - * scatterlist chunk, 0 means no such preallocated first chunk - * @free_fn: Free function - * - * Description: - * Free an sg table previously allocated and setup with - * __sg_alloc_table(). The @max_ents value must be identical to - * that previously used with __sg_alloc_table(). - * - **/ -void __sg_free_table(struct sg_table *table, unsigned int max_ents, - unsigned int nents_first_chunk, sg_free_fn *free_fn) +static void __sg_free_table_entries(struct sg_table *table, + unsigned int max_ents, + unsigned int nents_first_chunk, + sg_free_fn *free_fn, + unsigned int num_entries) { struct scatterlist *sgl, *next; unsigned curr_max_ents = nents_first_chunk ?: max_ents; @@ -199,8 +188,8 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents, return; sgl = table->sgl; - while (table->orig_nents) { - unsigned int alloc_size = table->orig_nents; + while (num_entries) { + unsigned int alloc_size = num_entries; unsigned int sg_size; /* @@ -218,7 +207,7 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents, next = NULL; } - table->orig_nents -= sg_size; + num_entries -= sg_size; if (nents_first_chunk) nents_first_chunk = 0; else @@ -229,6 +218,43 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents, table->sgl = NULL; } + +/** + * sg_free_table_entries - Free a previously allocated sg table according to + * the total number of entries in the table. + * @table: The mapped sg table header + * @num_entries: The number of entries in the table. 
+ * + **/ +void sg_free_table_entries(struct sg_table *table, unsigned int num_entries) +{ + __sg_free_table_entries(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree, + num_entries); +} +EXPORT_SYMBOL(sg_free_table_entries); + +/** + * __sg_free_table - Free a previously mapped sg table + * @table: The sg table header to use + * @max_ents: The maximum number of entries per single scatterlist + * @total_ents: The total number of entries in the table + * @nents_first_chunk: Number of entries int the (preallocated) first + * scatterlist chunk, 0 means no such preallocated + * first chunk + * @free_fn: Free function + * + * Description: + * Free an sg table previously allocated and setup with + * __sg_alloc_table(). The @max_ents value must be identical to + * that previously used with __sg_alloc_table(). + * + **/ +void __sg_free_table(struct sg_table *table, unsigned int max_ents, + unsigned int nents_first_chunk, sg_free_fn *free_fn) +{ + __sg_free_table_entries(table, max_ents, nents_first_chunk, free_fn, + table->orig_nents); +} EXPORT_SYMBOL(__sg_free_table); /** @@ -238,7 +264,8 @@ EXPORT_SYMBOL(__sg_free_table); **/ void sg_free_table(struct sg_table *table) { - __sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree); + __sg_free_table_entries(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree, + table->orig_nents); } EXPORT_SYMBOL(sg_free_table); @@ -368,7 +395,8 @@ EXPORT_SYMBOL(sg_alloc_table); static struct scatterlist *get_next_sg(struct sg_table *table, struct scatterlist *cur, unsigned long needed_sges, - gfp_t gfp_mask) + gfp_t gfp_mask, + unsigned int *total_nents) { struct scatterlist *new_sg, *next_sg; unsigned int alloc_size; @@ -386,12 +414,14 @@ static struct scatterlist *get_next_sg(struct sg_table *table, return ERR_PTR(-ENOMEM); sg_init_table(new_sg, alloc_size); if (cur) { + if (total_nents) + *total_nents += alloc_size - 1; __sg_chain(next_sg, new_sg); - table->orig_nents += alloc_size - 1; } else { table->sgl = new_sg; - table->orig_nents = 
alloc_size; table->nents = 0; + if (total_nents) + *total_nents = alloc_size; } return new_sg; } @@ -408,6 +438,7 @@ static struct scatterlist *get_next_sg(struct sg_table *table, * @prv: Last populated sge in sgt * @left_pages: Left pages caller have to set after this call * @gfp_mask: GFP allocation mask + * @total_nents: Output of total number of entries in the table * * Description: * If @prv is NULL, allocate and initialize an sg table from a list of pages, @@ -419,7 +450,8 @@ static struct scatterlist *get_next_sg(struct sg_table *table, * * Returns: * Last SGE in sgt on success, PTR_ERR on otherwise. - * The allocation in @sgt must be released by sg_free_table. + * The allocation in @sgt must be released by sg_free_table or + * sg_free_table_entries. * * Notes: * If this function returns non-0 (eg failure), the caller must call @@ -429,7 +461,7 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned int n_pages, unsigned int offset, unsigned long size, unsigned int max_segment, struct scatterlist *prv, unsigned int left_pages, - gfp_t gfp_mask) + gfp_t gfp_mask, unsigned int *total_nents) { unsigned int chunks, cur_page, seg_len, i, prv_len = 0; unsigned int added_nents = 0; @@ -496,7 +528,8 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, } /* Pass how many chunks might be left */ - s = get_next_sg(sgt, s, chunks - i + left_pages, gfp_mask); + s = get_next_sg(sgt, s, chunks - i + left_pages, gfp_mask, + total_nents); if (IS_ERR(s)) { /* * Adjust entry length to be as before function was @@ -515,6 +548,7 @@ struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt, cur_page = j; } sgt->nents += added_nents; + sgt->orig_nents = sgt->nents; out: if (!left_pages) sg_mark_end(s); @@ -547,7 +581,7 @@ int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages, unsigned long size, gfp_t gfp_mask) { return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages, - 
offset, size, UINT_MAX, NULL, 0, gfp_mask)); + offset, size, UINT_MAX, NULL, 0, gfp_mask, NULL)); } EXPORT_SYMBOL(sg_alloc_table_from_pages); diff --git a/tools/testing/scatterlist/main.c b/tools/testing/scatterlist/main.c index 652254754b4c..00576c86971e 100644 --- a/tools/testing/scatterlist/main.c +++ b/tools/testing/scatterlist/main.c @@ -88,11 +88,14 @@ int main(void) struct page *pages[MAX_PAGES]; struct sg_table st; struct scatterlist *sg; + unsigned int total_nents; set_pages(pages, test->pfn, test->num_pages); sg = __sg_alloc_table_from_pages(&st, pages, test->num_pages, 0, - test->size, test->max_seg, NULL, left_pages, GFP_KERNEL); + test->size, test->max_seg, + NULL, left_pages, GFP_KERNEL, + &total_nents); assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret); if (test->alloc_ret) @@ -100,8 +103,9 @@ int main(void) if (test->pfn_app) { set_pages(pages, test->pfn_app, test->num_pages); - sg = __sg_alloc_table_from_pages(&st, pages, test->num_pages, 0, - test->size, test->max_seg, sg, 0, GFP_KERNEL); + sg = __sg_alloc_table_from_pages( + &st, pages, test->num_pages, 0, test->size, + test->max_seg, sg, 0, GFP_KERNEL, &total_nents); assert(PTR_ERR_OR_ZERO(sg) == test->alloc_ret); } @@ -110,7 +114,10 @@ int main(void) if (!test->pfn_app) VALIDATE(st.orig_nents == test->expected_segments, &st, test); - sg_free_table(&st); + if (test->pfn_app) + sg_free_table_entries(&st, total_nents); + else + sg_free_table(&st); } assert(i == (sizeof(tests) / sizeof(tests[0])) - 1);