diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2131,7 +2131,7 @@ queue_full:
transport_handle_queue_full(cmd, cmd->se_dev);
}
-static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
+void target_free_sgl(struct scatterlist *sgl, int nents)
{
struct scatterlist *sg;
int count;
@@ -2141,6 +2141,7 @@ static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
kfree(sgl);
}
+EXPORT_SYMBOL(target_free_sgl);
static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
{
@@ -2161,7 +2162,7 @@ static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
static inline void transport_free_pages(struct se_cmd *cmd)
{
if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
- transport_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
+ target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
cmd->t_prot_sg = NULL;
cmd->t_prot_nents = 0;
}
@@ -2172,7 +2173,7 @@ static inline void transport_free_pages(struct se_cmd *cmd)
* SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
*/
if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
- transport_free_sgl(cmd->t_bidi_data_sg,
+ target_free_sgl(cmd->t_bidi_data_sg,
cmd->t_bidi_data_nents);
cmd->t_bidi_data_sg = NULL;
cmd->t_bidi_data_nents = 0;
@@ -2182,11 +2183,11 @@ static inline void transport_free_pages(struct se_cmd *cmd)
}
transport_reset_sgl_orig(cmd);
- transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
+ target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
cmd->t_data_sg = NULL;
cmd->t_data_nents = 0;
- transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
+ target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
cmd->t_bidi_data_sg = NULL;
cmd->t_bidi_data_nents = 0;
}
@@ -2260,20 +2261,22 @@ EXPORT_SYMBOL(transport_kunmap_data_sg);
int
target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
- bool zero_page)
+ bool zero_page, bool chainable)
{
struct scatterlist *sg;
struct page *page;
gfp_t zero_flag = (zero_page) ? __GFP_ZERO : 0;
- unsigned int nent;
+ unsigned int nalloc, nent;
int i = 0;
- nent = DIV_ROUND_UP(length, PAGE_SIZE);
- sg = kmalloc(sizeof(struct scatterlist) * nent, GFP_KERNEL);
+ nalloc = nent = DIV_ROUND_UP(length, PAGE_SIZE);
+ if (chainable)
+ nalloc++;
+ sg = kmalloc_array(nalloc, sizeof(struct scatterlist), GFP_KERNEL);
if (!sg)
return -ENOMEM;
- sg_init_table(sg, nent);
+ sg_init_table(sg, nalloc);
while (length) {
u32 page_len = min_t(u32, length, PAGE_SIZE);
@@ -2297,6 +2300,7 @@ out:
kfree(sg);
return -ENOMEM;
}
+EXPORT_SYMBOL(target_alloc_sgl);
/*
* Allocate any required resources to execute the command. For writes we
@@ -2312,7 +2316,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
if (cmd->prot_op != TARGET_PROT_NORMAL &&
!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
- cmd->prot_length, true);
+ cmd->prot_length, true, false);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
@@ -2337,13 +2341,13 @@ transport_generic_new_cmd(struct se_cmd *cmd)
ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
&cmd->t_bidi_data_nents,
- bidi_length, zero_flag);
+ bidi_length, zero_flag, false);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
- cmd->data_length, zero_flag);
+ cmd->data_length, zero_flag, false);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
@@ -2357,7 +2361,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
&cmd->t_bidi_data_nents,
- caw_length, zero_flag);
+ caw_length, zero_flag, false);
if (ret < 0)
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -563,7 +563,7 @@ static int target_xcopy_setup_pt_cmd(
if (alloc_mem) {
rc = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
- cmd->data_length, false);
+ cmd->data_length, false, false);
if (rc < 0) {
ret = rc;
goto out;
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -85,7 +85,6 @@ extern struct configfs_attribute *passthrough_attrib_attrs[];
void *transport_kmap_data_sg(struct se_cmd *);
void transport_kunmap_data_sg(struct se_cmd *);
/* core helpers also used by xcopy during internal command setup */
-int target_alloc_sgl(struct scatterlist **, unsigned int *, u32, bool);
sense_reason_t transport_generic_map_mem_to_cmd(struct se_cmd *,
struct scatterlist *, u32, struct scatterlist *, u32);
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -179,6 +179,10 @@ int core_tpg_set_initiator_node_tag(struct se_portal_group *,
int core_tpg_register(struct se_wwn *, struct se_portal_group *, int);
int core_tpg_deregister(struct se_portal_group *);
+int target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents,
+ u32 length, bool zero_page, bool chainable);
+void target_free_sgl(struct scatterlist *sgl, int nents);
+
/*
* The LIO target core uses DMA_TO_DEVICE to mean that data is going
* to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
The SRP target driver will need to allocate and chain its own SGLs
soon. For this, export target_alloc_sgl, and add a new argument to it
so that it can allocate an additional chain entry that doesn't point
to a page. Also export transport_free_sgl after renaming it to
target_free_sgl to free these SGLs again.

Signed-off-by: Christoph Hellwig <hch@lst.de>
---
 drivers/target/target_core_transport.c | 32 ++++++++++++++++++--------------
 drivers/target/target_core_xcopy.c     |  2 +-
 include/target/target_core_backend.h   |  1 -
 include/target/target_core_fabric.h    |  4 ++++
 4 files changed, 23 insertions(+), 16 deletions(-)
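For a fabric driver picking up these exports, usage could look roughly
like the sketch below. This is illustrative only and not code from the
patch: the helper name example_alloc_chained_sgl, the length, and the
follow-on list next_sg are made up, while sg_chain() is the existing
helper from <linux/scatterlist.h>. With chainable == true,
target_alloc_sgl() sizes the table for one extra entry but still
returns only the page-backed count through *nents, so the last slot is
free to become a chain link.

	#include <linux/scatterlist.h>
	#include <target/target_core_fabric.h>

	/*
	 * Hypothetical helper: allocate a page-backed SGL with a spare
	 * slot at the end and splice an existing list onto it.
	 */
	static int example_alloc_chained_sgl(struct scatterlist **sgl,
			unsigned int *nents, u32 length,
			struct scatterlist *next_sg)
	{
		int ret;

		ret = target_alloc_sgl(sgl, nents, length,
				       false /* zero_page */,
				       true /* chainable */);
		if (ret < 0)
			return ret;

		/*
		 * sg_chain() turns the spare slot (entry *nents, one
		 * past the page-backed entries) into a link pointer.
		 * A chain entry must not carry a page, which is why
		 * the new argument leaves that slot unpopulated.
		 */
		sg_chain(*sgl, *nents + 1, next_sg);
		return 0;
	}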
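Teardown mirrors the allocation. target_free_sgl() releases the pages
behind the first nents entries and then kfree()s the table; its loop
stops before reaching the spare chain slot, so each chunk of a chained
list has to be handed back separately. Again a sketch with made-up
names, assuming a head chunk with a tail chained onto it:

	/* Hypothetical unwind path for a two-chunk chained list. */
	static void example_free_chained_sgl(struct scatterlist *head,
			unsigned int head_nents,
			struct scatterlist *tail,
			unsigned int tail_nents)
	{
		/*
		 * Each chunk is freed with the nents value its own
		 * target_alloc_sgl() call returned; the chain link is
		 * never treated as a page-backed entry.
		 */
		target_free_sgl(head, head_nents);
		target_free_sgl(tail, tail_nents);
	}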