
[08/23] hw/block/nvme: Avoid dynamic stack allocation

Message ID 20210505211047.1496765-9-philmd@redhat.com (mailing list archive)
State New, archived
Series misc: Remove variable-length arrays on the stack

Commit Message

Philippe Mathieu-Daudé May 5, 2021, 9:10 p.m. UTC
Use autofree heap allocation instead of variable-length
array on the stack.

Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
---
 hw/block/nvme.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
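
As background, the change follows the general pattern of this series: a
variable-length array, whose stack usage scales with a runtime (and here
guest-influenced) value, is replaced by a heap allocation that GLib frees
automatically when it goes out of scope. A minimal sketch of the pattern,
assuming GLib and a nonzero runtime length nents (names are illustrative,
not taken from the patch):

    #include <stdint.h>
    #include <string.h>
    #include <glib.h>

    static void demo(uint32_t nents)
    {
        /* before: uint64_t prp_list[nents];  -- stack usage scales with nents */

        /* after: heap allocation, released automatically at end of scope */
        g_autofree uint64_t *prp_list = g_new(uint64_t, nents);

        memset(prp_list, 0, nents * sizeof(uint64_t));
    }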

Comments

Klaus Jensen May 6, 2021, 6:43 a.m. UTC | #1
On May  5 23:10, Philippe Mathieu-Daudé wrote:
>Use autofree heap allocation instead of variable-length
>array on the stack.
>
>Signed-off-by: Philippe Mathieu-Daudé <philmd@redhat.com>
>---
> hw/block/nvme.c | 15 ++++++++-------
> 1 file changed, 8 insertions(+), 7 deletions(-)
>
>diff --git a/hw/block/nvme.c b/hw/block/nvme.c
>index 2f6d4925826..905c4bb57af 100644
>--- a/hw/block/nvme.c
>+++ b/hw/block/nvme.c
>@@ -652,7 +652,7 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1,
>     len -= trans_len;
>     if (len) {
>         if (len > n->page_size) {
>-            uint64_t prp_list[n->max_prp_ents];
>+            g_autofree uint64_t *prp_list = NULL;
>             uint32_t nents, prp_trans;
>             int i = 0;
>
>@@ -662,8 +662,10 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1,
>              * that offset.
>              */
>             nents = (n->page_size - (prp2 & (n->page_size - 1))) >> 3;
>-            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
>-            ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
>+            prp_trans = MIN(n->max_prp_ents, nents);
>+            prp_list = g_new(uint64_t, prp_trans);
>+            ret = nvme_addr_read(n, prp2, (void *)prp_list,
>+                                 prp_trans * sizeof(uint64_t));

prp_trans determines how much we must transfer, not the size of
prp_list. Subsequent PRP lists may contain more than nents PRPs, so the
chained reads further down may now write past the end of the allocation.

Just do the allocation when prp_list is declared:

     g_autofree uint64_t *prp_list = g_new(uint64_t, n->max_prp_ents);

>             if (ret) {
>                 trace_pci_nvme_err_addr_read(prp2);
>                 status = NVME_DATA_TRAS_ERROR;
>@@ -682,9 +684,8 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1,
>                     i = 0;
>                     nents = (len + n->page_size - 1) >> n->page_bits;
>                     nents = MIN(nents, n->max_prp_ents);
>-                    prp_trans = nents * sizeof(uint64_t);
>                     ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
>-                                         prp_trans);
>+                                         nents * sizeof(uint64_t));
>                     if (ret) {
>                         trace_pci_nvme_err_addr_read(prp_ent);
>                         status = NVME_DATA_TRAS_ERROR;
>@@ -2510,10 +2511,10 @@ static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
>     if (attr & NVME_DSMGMT_AD) {
>         int64_t offset;
>         size_t len;
>-        NvmeDsmRange range[nr];
>+        g_autofree NvmeDsmRange *range = g_new(NvmeDsmRange, nr);
>         uintptr_t *discards = (uintptr_t *)&req->opaque;
>
>-        status = nvme_h2c(n, (uint8_t *)range, sizeof(range), req);
>+        status = nvme_h2c(n, (uint8_t *)range, sizeof(*range) * nr, req);
>         if (status) {
>             return status;
>         }

DSM change LGTM.
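
To make the out-of-bounds scenario concrete, here is a condensed view of the
two reads, excerpted from the hunks above rather than written anew. The first
transfer sizes the buffer from the offset of prp2 within its page, but the
chained-list read in the loop is bounded only by n->max_prp_ents and reuses
the same buffer:

    /* initial allocation in the patch: may be smaller than max_prp_ents */
    prp_trans = MIN(n->max_prp_ents, nents);
    prp_list = g_new(uint64_t, prp_trans);

    /* later, for a chained PRP list: nents may grow back up to
     * n->max_prp_ents, so this read can write past the end of the
     * smaller allocation */
    nents = (len + n->page_size - 1) >> n->page_bits;
    nents = MIN(nents, n->max_prp_ents);
    ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
                         nents * sizeof(uint64_t));

Hence the suggestion to size the buffer for the worst case up front:

    g_autofree uint64_t *prp_list = g_new(uint64_t, n->max_prp_ents);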

Patch

diff --git a/hw/block/nvme.c b/hw/block/nvme.c
index 2f6d4925826..905c4bb57af 100644
--- a/hw/block/nvme.c
+++ b/hw/block/nvme.c
@@ -652,7 +652,7 @@  static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1,
     len -= trans_len;
     if (len) {
         if (len > n->page_size) {
-            uint64_t prp_list[n->max_prp_ents];
+            g_autofree uint64_t *prp_list = NULL;
             uint32_t nents, prp_trans;
             int i = 0;
 
@@ -662,8 +662,10 @@  static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1,
              * that offset.
              */
             nents = (n->page_size - (prp2 & (n->page_size - 1))) >> 3;
-            prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
-            ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
+            prp_trans = MIN(n->max_prp_ents, nents);
+            prp_list = g_new(uint64_t, prp_trans);
+            ret = nvme_addr_read(n, prp2, (void *)prp_list,
+                                 prp_trans * sizeof(uint64_t));
             if (ret) {
                 trace_pci_nvme_err_addr_read(prp2);
                 status = NVME_DATA_TRAS_ERROR;
@@ -682,9 +684,8 @@  static uint16_t nvme_map_prp(NvmeCtrl *n, NvmeSg *sg, uint64_t prp1,
                     i = 0;
                     nents = (len + n->page_size - 1) >> n->page_bits;
                     nents = MIN(nents, n->max_prp_ents);
-                    prp_trans = nents * sizeof(uint64_t);
                     ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
-                                         prp_trans);
+                                         nents * sizeof(uint64_t));
                     if (ret) {
                         trace_pci_nvme_err_addr_read(prp_ent);
                         status = NVME_DATA_TRAS_ERROR;
@@ -2510,10 +2511,10 @@  static uint16_t nvme_dsm(NvmeCtrl *n, NvmeRequest *req)
     if (attr & NVME_DSMGMT_AD) {
         int64_t offset;
         size_t len;
-        NvmeDsmRange range[nr];
+        g_autofree NvmeDsmRange *range = g_new(NvmeDsmRange, nr);
         uintptr_t *discards = (uintptr_t *)&req->opaque;
 
-        status = nvme_h2c(n, (uint8_t *)range, sizeof(range), req);
+        status = nvme_h2c(n, (uint8_t *)range, sizeof(*range) * nr, req);
         if (status) {
             return status;
         }
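
One more note on the DSM hunk: the nvme_h2c() length has to change along with
the allocation, because once range is a pointer sizeof(range) yields the size
of the pointer rather than the size of the buffer, whereas sizeof on the old
VLA yielded the full array size. A minimal sketch of that difference, using a
hypothetical 16-byte Range type instead of the real NvmeDsmRange:

    #include <stdint.h>
    #include <stdio.h>
    #include <glib.h>

    /* hypothetical stand-in for NvmeDsmRange, 16 bytes */
    typedef struct { uint64_t slba; uint32_t nlb; uint32_t cattr; } Range;

    int main(void)
    {
        unsigned nr = 4;

        Range vla[nr];
        g_autofree Range *heap = g_new(Range, nr);

        printf("%zu\n", sizeof(vla));         /* 64: whole VLA, evaluated at run time */
        printf("%zu\n", sizeof(heap));        /* 8 on 64-bit: just the pointer */
        printf("%zu\n", sizeof(*heap) * nr);  /* 64: the length nvme_h2c() needs */
        return 0;
    }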