
[08/12] linux-user: Introduce imgsrc_mmap

Message ID: 20210619034329.532318-9-richard.henderson@linaro.org (mailing list archive)
State: New, archived
Series: linux-user: Load a vdso for x86_64 and hppa

Commit Message

Richard Henderson June 19, 2021, 3:43 a.m. UTC
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 linux-user/qemu.h      | 11 +++++++++++
 linux-user/elfload.c   |  4 ++--
 linux-user/linuxload.c | 44 ++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 57 insertions(+), 2 deletions(-)

Patch

diff --git a/linux-user/qemu.h b/linux-user/qemu.h
index dafaae6293..255182e133 100644
--- a/linux-user/qemu.h
+++ b/linux-user/qemu.h
@@ -199,6 +199,17 @@  bool imgsrc_read(void *dst, off_t offset, size_t len,
 void *imgsrc_read_alloc(off_t offset, size_t len,
                         const ImageSource *img, Error **errp);
 
+/**
+ * imgsrc_mmap: Map from ImageSource
+ *
+ * If @src has a file descriptor, pass on to target_mmap.  Otherwise,
+ * this is "mapping" from a host buffer, which resolves to memcpy.
+ * Therefore, flags must be MAP_PRIVATE | MAP_FIXED; the flags argument
+ * is retained for clarity.
+ */
+abi_long imgsrc_mmap(abi_ulong start, abi_ulong len, int prot,
+                     int flags, const ImageSource *src, abi_ulong offset);
+
 /* Read a good amount of data initially, to hopefully get all the
    program headers loaded.  */
 #define BPRM_BUF_SIZE  1024
diff --git a/linux-user/elfload.c b/linux-user/elfload.c
index 3c31a5e3b0..8a3a7ae3ac 100644
--- a/linux-user/elfload.c
+++ b/linux-user/elfload.c
@@ -2832,9 +2832,9 @@  static void load_elf_image(const char *image_name, const ImageSource *src,
              */
             if (eppnt->p_filesz != 0) {
                 vaddr_len = TARGET_ELF_PAGELENGTH(eppnt->p_filesz + vaddr_po);
-                error = target_mmap(vaddr_ps, vaddr_len, elf_prot,
+                error = imgsrc_mmap(vaddr_ps, vaddr_len, elf_prot,
                                     MAP_PRIVATE | MAP_FIXED,
-                                    src->fd, eppnt->p_offset - vaddr_po);
+                                    src, eppnt->p_offset - vaddr_po);
 
                 if (error == -1) {
                     goto exit_mmap;
diff --git a/linux-user/linuxload.c b/linux-user/linuxload.c
index d0d3f2ed0e..a437a22b49 100644
--- a/linux-user/linuxload.c
+++ b/linux-user/linuxload.c
@@ -208,3 +208,47 @@  void *imgsrc_read_alloc(off_t offset, size_t len,
     }
     return alloc;
 }
+
+abi_long imgsrc_mmap(abi_ulong start, abi_ulong len, int prot,
+                     int flags, const ImageSource *src, abi_ulong offset)
+{
+    abi_long ret;
+    int prot_write;
+    void *haddr;
+
+    assert(flags == (MAP_PRIVATE | MAP_FIXED));
+
+    if (src->fd >= 0) {
+        return target_mmap(start, len, prot, flags, src->fd, offset);
+    }
+
+    /*
+     * This case is for the vdso; we don't expect bad images.
+     * The mmap may extend beyond the end of the image, especially
+     * to the end of the page.  Zero fill.
+     */
+    assert(offset < src->cache_size);
+
+    prot_write = prot | PROT_WRITE;
+    ret = target_mmap(start, len, prot_write, flags | MAP_ANON, -1, 0);
+    if (ret == -1) {
+        return ret;
+    }
+
+    haddr = lock_user(VERIFY_WRITE, start, len, 0);
+    assert(haddr != NULL);
+    if (offset + len < src->cache_size) {
+        memcpy(haddr, src->cache + offset, len);
+    } else {
+        size_t rest = src->cache_size - offset;
+        memcpy(haddr, src->cache + offset, rest);
+        memset(haddr + rest, 0, len - rest);
+    }
+    unlock_user(haddr, start, len);
+
+    if (prot != prot_write) {
+        target_mprotect(start, len, prot);
+    }
+
+    return ret;
+}
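
For reference, here is a minimal sketch (not part of the patch) of how a memory-backed caller might use the new helper once a guest address range has been reserved. The function name map_builtin_vdso, the vdso_image buffer, and the load_addr/load_len parameters are invented for illustration; the real vdso wiring lands later in this series. Only the ImageSource fields actually touched by this patch (fd, cache, cache_size) are assumed.

/* Hypothetical caller: map an in-memory image via imgsrc_mmap. */
static abi_long map_builtin_vdso(abi_ulong load_addr, abi_ulong load_len,
                                 const void *vdso_image, size_t image_size)
{
    /* fd < 0 selects the host-buffer (memcpy) path in imgsrc_mmap. */
    ImageSource src = {
        .fd = -1,
        .cache = vdso_image,
        .cache_size = image_size,
    };

    /*
     * load_len is assumed to be page-aligned by the caller; any tail
     * beyond image_size is zero-filled by imgsrc_mmap.  Flags must be
     * MAP_PRIVATE | MAP_FIXED, as asserted in the implementation.
     */
    return imgsrc_mmap(load_addr, load_len, PROT_READ | PROT_EXEC,
                       MAP_PRIVATE | MAP_FIXED, &src, 0);
}

With a file-backed ImageSource (src->fd >= 0) the same call simply forwards to target_mmap on the descriptor, which is why the elfload.c hunk above only needs to pass src instead of src->fd.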