@@ -2,5 +2,10 @@ QEMU_PROG_FUZZ=qemu-fuzz-$(TARGET_NAME)$(EXESUF)
fuzz-obj-y += tests/qtest/libqtest.o
fuzz-obj-y += tests/qtest/fuzz/fuzz.o # Fuzzer skeleton
+fuzz-obj-y += tests/qtest/fuzz/fork_fuzz.o
FUZZ_CFLAGS += -I$(SRC_PATH)/tests -I$(SRC_PATH)/tests/qtest
+
+# Linker script to force the coverage counters into known regions which we can
+# mark as shared
+FUZZ_LDFLAGS += -Xlinker -T$(SRC_PATH)/tests/qtest/fuzz/fork_fuzz.ld
new file mode 100644
@@ -0,0 +1,55 @@
+/*
+ * Fork-based fuzzing helpers
+ *
+ * Copyright Red Hat Inc., 2019
+ *
+ * Authors:
+ * Alexander Bulekov <alxndr@bu.edu>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "fork_fuzz.h"
+
+
+void counter_shm_init(void)
+{
+    char *shm_path = g_strdup_printf("/qemu-fuzz-cntrs.%d", getpid());
+    int fd = shm_open(shm_path, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
+    g_free(shm_path);
+
+    if (fd == -1) {
+        perror("Error: ");
+        exit(1);
+    }
+    if (ftruncate(fd, &__FUZZ_COUNTERS_END - &__FUZZ_COUNTERS_START) == -1) {
+        perror("Error: ");
+        exit(1);
+    }
+    /* Copy what's in the counter region to the shm. */
+    void *rptr = mmap(NULL,
+            &__FUZZ_COUNTERS_END - &__FUZZ_COUNTERS_START,
+            PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+    memcpy(rptr,
+           &__FUZZ_COUNTERS_START,
+           &__FUZZ_COUNTERS_END - &__FUZZ_COUNTERS_START);
+
+    munmap(rptr, &__FUZZ_COUNTERS_END - &__FUZZ_COUNTERS_START);
+
+    /* And map the shm over the counter region */
+    rptr = mmap(&__FUZZ_COUNTERS_START,
+                &__FUZZ_COUNTERS_END - &__FUZZ_COUNTERS_START,
+                PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0);
+
+    close(fd);
+
+    if (rptr == MAP_FAILED) {
+        perror("Error: ");
+        exit(1);
+    }
+}
+
+
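For context, a rough sketch of how a fork-based fuzz target is expected to use this helper; the fuzz_one()/run_one_input() names below are purely illustrative and not part of this patch, only counter_shm_init() is. Because counter_shm_init() remaps the counter region as MAP_SHARED in the parent, the coverage each short-lived child records stays visible to libFuzzer after the child exits:

    #include "qemu/osdep.h"
    #include <sys/wait.h>
    #include "fork_fuzz.h"

    /* Hypothetical stand-in for the code that feeds one input to QEMU. */
    static void run_one_input(const unsigned char *data, size_t size)
    {
        (void)data;
        (void)size;
    }

    /* Illustrative per-input entry point for a fork-based target. */
    static void fuzz_one(const unsigned char *data, size_t size)
    {
        static bool counters_shared;

        if (!counters_shared) {
            counter_shm_init();          /* remap counters as MAP_SHARED once */
            counters_shared = true;
        }

        pid_t child = fork();
        if (child == 0) {
            run_one_input(data, size);   /* child updates the shared counters */
            _exit(0);                    /* all other child state is discarded */
        }
        waitpid(child, NULL, 0);         /* parent: libFuzzer sees the coverage */
    }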
new file mode 100644
@@ -0,0 +1,23 @@
+/*
+ * Fork-based fuzzing helpers
+ *
+ * Copyright Red Hat Inc., 2019
+ *
+ * Authors:
+ * Alexander Bulekov <alxndr@bu.edu>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ *
+ */
+
+#ifndef FORK_FUZZ_H
+#define FORK_FUZZ_H
+
+extern uint8_t __FUZZ_COUNTERS_START;
+extern uint8_t __FUZZ_COUNTERS_END;
+
+void counter_shm_init(void);
+
+#endif
+
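A note on the two externs: they are not ordinary variables. The linker script below pins their addresses to the edges of the counter region, so only their addresses are meaningful, and the region size falls out of pointer subtraction. A minimal illustration (the helper name is hypothetical):

    #include "qemu/osdep.h"
    #include "fork_fuzz.h"

    /* Illustrative only: byte size of the region framed by the linker symbols.
     * Take the symbols' addresses; never read them as uint8_t values. */
    static inline size_t fuzz_counters_size(void)
    {
        return &__FUZZ_COUNTERS_END - &__FUZZ_COUNTERS_START;
    }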
new file mode 100644
@@ -0,0 +1,37 @@
+/* We adjust the linker script to place everything that needs to persist
+ * across fuzzing runs into a contiguous section of memory. Then, it is easy
+ * to re-map the counter-related memory as shared.
+ */
+
+SECTIONS
+{
+ .data.fuzz_start : ALIGN(4K)
+ {
+ __FUZZ_COUNTERS_START = .;
+ __start___sancov_cntrs = .;
+ *(_*sancov_cntrs);
+ __stop___sancov_cntrs = .;
+
+ /* Lowest stack counter */
+ *(__sancov_lowest_stack);
+ }
+ .data.fuzz_ordered :
+ {
+ /* Coverage counters. They're not necessary for fuzzing, but are useful
+ * for analyzing the fuzzing performance
+ */
+ __start___llvm_prf_cnts = .;
+ *(*llvm_prf_cnts);
+ __stop___llvm_prf_cnts = .;
+
+    /* Internal libFuzzer TracePC object, which contains the ValueProfileMap */
+ FuzzerTracePC*(.bss*);
+ }
+ .data.fuzz_end : ALIGN(4K)
+ {
+ __FUZZ_COUNTERS_END = .;
+ }
+}
+/* Don't overwrite the SECTIONS in the default linker script. Instead, insert
+ * the above into the default script. */
+INSERT AFTER .data;
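One detail worth spelling out: the MAP_FIXED remap in counter_shm_init() relies on the ALIGN(4K) directives above putting both edges of the region on page boundaries, which only holds while the host page size is at most 4K. A sanity check along these lines could be added if that assumption ever needs verifying (illustrative, not part of this patch):

    #include "qemu/osdep.h"
    #include "fork_fuzz.h"

    /* Illustrative check: the shared-counter remap assumes the 4K-aligned
     * region boundaries are also page-aligned on the host. */
    static void check_counter_region_alignment(void)
    {
        g_assert(getpagesize() <= 4096);
        g_assert(((uintptr_t)&__FUZZ_COUNTERS_START % getpagesize()) == 0);
    }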