@@ -120,6 +120,7 @@ TESTS_progs = \
gem_unref_active_buffers \
gem_vmap_blits \
gem_wait_render_timeout \
+ gem_bo_falloc \
gen3_mixed_blits \
gen3_render_linear_blits \
gen3_render_mixed_blits \
new file mode 100644
@@ -0,0 +1,469 @@
+/*
+ * Copyright © 2014 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <inttypes.h>
+#include <errno.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include "drm.h"
+#include "i915_drm.h"
+#include "drmtest.h"
+#include "intel_chipset.h"
+#include "intel_gpu_tools.h"
+
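+/* An 8-page test object plus the XY_SRC_COPY blitter command and flags. */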
+#ifndef PAGE_SIZE
+#define PAGE_SIZE 4096
+#endif
+#define OBJECT_SIZE (8 * PAGE_SIZE)
+#define COPY_BLT_CMD (2<<29|0x53<<22|0x6)
+#define BLT_WRITE_ALPHA (1<<21)
+#define BLT_WRITE_RGB (1<<20)
+#define BLT_SRC_TILED (1<<15)
+#define BLT_DST_TILED (1<<11)
+
+static uint8_t buf[OBJECT_SIZE];
+
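+/*
+ * Create a bo the size of the static buffer and fill each of its pages with
+ * a distinct non-zero byte value, so that later comparisons can tell exactly
+ * which pages still hold their original contents.
+ */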
+static uint32_t create_bo(int fd)
+{
+ int i;
+ uint32_t page_count;
+ uint32_t handle;
+
+ handle = gem_create(fd, sizeof(buf));
+ page_count = sizeof(buf) / PAGE_SIZE;
+
+ for (i = 0; i < page_count; ++i)
+ memset(buf + (i * PAGE_SIZE), i+1, PAGE_SIZE);
+
+ gem_write(fd, handle, 0, buf, sizeof(buf));
+ return handle;
+}
+
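+/*
+ * Emit XY_SRC_COPY blits into 'batch' copying 'length' bytes from src to
+ * dst: one blit for as many full 16KiB rows as fit, plus one for the
+ * remainder, filling in relocation entries (and the extra address dwords
+ * required on gen8+) for both buffers. Returns the batch length in bytes.
+ */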
+static int gem_linear_blt(int fd,
+ uint32_t *batch,
+ uint32_t src,
+ uint32_t dst,
+ uint32_t length,
+ struct drm_i915_gem_relocation_entry *reloc)
+{
+ uint32_t *b = batch;
+ int height = length / (16 * 1024);
+
+ igt_assert(height <= 1<<16);
+
+ if (height) {
+ int i = 0;
+ b[i++] = COPY_BLT_CMD | BLT_WRITE_ALPHA | BLT_WRITE_RGB;
+ if (intel_gen(intel_get_drm_devid(fd)) >= 8)
+ b[i-1]+=2;
+ b[i++] = 0xcc << 16 | 1 << 25 | 1 << 24 | (16*1024);
+ b[i++] = 0;
+ b[i++] = height << 16 | (4*1024);
+ b[i++] = 0;
+ reloc->offset = (b-batch+4) * sizeof(uint32_t);
+ reloc->delta = 0;
+ reloc->target_handle = dst;
+ reloc->read_domains = I915_GEM_DOMAIN_RENDER;
+ reloc->write_domain = I915_GEM_DOMAIN_RENDER;
+ reloc->presumed_offset = 0;
+ reloc++;
+ if (intel_gen(intel_get_drm_devid(fd)) >= 8)
+ b[i++] = 0; /* FIXME */
+
+ b[i++] = 0;
+ b[i++] = 16*1024;
+ b[i++] = 0;
+ reloc->offset = (b-batch+7) * sizeof(uint32_t);
+ if (intel_gen(intel_get_drm_devid(fd)) >= 8)
+ reloc->offset += sizeof(uint32_t);
+ reloc->delta = 0;
+ reloc->target_handle = src;
+ reloc->read_domains = I915_GEM_DOMAIN_RENDER;
+ reloc->write_domain = 0;
+ reloc->presumed_offset = 0;
+ reloc++;
+ if (intel_gen(intel_get_drm_devid(fd)) >= 8)
+ b[i++] = 0; /* FIXME */
+
+ b += i;
+ length -= height * 16*1024;
+ }
+
+ if (length) {
+ int i = 0;
+ b[i++] = COPY_BLT_CMD | BLT_WRITE_ALPHA | BLT_WRITE_RGB;
+ if (intel_gen(intel_get_drm_devid(fd)) >= 8)
+ b[i-1]+=2;
+ b[i++] = 0xcc << 16 | 1 << 25 | 1 << 24 | (16*1024);
+ b[i++] = height << 16;
+ b[i++] = (1+height) << 16 | (length / 4);
+ b[i++] = 0;
+ reloc->offset = (b-batch+4) * sizeof(uint32_t);
+ reloc->delta = 0;
+ reloc->target_handle = dst;
+ reloc->read_domains = I915_GEM_DOMAIN_RENDER;
+ reloc->write_domain = I915_GEM_DOMAIN_RENDER;
+ reloc->presumed_offset = 0;
+ reloc++;
+ if (intel_gen(intel_get_drm_devid(fd)) >= 8)
+ b[i++] = 0; /* FIXME */
+
+ b[i++] = height << 16;
+ b[i++] = 16*1024;
+ b[i++] = 0;
+ reloc->offset = (b-batch+7) * sizeof(uint32_t);
+ if (intel_gen(intel_get_drm_devid(fd)) >= 8)
+ reloc->offset += sizeof(uint32_t);
+ reloc->delta = 0;
+ reloc->target_handle = src;
+ reloc->read_domains = I915_GEM_DOMAIN_RENDER;
+ reloc->write_domain = 0;
+ reloc->presumed_offset = 0;
+ reloc++;
+ if (intel_gen(intel_get_drm_devid(fd)) >= 8)
+ b[i++] = 0; /* FIXME */
+
+ b += i;
+ }
+
+ b[0] = MI_BATCH_BUFFER_END;
+ b[1] = 0;
+
+ return (b+2 - batch) * sizeof(uint32_t);
+}
+
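+/*
+ * Copy 'length' bytes from src to dst using the blitter: build a batch with
+ * gem_linear_blt(), execute it (on the BLT ring where available) and wait
+ * for the copy to complete.
+ */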
+static void do_blit(int fd, uint32_t src, uint32_t dst, uint32_t length)
+{
+ int len;
+ int ring;
+ uint32_t handle;
+	uint32_t batch[20];
+ struct drm_i915_gem_execbuffer2 execbuf;
+ struct drm_i915_gem_exec_object2 exec[3];
+ struct drm_i915_gem_relocation_entry reloc[4];
+
+ handle = gem_create(fd, 4096);
+
+	len = gem_linear_blt(fd, batch, src, dst, length, reloc);
+
+	gem_write(fd, handle, 0, batch, len);
+
+ exec[0].handle = src;
+ exec[0].relocation_count = 0;
+ exec[0].relocs_ptr = 0;
+ exec[0].alignment = 0;
+ exec[0].offset = 0;
+ exec[0].flags = 0;
+ exec[0].rsvd1 = 0;
+ exec[0].rsvd2 = 0;
+
+ exec[1].handle = dst;
+ exec[1].relocation_count = 0;
+ exec[1].relocs_ptr = 0;
+ exec[1].alignment = 0;
+ exec[1].offset = 0;
+ exec[1].flags = 0;
+ exec[1].rsvd1 = 0;
+ exec[1].rsvd2 = 0;
+
+ exec[2].handle = handle;
+ if (intel_gen(intel_get_drm_devid(fd)) >= 8)
+ exec[2].relocation_count = len > 56 ? 4 : 2;
+ else
+ exec[2].relocation_count = len > 40 ? 4 : 2;
+ exec[2].relocs_ptr = (uintptr_t)reloc;
+ exec[2].alignment = 0;
+ exec[2].offset = 0;
+ exec[2].flags = 0;
+ exec[2].rsvd1 = 0;
+ exec[2].rsvd2 = 0;
+
+ ring = 0;
+ if (HAS_BLT_RING(intel_get_drm_devid(fd)))
+ ring = I915_EXEC_BLT;
+
+ execbuf.buffers_ptr = (uintptr_t)exec;
+ execbuf.buffer_count = 3;
+ execbuf.batch_start_offset = 0;
+ execbuf.batch_len = len;
+ execbuf.cliprects_ptr = 0;
+ execbuf.num_cliprects = 0;
+ execbuf.DR1 = 0;
+ execbuf.DR4 = 0;
+ execbuf.flags = ring;
+ i915_execbuffer2_set_context_id(execbuf, 0);
+ execbuf.rsvd2 = 0;
+
+ gem_execbuf(fd, &execbuf);
+
+ gem_sync(fd, handle);
+
+ fflush(stdout);
+ gem_close(fd, handle);
+}
+
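+/*
+ * Wrapper for the DRM_IOCTL_I915_GEM_FALLOCATE ioctl: mark or unmark the
+ * [start, start + length) range of the object as scratch, returning the raw
+ * ioctl result so callers can also check error paths.
+ */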
+static int do_falloc(int fd, uint32_t handle,
+		     uint32_t mode, uint32_t start, uint32_t length)
+{
+	int ret;
+	struct drm_i915_gem_fallocate gem_bo;
+
+	memset(&gem_bo, 0, sizeof(gem_bo));
+	gem_bo.handle = handle;
+	gem_bo.start = start;
+	gem_bo.length = length;
+	gem_bo.mode = mode;
+ ret = ioctl(fd, DRM_IOCTL_I915_GEM_FALLOCATE, &gem_bo);
+ return ret;
+}
+
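+/*
+ * Exercise the ioctl's argument checking: undefined modes, a start offset
+ * beyond the object, an oversized length and a range running past the end
+ * of the object must all be rejected.
+ */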
+static void test_gem_falloc_arguments_validation(int fd)
+{
+ int ret;
+ uint32_t handle;
+
+ handle = gem_create(fd, OBJECT_SIZE);
+ /* invalid mode */
+ ret = do_falloc(fd, handle, ~0, 0, OBJECT_SIZE >> 1);
+ igt_assert(ret != 0);
+
+ ret = do_falloc(fd, handle, 0, 0, OBJECT_SIZE >> 1);
+ igt_assert(ret != 0);
+
+ /* invalid start */
+ ret = do_falloc(fd, handle, I915_GEM_FALLOC_MARK_SCRATCH,
+ OBJECT_SIZE + (OBJECT_SIZE >> 1),
+ OBJECT_SIZE >> 1);
+ igt_assert(ret != 0);
+
+ /* invalid length */
+ ret = do_falloc(fd, handle, I915_GEM_FALLOC_MARK_SCRATCH,
+ OBJECT_SIZE >> 1, ~0);
+ igt_assert(ret != 0);
+
+ /* marked region overflowing obj range */
+ ret = do_falloc(fd, handle, I915_GEM_FALLOC_MARK_SCRATCH,
+ OBJECT_SIZE >> 1, OBJECT_SIZE);
+ igt_assert(ret != 0);
+
+ gem_close(fd, handle);
+}
+
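+/*
+ * Blit a bo, mark the whole destination as scratch and check the copied
+ * data is gone, then unmark it and check that pwrite/pread work again on
+ * the restored backing storage.
+ */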
+static void test_gem_falloc_random_usage(int fd)
+{
+ int ret;
+ uint32_t src, dst;
+ uint8_t *src_addr, *dst_addr;
+ uint8_t buf[OBJECT_SIZE];
+
+ src = create_bo(fd);
+ dst = gem_create(fd, OBJECT_SIZE);
+
+ gem_read(fd, src, 0, buf, OBJECT_SIZE);
+ do_blit(fd, src, dst, OBJECT_SIZE);
+
+ src_addr = gem_mmap__gtt(fd, src, OBJECT_SIZE, PROT_WRITE | PROT_READ);
+ dst_addr = gem_mmap__gtt(fd, dst, OBJECT_SIZE, PROT_WRITE | PROT_READ);
+ igt_assert(memcmp(dst_addr, src_addr, OBJECT_SIZE) == 0);
+
+ /* mark whole object as scratch */
+ ret = do_falloc(fd, dst, I915_GEM_FALLOC_MARK_SCRATCH,
+ 0, OBJECT_SIZE);
+ igt_assert(ret == 0);
+ igt_assert(memcmp(dst_addr, src_addr, OBJECT_SIZE) != 0);
+
+ /* unmark whole object */
+ ret = do_falloc(fd, dst, I915_GEM_FALLOC_UNMARK_SCRATCH,
+ 0, OBJECT_SIZE);
+ igt_assert(ret == 0);
+
+ gem_write(fd, dst, 0, buf, OBJECT_SIZE);
+
+ memset(buf, 0x00, OBJECT_SIZE);
+ gem_read(fd, dst, 0, buf, OBJECT_SIZE);
+ igt_assert(memcmp(buf, src_addr, OBJECT_SIZE) == 0);
+
+ munmap(src_addr, OBJECT_SIZE);
+ munmap(dst_addr, OBJECT_SIZE);
+ gem_close(fd, src);
+ gem_close(fd, dst);
+}
+
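+/*
+ * Mark the second half of a bo as scratch and confirm pread sees the data
+ * disappear, then unmark the range, rewrite it with pwrite and confirm the
+ * object reads back identical to the source.
+ */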
+static void test_gem_falloc_pwrite(int fd)
+{
+ int ret;
+ uint32_t offset;
+ uint32_t src, dst;
+ uint8_t src_buf[OBJECT_SIZE];
+ uint8_t dst_buf[OBJECT_SIZE];
+
+ src = create_bo(fd);
+ dst = create_bo(fd);
+
+ ret = do_falloc(fd, dst, I915_GEM_FALLOC_MARK_SCRATCH,
+ OBJECT_SIZE >> 1, OBJECT_SIZE >> 1);
+ igt_assert(ret == 0);
+
+ gem_read(fd, src, 0, src_buf, OBJECT_SIZE);
+ gem_read(fd, dst, 0, dst_buf, OBJECT_SIZE);
+ igt_assert(memcmp(src_buf, dst_buf, OBJECT_SIZE) != 0);
+
+ ret = do_falloc(fd, dst, I915_GEM_FALLOC_UNMARK_SCRATCH,
+ OBJECT_SIZE >> 1, OBJECT_SIZE >> 1);
+ igt_assert(ret == 0);
+ offset = OBJECT_SIZE >> 1;
+ gem_write(fd, dst, offset, src_buf + offset, OBJECT_SIZE >> 1);
+
+ memset(dst_buf, 0x00, OBJECT_SIZE);
+ gem_read(fd, dst, 0, dst_buf, OBJECT_SIZE);
+ igt_assert(memcmp(src_buf, dst_buf, OBJECT_SIZE) == 0);
+
+ gem_close(fd, src);
+ gem_close(fd, dst);
+}
+
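+/*
+ * Mark and unmark several sub-ranges, first disjoint and then overlapping
+ * ones, blitting in between to check that data is dropped while a range is
+ * marked and fully restored by a fresh blit once it is unmarked.
+ */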
+static void test_gem_falloc_multiple_regions(int fd)
+{
+	int ret;
+	uint32_t src, dst;
+ uint32_t falloc_start, falloc_len;
+ uint8_t *src_addr, *dst_addr;
+
+ printf("GEM object falloc of multiple regions test\n");
+
+ src = create_bo(fd);
+ dst = gem_create(fd, OBJECT_SIZE);
+
+ do_blit(fd, src, dst, OBJECT_SIZE);
+
+ src_addr = gem_mmap__gtt(fd, src, OBJECT_SIZE, PROT_WRITE | PROT_READ);
+ dst_addr = gem_mmap__gtt(fd, dst, OBJECT_SIZE, PROT_WRITE | PROT_READ);
+ igt_assert(memcmp(dst_addr, src_addr, OBJECT_SIZE) == 0);
+
+ /* mark multiple ranges without overlap */
+ falloc_start = OBJECT_SIZE >> 2;
+ falloc_len = OBJECT_SIZE >> 2;
+ ret = do_falloc(fd, dst, I915_GEM_FALLOC_MARK_SCRATCH,
+ falloc_start, falloc_len);
+ igt_assert(ret == 0);
+
+ falloc_start = (OBJECT_SIZE >> 1) + (OBJECT_SIZE >> 2);
+ ret = do_falloc(fd, dst, I915_GEM_FALLOC_MARK_SCRATCH,
+ falloc_start, falloc_len);
+ igt_assert(ret == 0);
+
+ /* do some operation */
+ do_blit(fd, src, dst, OBJECT_SIZE);
+ igt_assert(memcmp(dst_addr, src_addr, OBJECT_SIZE) != 0);
+
+ /* unmark multiple ranges without overlap */
+ falloc_start = OBJECT_SIZE >> 2;
+ falloc_len = OBJECT_SIZE >> 2;
+ ret = do_falloc(fd, dst, I915_GEM_FALLOC_UNMARK_SCRATCH,
+ falloc_start, falloc_len);
+ igt_assert(ret == 0);
+
+ falloc_start = (OBJECT_SIZE >> 1) + (OBJECT_SIZE >> 2);
+ falloc_len = OBJECT_SIZE >> 2;
+ ret = do_falloc(fd, dst, I915_GEM_FALLOC_UNMARK_SCRATCH,
+ falloc_start, falloc_len);
+ igt_assert(ret == 0);
+
+ munmap(dst_addr, OBJECT_SIZE);
+ dst_addr = gem_mmap__gtt(fd, dst, OBJECT_SIZE, PROT_WRITE | PROT_READ);
+
+ /* blt whole obj to ensure unmarking is ok */
+ do_blit(fd, src, dst, OBJECT_SIZE);
+
+ igt_assert(memcmp(dst_addr, src_addr, OBJECT_SIZE) == 0);
+
+ /* mark multiple ranges with overlap */
+ falloc_start = OBJECT_SIZE >> 2;
+ falloc_len = OBJECT_SIZE >> 2;
+ ret = do_falloc(fd, dst, I915_GEM_FALLOC_MARK_SCRATCH,
+ falloc_start, falloc_len);
+ igt_assert(ret == 0);
+
+ falloc_len = OBJECT_SIZE >> 1;
+ ret = do_falloc(fd, dst, I915_GEM_FALLOC_MARK_SCRATCH,
+ falloc_start, falloc_len);
+ igt_assert(ret == 0);
+
+ /* do some operation */
+ do_blit(fd, src, dst, OBJECT_SIZE);
+ igt_assert(memcmp(dst_addr, src_addr, OBJECT_SIZE) != 0);
+
+ /* unmark the whole region */
+ falloc_start = OBJECT_SIZE >> 2;
+ falloc_len = OBJECT_SIZE >> 1;
+ ret = do_falloc(fd, dst, I915_GEM_FALLOC_UNMARK_SCRATCH,
+ falloc_start, falloc_len);
+ igt_assert(ret == 0);
+
+ munmap(dst_addr, OBJECT_SIZE);
+ dst_addr = gem_mmap__gtt(fd, dst, OBJECT_SIZE, PROT_WRITE | PROT_READ);
+
+ /* blt whole obj to ensure unmarking is ok */
+ do_blit(fd, src, dst, OBJECT_SIZE);
+
+ igt_assert(memcmp(dst_addr, src_addr, OBJECT_SIZE) == 0);
+
+ munmap(src_addr, OBJECT_SIZE);
+ munmap(dst_addr, OBJECT_SIZE);
+ gem_close(fd, src);
+ gem_close(fd, dst);
+}
+
+int fd;
+
+int main(int argc, char **argv)
+{
+ igt_subtest_init(argc, argv);
+
+ igt_fixture
+ fd = drm_open_any();
+
+	igt_subtest("arguments-validation")
+ test_gem_falloc_arguments_validation(fd);
+
+	igt_subtest("random-usage")
+ test_gem_falloc_random_usage(fd);
+
+	igt_subtest("pwrite-pread")
+ test_gem_falloc_pwrite(fd);
+
+	igt_subtest("multiple-regions")
+ test_gem_falloc_multiple_regions(fd);
+
+	igt_fixture
+		close(fd);
+ igt_exit();
+}