@@ -129,6 +129,8 @@
extern int amdgpu_cik_support;
#endif
+extern int amdgpu_p2p_sharing;
+
#define AMDGPU_DEFAULT_GTT_SIZE_MB 3072ULL /* 3GB by default */
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -471,6 +473,8 @@ struct drm_gem_object *
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
struct drm_gem_object *gobj,
int flags);
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
int amdgpu_gem_prime_pin(struct drm_gem_object *obj);
void amdgpu_gem_prime_unpin(struct drm_gem_object *obj);
struct reservation_object *amdgpu_gem_prime_res_obj(struct drm_gem_object *);
@@ -120,6 +120,7 @@
int amdgpu_param_buf_per_se = 0;
int amdgpu_job_hang_limit = 0;
int amdgpu_lbpw = -1;
+int amdgpu_p2p_sharing = 0;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -289,6 +290,9 @@
module_param_named(cik_support, amdgpu_cik_support, int, 0444);
#endif
+MODULE_PARM_DESC(p2p_sharing, "Enable P2P buffer sharing (1 = enabled, 0 = disabled (default))");
+module_param_named(p2p_sharing, amdgpu_p2p_sharing, int, 0444);
+
static const struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_SI
@@ -835,7 +839,7 @@ long amdgpu_drm_ioctl(struct file *filp,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_export = amdgpu_gem_prime_export,
- .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_import = amdgpu_gem_prime_import,
.gem_prime_pin = amdgpu_gem_prime_pin,
.gem_prime_unpin = amdgpu_gem_prime_unpin,
.gem_prime_res_obj = amdgpu_gem_prime_res_obj,
@@ -159,3 +159,81 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
 	return drm_gem_prime_export(dev, gobj, flags);
 }
+
+/*
+ * amdgpu_gem_prime_foreign_bo - find or create a GEM wrapper on @adev for a
+ * BO that was allocated by a different amdgpu device instance.
+ *
+ * Called with no locks held; takes the BO's reservation lock internally.
+ * Returns a referenced GEM object on success or an ERR_PTR on failure.
+ */
+static struct drm_gem_object *
+amdgpu_gem_prime_foreign_bo(struct amdgpu_device *adev, struct amdgpu_bo *bo)
+{
+	struct amdgpu_gem_object *gobj;
+	int r;
+
+	ww_mutex_lock(&bo->tbo.resv->lock, NULL);
+
+	/* Reuse an existing GEM object if this BO was already imported. */
+	list_for_each_entry(gobj, &bo->gem_objects, list) {
+		if (gobj->base.dev != adev->ddev)
+			continue;
+
+		/* Take the reference before dropping the lock so the
+		 * object cannot be released in between.
+		 */
+		drm_gem_object_reference(&gobj->base);
+		ww_mutex_unlock(&bo->tbo.resv->lock);
+		return &gobj->base;
+	}
+
+	gobj = kzalloc(sizeof(struct amdgpu_gem_object), GFP_KERNEL);
+	if (unlikely(!gobj)) {
+		ww_mutex_unlock(&bo->tbo.resv->lock);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	r = drm_gem_object_init(adev->ddev, &gobj->base, amdgpu_bo_size(bo));
+	if (unlikely(r)) {
+		kfree(gobj);
+		ww_mutex_unlock(&bo->tbo.resv->lock);
+		return ERR_PTR(r);
+	}
+
+	list_add(&gobj->list, &bo->gem_objects);
+	gobj->bo = amdgpu_bo_ref(bo);
+	/* Peer access goes through the CPU-visible aperture, so make sure
+	 * the BO stays CPU accessible.
+	 */
+	bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+
+	ww_mutex_unlock(&bo->tbo.resv->lock);
+
+	return &gobj->base;
+}
+
+/*
+ * amdgpu_gem_prime_import - import a dma-buf, with optional P2P support
+ *
+ * When the p2p_sharing module parameter is set and the dma-buf wraps a BO
+ * from another amdgpu device, share the underlying BO directly instead of
+ * going through the generic sg-table based import path.
+ */
+struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
+					       struct dma_buf *dma_buf)
+{
+	struct amdgpu_device *adev = dev->dev_private;
+
+	if (amdgpu_p2p_sharing) {
+		struct drm_gem_object *obj =
+			drm_gem_prime_dmabuf_to_object(dma_buf, dev->driver);
+
+		if (obj && obj->dev != dev) {
+			/* It's an amdgpu_bo from a different driver instance */
+			struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+			return amdgpu_gem_prime_foreign_bo(adev, bo);
+		}
+	}
+
+	/* Fall back to the normal dma-buf import path. */
+	return drm_gem_prime_import(dev, dma_buf);
+}