@@ -1341,6 +1341,63 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
}
EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, DMA_BUF);
+/**
+ * dma_buf_transfer_charge - Change the GPU cgroup to which the provided dma_buf is charged.
+ * @dmabuf: [in] buffer whose charge will be migrated to a different GPU cgroup
+ * @target: [in] the task_struct of the destination process for the GPU cgroup charge
+ *
+ * Only tasks that belong to the cgroup the buffer is currently charged to
+ * may call this function; otherwise it returns -EPERM.
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
+ */
+int dma_buf_transfer_charge(struct dma_buf *dmabuf, struct task_struct *target)
+{
+ struct gpucg *current_gpucg, *target_gpucg, *to_release;
+ int ret;
+
+ if (!dmabuf->gpucg || !dmabuf->gpucg_bucket) {
+ /* This dmabuf is not tracked under GPU cgroup accounting */
+ return 0;
+ }
+
+ current_gpucg = gpucg_get(current);
+ target_gpucg = gpucg_get(target);
+ to_release = target_gpucg;
+
+ /* If the source and destination cgroups are the same, don't do anything. */
+ if (current_gpucg == target_gpucg) {
+ ret = 0;
+ goto skip_transfer;
+ }
+
+ /*
+ * Verify that the cgroup of the process requesting the transfer
+ * is the same as the one the buffer is currently charged to.
+ */
+ mutex_lock(&dmabuf->lock);
+ if (current_gpucg != dmabuf->gpucg) {
+ ret = -EPERM;
+ goto err;
+ }
+
+ ret = gpucg_transfer_charge(
+ dmabuf->gpucg, target_gpucg, dmabuf->gpucg_bucket, dmabuf->size);
+ if (ret)
+ goto err;
+
+ to_release = dmabuf->gpucg;
+ dmabuf->gpucg = target_gpucg;
+
+err:
+ mutex_unlock(&dmabuf->lock);
+skip_transfer:
+ gpucg_put(current_gpucg);
+ gpucg_put(to_release);
+ return ret;
+}
+EXPORT_SYMBOL_NS_GPL(dma_buf_transfer_charge, DMA_BUF);
+
#ifdef CONFIG_DEBUG_FS
static int dma_buf_debug_show(struct seq_file *s, void *unused)
{
@@ -83,7 +83,13 @@ static inline struct gpucg *gpucg_parent(struct gpucg *cg)
}
int gpucg_charge(struct gpucg *gpucg, struct gpucg_bucket *bucket, u64 size);
+
void gpucg_uncharge(struct gpucg *gpucg, struct gpucg_bucket *bucket, u64 size);
+
+int gpucg_transfer_charge(struct gpucg *source,
+ struct gpucg *dest,
+ struct gpucg_bucket *bucket,
+ u64 size);
int gpucg_register_bucket(struct gpucg_bucket *bucket, const char *name);
#else /* CONFIG_CGROUP_GPU */
@@ -118,6 +124,14 @@ static inline void gpucg_uncharge(struct gpucg *gpucg,
struct gpucg_bucket *bucket,
u64 size) {}
+static inline int gpucg_transfer_charge(struct gpucg *source,
+ struct gpucg *dest,
+ struct gpucg_bucket *bucket,
+ u64 size)
+{
+ return 0;
+}
+
static inline int gpucg_register_bucket(struct gpucg_bucket *bucket, const char *name) { return 0; }
#endif /* CONFIG_CGROUP_GPU */
#endif /* _CGROUP_GPU_H */
@@ -18,6 +18,7 @@
#include <linux/file.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
+#include <linux/sched.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
@@ -650,9 +651,14 @@ void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map);
void dma_buf_exp_info_set_gpucg(struct dma_buf_export_info *exp_info,
struct gpucg *gpucg,
struct gpucg_bucket *gpucg_bucket);
+
+int dma_buf_transfer_charge(struct dma_buf *dmabuf, struct task_struct *target);
#else /* CONFIG_CGROUP_GPU */
static inline void dma_buf_exp_info_set_gpucg(struct dma_buf_export_info *exp_info,
struct gpucg *gpucg,
struct gpucg_bucket *gpucg_bucket) {}
+
+static inline int dma_buf_transfer_charge(struct dma_buf *dmabuf, struct task_struct *target)
+{ return 0; }
#endif /* CONFIG_CGROUP_GPU */
#endif /* __DMA_BUF_H__ */
@@ -252,6 +252,68 @@ void gpucg_uncharge(struct gpucg *gpucg, struct gpucg_bucket *bucket, u64 size)
css_put(&gpucg->css);
}
+/**
+ * gpucg_transfer_charge - Transfer a GPU charge from one cgroup to another.
+ *
+ * @source: [in] The GPU cgroup the charge will be transferred from.
+ * @dest: [in] The GPU cgroup the charge will be transferred to.
+ * @bucket: [in] The GPU cgroup bucket corresponding to the charge.
+ * @size: [in] The size of the memory in bytes.
+ * This size will be rounded up to the nearest page size.
+ *
+ * Returns 0 on success, or a negative errno code otherwise.
+ */
+int gpucg_transfer_charge(struct gpucg *source,
+ struct gpucg *dest,
+ struct gpucg_bucket *bucket,
+ u64 size)
+{
+ struct page_counter *counter;
+ u64 nr_pages;
+ struct gpucg_resource_pool *rp_source, *rp_dest;
+ int ret = 0;
+
+ nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+
+ mutex_lock(&gpucg_mutex);
+ rp_source = cg_rpool_find_locked(source, bucket);
+ if (unlikely(!rp_source)) {
+ ret = -ENOENT;
+ goto exit_early;
+ }
+
+ rp_dest = cg_rpool_get_locked(dest, bucket);
+ if (IS_ERR(rp_dest)) {
+ ret = PTR_ERR(rp_dest);
+ goto exit_early;
+ }
+
+ /*
+ * First uncharge from the pool it's currently charged to. This ordering avoids double
+ * charging while the transfer is in progress, which could cause us to hit a limit.
+ * If the try_charge fails for this transfer, we need to be able to reverse this uncharge,
+ * so we continue to hold the gpucg_mutex here.
+ */
+ page_counter_uncharge(&rp_source->total, nr_pages);
+ css_put(&source->css);
+
+ /* Now attempt the new charge */
+ if (page_counter_try_charge(&rp_dest->total, nr_pages, &counter)) {
+ css_get(&dest->css);
+ } else {
+ /*
+ * The new charge failed, so reverse the uncharge from above. This should always
+ * succeed since charges on source are blocked by gpucg_mutex.
+ */
+ WARN_ON(!page_counter_try_charge(&rp_source->total, nr_pages, &counter));
+ css_get(&source->css);
+ ret = -ENOMEM;
+ }
+exit_early:
+ mutex_unlock(&gpucg_mutex);
+ return ret;
+}
+
/**
* gpucg_register_bucket - Registers a bucket for memory accounting using the
* GPU cgroup controller.
The dma_buf_transfer_charge function provides a way for processes to
transfer charge of a buffer to a different process. This is essential
for cases where a central allocator process does allocations for
various subsystems, hands over the fd to the client who requested the
memory, and drops all references to the allocated memory.

Originally-by: Hridya Valsaraju <hridya@google.com>
Signed-off-by: T.J. Mercier <tjmercier@google.com>
---
v5 changes
Fix commit message which still contained the old name for
dma_buf_transfer_charge per Michal Koutný.

Modify the dma_buf_transfer_charge API to accept a task_struct instead
of a gpucg. This avoids requiring the caller to manage the refcount of
the gpucg upon failure and confusing ownership transfer logic.

v4 changes
Adjust ordering of charge/uncharge during transfer to avoid potentially
hitting cgroup limit per Michal Koutný.

v3 changes
Use more common dual author commit message format per John Stultz.

v2 changes
Move dma-buf cgroup charge transfer from a dma_buf_op defined by every
heap to a single dma-buf function for all heaps per Daniel Vetter and
Christian König.
---
 drivers/dma-buf/dma-buf.c  | 57 +++++++++++++++++++++++++++++++++++
 include/linux/cgroup_gpu.h | 14 +++++++++
 include/linux/dma-buf.h    |  6 ++++
 kernel/cgroup/gpu.c        | 62 ++++++++++++++++++++++++++++++++++++++
 4 files changed, 139 insertions(+)
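
For reviewers, a minimal sketch of the intended call pattern follows. It
assumes a hypothetical in-kernel central allocator: central_alloc_handoff,
its fd/pid parameters, and the error-handling policy are illustrative
assumptions, not part of this patch. Only dma_buf_transfer_charge is new
here; dma_buf_get, find_get_pid, get_pid_task and friends are existing
kernel APIs.

/*
 * Hypothetical caller (not part of this patch): a central allocator
 * that has just installed a dma-buf fd for a client hands the cgroup
 * charge over before dropping its own references to the buffer.
 */
#include <linux/dma-buf.h>
#include <linux/err.h>
#include <linux/pid.h>
#include <linux/sched/task.h>

static int central_alloc_handoff(int fd, pid_t client_pid)
{
	struct task_struct *client;
	struct dma_buf *dmabuf;
	struct pid *pid;
	int ret;

	dmabuf = dma_buf_get(fd);	/* takes a file reference */
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	pid = find_get_pid(client_pid);
	client = pid ? get_pid_task(pid, PIDTYPE_PID) : NULL;
	put_pid(pid);
	if (!client) {
		ret = -ESRCH;
		goto out_put_buf;
	}

	/*
	 * The allocator must still belong to the cgroup the buffer is
	 * charged to, otherwise this returns -EPERM. On success the
	 * client's cgroup owns the charge, so the allocator is free to
	 * drop all of its references to the allocated memory.
	 */
	ret = dma_buf_transfer_charge(dmabuf, client);

	put_task_struct(client);
out_put_buf:
	dma_buf_put(dmabuf);
	return ret;
}

Note the effect of the v5 API change: because dma_buf_transfer_charge
takes a task_struct rather than a gpucg, all gpucg refcounting stays
inside the dma-buf core, and a caller like the sketch above never has to
clean up a cgroup reference on failure.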