@@ -35,7 +35,7 @@ EXPORT_TRACEPOINT_SYMBOL(fence_emit);
* context or not. One device can have multiple separate contexts,
* and they're used if some engine can run independently of another.
*/
-static atomic_t fence_context_counter = ATOMIC_INIT(0);
+static atomic64_t fence_context_counter = ATOMIC64_INIT(0);
/**
* fence_context_alloc - allocate an array of fence contexts
@@ -44,10 +44,10 @@ static atomic_t fence_context_counter = ATOMIC_INIT(0);
* This function will return the first index of the number of fence contexts allocated.
* The fence context is used for setting fence->context to a unique number.
*/
-unsigned fence_context_alloc(unsigned num)
+u64 fence_context_alloc(unsigned num)
{
BUG_ON(!num);
- return atomic_add_return(num, &fence_context_counter) - num;
+ return atomic64_add_return(num, &fence_context_counter) - num;
}
EXPORT_SYMBOL(fence_context_alloc);
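
Contexts are allocated monotonically and never recycled, so a 32-bit counter can wrap on a long-running machine; widening the counter and the return type to 64 bit removes that limit. A minimal sketch of the allocation pattern this API serves, one context per ring as amdgpu/radeon do it (all mydrv_* names are illustrative, not from the patch):

#include <linux/fence.h>
#include <linux/spinlock.h>

#define MYDRV_MAX_RINGS 8

struct mydrv_device {
	u64		fence_context;	/* first of MYDRV_MAX_RINGS contexts */
	spinlock_t	fence_lock;
	unsigned	seqno[MYDRV_MAX_RINGS];
};

static void mydrv_fence_setup(struct mydrv_device *dev)
{
	spin_lock_init(&dev->fence_lock);
	/* reserve a contiguous block of context numbers, one per ring */
	dev->fence_context = fence_context_alloc(MYDRV_MAX_RINGS);
}

static void mydrv_fence_emit(struct mydrv_device *dev, struct fence *f,
			     const struct fence_ops *ops, unsigned ring)
{
	/* each ring stamps its fences with its own 64-bit context */
	fence_init(f, ops, &dev->fence_lock,
		   dev->fence_context + ring, ++dev->seqno[ring]);
}
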
@@ -513,7 +513,7 @@ EXPORT_SYMBOL(fence_wait_any_timeout);
*/
void
fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, unsigned context, unsigned seqno)
+ spinlock_t *lock, u64 context, unsigned seqno)
{
BUG_ON(!lock);
BUG_ON(!ops || !ops->wait || !ops->enable_signaling ||
@@ -2045,7 +2045,7 @@ struct amdgpu_device {
struct amdgpu_irq_src hpd_irq;
/* rings */
- unsigned fence_context;
+ u64 fence_context;
unsigned num_rings;
struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
bool ib_pool_ready;
@@ -428,7 +428,7 @@ void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
soffset, eoffset, eoffset - soffset);
if (i->fence)
- seq_printf(m, " protected by 0x%08x on context %d",
+ seq_printf(m, " protected by 0x%08x on context %llu",
i->fence->seqno, i->fence->context);
seq_printf(m, "\n");
@@ -123,7 +123,7 @@ struct etnaviv_gpu {
u32 completed_fence;
u32 retired_fence;
wait_queue_head_t fence_event;
- unsigned int fence_context;
+ u64 fence_context;
spinlock_t fence_spinlock;
/* worker for handling active-list retiring: */
@@ -57,7 +57,8 @@ struct nouveau_fence_priv {
int (*context_new)(struct nouveau_channel *);
void (*context_del)(struct nouveau_channel *);
- u32 contexts, context_base;
+ u32 contexts;
+ u64 context_base;
bool uevent;
};
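
nouveau keeps the per-device context count at 32 bit and only widens the base, since a channel's fence context is derived by offsetting into the globally allocated block. A sketch of that derivation, assuming the usual context_base + chid scheme (the helper name is made up):

static u64 nouveau_chan_fence_context(struct nouveau_fence_priv *priv,
				      u32 chid)
{
	/* one fence context per channel, offset from the 64-bit base */
	return priv->context_base + chid;
}
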
@@ -96,7 +96,7 @@ retry:
return 0;
if (have_drawable_releases && sc > 300) {
- FENCE_WARN(fence, "failed to wait on release %d "
+ FENCE_WARN(fence, "failed to wait on release %llu "
"after spincount %d\n",
fence->context & ~0xf0000000, sc);
goto signaled;
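
A subtlety in this hunk: the mask stays a 32-bit constant. 0xf0000000 does not fit in int, so it has type unsigned int, ~0xf0000000 is 0x0fffffffu, and the usual arithmetic conversions zero-extend it for the AND, which therefore also clears the upper 32 bits of the now-64-bit context. A sketch of the arithmetic (qxl appears to pack a release id into the low bits, hence the mask; the helper name is hypothetical):

#include <linux/types.h>

/* ~0xf0000000 == 0x0fffffffu; zero-extended to 64 bits for the AND,
 * so the upper half of the context is cleared as well: */
static u64 qxl_release_id_of(u64 context)
{
	return context & ~0xf0000000;	/* == context & 0x000000000fffffffULL */
}
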
@@ -2386,7 +2386,7 @@ struct radeon_device {
struct radeon_mman mman;
struct radeon_fence_driver fence_drv[RADEON_NUM_RINGS];
wait_queue_head_t fence_queue;
- unsigned fence_context;
+ u64 fence_context;
struct mutex ring_lock;
struct radeon_ring ring[RADEON_NUM_RINGS];
bool ib_pool_ready;
@@ -46,7 +46,7 @@ struct vmw_fence_manager {
bool goal_irq_on; /* Protected by @goal_irq_mutex */
bool seqno_valid; /* Protected by @lock, and may not be set to true
without the @goal_irq_mutex held. */
- unsigned ctx;
+ u64 ctx;
};
struct vmw_user_fence {
@@ -96,7 +96,8 @@ struct sync_timeline {
/* protected by child_list_lock */
bool destroyed;
- int context, value;
+ u64 context;
+ int value;
struct list_head child_list_head;
spinlock_t child_list_lock;
@@ -75,7 +75,8 @@ struct fence {
struct rcu_head rcu;
struct list_head cb_list;
spinlock_t *lock;
- unsigned context, seqno;
+ u64 context;
+ unsigned seqno;
unsigned long flags;
ktime_t timestamp;
int status;
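
The context is what makes seqnos comparable at all: two fences are ordered only if they come from the same context, so the field is checked before any seqno arithmetic (fence.h provides fence_is_later() for this). A wrap-safe sketch of that rule, with an illustrative name:

static inline bool mydrv_fence_after(struct fence *a, struct fence *b)
{
	if (a->context != b->context)
		return false;			/* different timelines: unordered */
	return (int)(a->seqno - b->seqno) > 0;	/* wrap-safe seqno compare */
}
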
@@ -176,7 +177,7 @@ struct fence_ops {
};
void fence_init(struct fence *fence, const struct fence_ops *ops,
- spinlock_t *lock, unsigned context, unsigned seqno);
+ spinlock_t *lock, u64 context, unsigned seqno);
void fence_release(struct kref *kref);
void fence_free(struct fence *fence);
@@ -350,27 +351,27 @@ static inline signed long fence_wait(struct fence *fence, bool intr)
return ret < 0 ? ret : 0;
}
-unsigned fence_context_alloc(unsigned num);
+u64 fence_context_alloc(unsigned num);
#define FENCE_TRACE(f, fmt, args...) \
do { \
struct fence *__ff = (f); \
if (config_enabled(CONFIG_FENCE_TRACE)) \
- pr_info("f %u#%u: " fmt, \
+ pr_info("f %llu#%u: " fmt, \
__ff->context, __ff->seqno, ##args); \
} while (0)
#define FENCE_WARN(f, fmt, args...) \
do { \
struct fence *__ff = (f); \
- pr_warn("f %u#%u: " fmt, __ff->context, __ff->seqno, \
+ pr_warn("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
##args); \
} while (0)
#define FENCE_ERR(f, fmt, args...) \
do { \
struct fence *__ff = (f); \
- pr_err("f %u#%u: " fmt, __ff->context, __ff->seqno, \
+ pr_err("f %llu#%u: " fmt, __ff->context, __ff->seqno, \
##args); \
} while (0)
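
Callers of these macros are unaffected by the format change, since the "f %llu#%u: " prefix is built inside the macro. A usage sketch (mydrv_fence_signal is illustrative; fence_signal() returns nonzero if the fence was already signaled):

static void mydrv_fence_signal(struct fence *f)
{
	FENCE_TRACE(f, "signaled\n");
	if (fence_signal(f))
		FENCE_WARN(f, "already signaled\n");
}
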