@@ -7,6 +7,7 @@
#include <xen/sizes.h>
#include <xen/types.h>
#include <xen/mm.h>
+#include <xen/lib.h>
#include <xen/list.h>
#include <xen/spinlock.h>
@@ -171,8 +172,8 @@ static int get_shm_pages(struct domain *d, struct ffa_shm_mem *shm,
for ( n = 0; n < range_count; n++ )
{
- page_count = read_atomic(&range[n].page_count);
- addr = read_atomic(&range[n].address);
+ page_count = ACCESS_ONCE(range[n].page_count);
+ addr = ACCESS_ONCE(range[n].address);
for ( m = 0; m < page_count; m++ )
{
if ( pg_idx >= shm->page_count )
@@ -527,13 +528,13 @@ void ffa_handle_mem_share(struct cpu_user_regs *regs)
goto out_unlock;
mem_access = ctx->tx + trans.mem_access_offs;
- if ( read_atomic(&mem_access->access_perm.perm) != FFA_MEM_ACC_RW )
+ if ( ACCESS_ONCE(mem_access->access_perm.perm) != FFA_MEM_ACC_RW )
{
ret = FFA_RET_NOT_SUPPORTED;
goto out_unlock;
}
- region_offs = read_atomic(&mem_access->region_offs);
+ region_offs = ACCESS_ONCE(mem_access->region_offs);
if ( sizeof(*region_descr) + region_offs > frag_len )
{
ret = FFA_RET_NOT_SUPPORTED;
@@ -541,8 +542,8 @@ void ffa_handle_mem_share(struct cpu_user_regs *regs)
}
region_descr = ctx->tx + region_offs;
- range_count = read_atomic(&region_descr->address_range_count);
- page_count = read_atomic(&region_descr->total_page_count);
+ range_count = ACCESS_ONCE(region_descr->address_range_count);
+ page_count = ACCESS_ONCE(region_descr->total_page_count);
if ( !page_count )
{
@@ -557,7 +558,7 @@ void ffa_handle_mem_share(struct cpu_user_regs *regs)
goto out_unlock;
}
shm->sender_id = trans.sender_id;
- shm->ep_id = read_atomic(&mem_access->access_perm.endpoint_id);
+ shm->ep_id = ACCESS_ONCE(mem_access->access_perm.endpoint_id);
/*
* Check that the Composite memory region descriptor fits.