@@ -161,13 +161,11 @@ device must complete the update before the driver callback returns.
When the device driver wants to populate a range of virtual addresses, it can
use::
- long hmm_range_fault(struct hmm_range *range, unsigned int flags);
+ long hmm_range_fault(struct hmm_range *range);
-With the HMM_RANGE_SNAPSHOT flag, it will only fetch present CPU page table
-entries and will not trigger a page fault on missing or non-present entries.
-Without that flag, it does trigger a page fault on missing or read-only entries
-if write access is requested (see below). Page faults use the generic mm page
-fault code path just like a CPU page fault.
+It will trigger a page fault on missing or read-only entries if write access is
+requested (see below). Page faults use the generic mm page fault code path just
+like a CPU page fault.
-Both functions copy CPU page table entries into their pfns array argument. Each
+hmm_range_fault() copies CPU page table entries into its pfns array argument. Each
entry in that array corresponds to an address in the virtual range. HMM
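With the flag argument gone, snapshot behaviour is selected through the range's flag fields instead: hmm_range_fault() can only fault at all when the driver's HMM_PFN_VALID value is requestable (see hmm_can_fault() further down). A minimal sketch of both setups, assuming driver-defined flag values; the (1 << 63)/(1 << 62) encoding follows the permission example in hmm.rst::

  #include <linux/hmm.h>

  /* Driver-defined flag values, as in the hmm.rst permission example. */
  static const uint64_t my_flags[HMM_PFN_FLAG_MAX] = {
          [HMM_PFN_VALID] = 1UL << 63,
          [HMM_PFN_WRITE] = 1UL << 62,
  };

  static void my_setup_range(struct hmm_range *range, bool snapshot)
  {
          range->flags = my_flags;
          if (snapshot) {
                  /* Old HMM_FAULT_SNAPSHOT behaviour: no request can ever
                   * include the valid bit, so nothing is faulted in. */
                  range->default_flags = 0;
                  range->pfn_flags_mask = 0;
          } else {
                  /* Fault every page in with at least read permission. */
                  range->default_flags = my_flags[HMM_PFN_VALID];
                  range->pfn_flags_mask = 0;
          }
  }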
@@ -197,7 +195,7 @@ The usage pattern is::
again:
range.notifier_seq = mmu_interval_read_begin(&interval_sub);
down_read(&mm->mmap_sem);
- ret = hmm_range_fault(&range, HMM_RANGE_SNAPSHOT);
+ ret = hmm_range_fault(&range);
if (ret) {
up_read(&mm->mmap_sem);
if (ret == -EBUSY)
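The hunk stops at the -EBUSY check; for completeness, the documented pattern goes on to revalidate the notifier sequence under the driver's own lock before the pfns are used. Roughly, following hmm.rst (take_lock()/release_lock() stand in for driver-specific locking)::

              goto again;
          return ret;
      }
      up_read(&mm->mmap_sem);

      take_lock(driver->update);
      if (mmu_interval_read_retry(&interval_sub, range.notifier_seq)) {
          release_lock(driver->update);
          goto again;
      }

      /* Use pfns array content to update the device page table. */

      release_lock(driver->update);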
@@ -856,7 +856,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
down_read(&mm->mmap_sem);
- r = hmm_range_fault(range, 0);
+ r = hmm_range_fault(range);
up_read(&mm->mmap_sem);
if (unlikely(r <= 0)) {
/*
@@ -540,7 +540,7 @@ static int nouveau_range_fault(struct nouveau_svmm *svmm,
range.default_flags = 0;
range.pfn_flags_mask = -1UL;
down_read(&mm->mmap_sem);
- ret = hmm_range_fault(&range, 0);
+ ret = hmm_range_fault(&range);
up_read(&mm->mmap_sem);
if (ret <= 0) {
if (ret == 0 || ret == -EBUSY)
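Both converted callers passed flags == 0, i.e. full faulting, so their behaviour is unchanged: with pfn_flags_mask = -1UL the per-pfn request bits keep driving the fault decision. The per-page permission idiom documented in hmm.rst likewise needs no flag argument, e.g. (flag values are driver-defined as above; index_of_write is illustrative)::

      range.default_flags = 1UL << 63;          /* valid: at least read everywhere */
      range.pfn_flags_mask = 1UL << 62;         /* only the write bit passes through */
      range.pfns[index_of_write] = 1UL << 62;   /* this one page: fault for write */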
@@ -117,13 +117,10 @@ static inline struct page *hmm_device_entry_to_page(const struct hmm_range *rang
return pfn_to_page(entry >> range->pfn_shift);
}
-/* Don't fault in missing PTEs, just snapshot the current state. */
-#define HMM_FAULT_SNAPSHOT (1 << 1)
-
/*
* Please see Documentation/vm/hmm.rst for how to use the range API.
*/
-long hmm_range_fault(struct hmm_range *range, unsigned int flags);
+long hmm_range_fault(struct hmm_range *range);
/*
* HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
@@ -29,7 +29,6 @@
struct hmm_vma_walk {
struct hmm_range *range;
unsigned long last;
- unsigned int flags;
};
enum {
@@ -112,9 +111,6 @@ static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
{
struct hmm_range *range = hmm_vma_walk->range;
- if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
- return 0;
-
/*
- * So we not only consider the individual per page request we also
+ * So we not only consider the individual per-page request, we also
* consider the default flags requested for the range. The API can
@@ -142,15 +138,27 @@ static unsigned int hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
return 0;
}
+/*
+ * If the valid flag is masked off, and default_flags doesn't set valid, then
+ * hmm_pte_need_fault() always returns 0.
+ */
+static bool hmm_can_fault(struct hmm_range *range)
+{
+ return ((range->flags[HMM_PFN_VALID] & range->pfn_flags_mask) |
+ range->default_flags) &
+ range->flags[HMM_PFN_VALID];
+}
+
static unsigned int
hmm_range_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
const uint64_t *pfns, unsigned long npages,
uint64_t cpu_flags)
{
+ struct hmm_range *range = hmm_vma_walk->range;
unsigned int required_fault = 0;
unsigned long i;
- if (hmm_vma_walk->flags & HMM_FAULT_SNAPSHOT)
+ if (!hmm_can_fault(range))
return 0;
for (i = 0; i < npages; ++i) {
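This helper is what makes the explicit flag redundant: whether faulting is possible at all now falls out of the same default_flags/pfn_flags_mask combination that hmm_pte_need_fault() folds per page. Two illustrative settings, writing VALID for the driver's range->flags[HMM_PFN_VALID] value::

      /* Pure snapshot, the old HMM_FAULT_SNAPSHOT:
       * ((VALID & 0) | 0) & VALID == 0, so hmm_can_fault() is false. */
      range->default_flags = 0;
      range->pfn_flags_mask = 0;

      /* Per-pfn requests pass through unmasked:
       * ((VALID & ~0UL) | 0) & VALID != 0, so hmm_can_fault() is true. */
      range->default_flags = 0;
      range->pfn_flags_mask = -1UL;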
@@ -557,7 +565,6 @@ static const struct mm_walk_ops hmm_walk_ops = {
/**
* hmm_range_fault - try to fault some address in a virtual address range
* @range: argument structure
- * @flags: HMM_FAULT_* flags
*
* Return: the number of valid pages in range->pfns[] (from range start
* address), which may be zero. On error one of the following status codes
@@ -579,12 +586,11 @@ static const struct mm_walk_ops hmm_walk_ops = {
* On error, for one virtual address in the range, the function will mark the
* corresponding HMM pfn entry with an error flag.
*/
-long hmm_range_fault(struct hmm_range *range, unsigned int flags)
+long hmm_range_fault(struct hmm_range *range)
{
struct hmm_vma_walk hmm_vma_walk = {
.range = range,
.last = range->start,
- .flags = flags,
};
struct mm_struct *mm = range->notifier->mm;
int ret;