@@ -102,6 +102,9 @@ struct iommufd_object *iommufd_get_object(struct iommufd_ctx *ictx, u32 id,
{
struct iommufd_object *obj;
+ if (iommufd_should_fail())
+ return ERR_PTR(-ENOENT);
+
xa_lock(&ictx->objects);
obj = xa_load(&ictx->objects, id);
if (!obj || (type != IOMMUFD_OBJ_ANY && obj->type != type) ||
@@ -80,6 +80,10 @@ static void *temp_kmalloc(size_t *size, void *backup, size_t backup_len)
if (*size < backup_len)
return backup;
+
+ if (!backup && iommufd_should_fail())
+ return NULL;
+
*size = min_t(size_t, *size, TEMP_MEMORY_LIMIT);
res = kmalloc(*size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
if (res)
@@ -482,6 +486,7 @@ static int pages_to_xarray(struct xarray *xa, unsigned long start_index,
unsigned long last_index, struct page **pages)
{
struct page **end_pages = pages + (last_index - start_index) + 1;
+ struct page **half_pages = pages + (end_pages - pages) / 2;
XA_STATE(xas, xa, start_index);
do {
@@ -489,6 +494,15 @@ static int pages_to_xarray(struct xarray *xa, unsigned long start_index,
xas_lock(&xas);
while (pages != end_pages) {
+ /* xarray does not participate in fault injection */
+ if (pages == half_pages && iommufd_should_fail()) {
+ xas_set_err(&xas, -EINVAL);
+ xas_unlock(&xas);
+ /* aka xas_destroy() */
+ xas_nomem(&xas, GFP_KERNEL);
+ goto err_clear;
+ }
+
old = xas_store(&xas, xa_mk_value(page_to_pfn(*pages)));
if (xas_error(&xas))
break;
@@ -499,6 +513,7 @@ static int pages_to_xarray(struct xarray *xa, unsigned long start_index,
xas_unlock(&xas);
} while (xas_nomem(&xas, GFP_KERNEL));
+err_clear:
if (xas_error(&xas)) {
if (xas.xa_index != start_index)
clear_xarray(xa, start_index, xas.xa_index - 1);
@@ -662,6 +677,10 @@ static int pfn_reader_user_pin(struct pfn_reader_user *user,
npages = min_t(unsigned long, last_index - start_index + 1,
user->upages_len / sizeof(*user->upages));
+
+ if (iommufd_should_fail())
+ return -EFAULT;
+
uptr = (uintptr_t)(pages->uptr + start_index * PAGE_SIZE);
if (!remote_mm)
rc = pin_user_pages_fast(uptr, npages, user->gup_flags,
@@ -806,6 +825,8 @@ static int pfn_reader_user_update_pinned(struct pfn_reader_user *user,
npages = pages->last_npinned - pages->npinned;
inc = false;
} else {
+ if (iommufd_should_fail())
+ return -ENOMEM;
npages = pages->npinned - pages->last_npinned;
inc = true;
}
@@ -1636,6 +1657,11 @@ static int iopt_pages_rw_page(struct iopt_pages *pages, unsigned long index,
return iopt_pages_rw_slow(pages, index, index, offset, data,
length, flags);
+ if (iommufd_should_fail()) {
+ rc = -EINVAL;
+ goto out_mmput;
+ }
+
mmap_read_lock(pages->source_mm);
rc = pin_user_pages_remote(
pages->source_mm, (uintptr_t)(pages->uptr + index * PAGE_SIZE),
This increases the coverage the fail_nth test gets, as well as via
syzkaller.

Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
---
 drivers/iommu/iommufd/main.c  |  3 +++
 drivers/iommu/iommufd/pages.c | 26 ++++++++++++++++++++++++++
 2 files changed, 29 insertions(+)