[RFC,02/11] rust: add `pages` module for handling page allocation

Message ID 20230503090708.2524310-3-nmi@metaspace.dk (mailing list archive)
State New, archived
Series Rust null block driver

Commit Message

Andreas Hindborg May 3, 2023, 9:06 a.m. UTC
From: Andreas Hindborg <a.hindborg@samsung.com>

This patch adds support for working with pages of order 0. Support for pages
with higher order is deferred. Page allocation flags are fixed in this patch.
Future work might allow the user to specify allocation flags.

This patch is a heavily modified version of code available in the rust tree [1],
primarily adding support for multiple page mapping strategies.

[1] https://github.com/rust-for-Linux/linux/tree/bc22545f38d74473cfef3e9fd65432733435b79f/rust/kernel/pages.rs
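
A minimal usage sketch of the API added here (hypothetical caller code,
inside a function returning `Result`):

    let pages = Pages::<0>::new()?; // one order-0 page, zeroed
    let mut buf = [0u8; 64];
    // SAFETY: `buf` is valid for writes of 64 bytes and does not overlap
    // the page.
    unsafe { pages.read(buf.as_mut_ptr(), 0, buf.len())? };
    // SAFETY: `buf` is valid for reads of 64 bytes and does not overlap
    // the page.
    unsafe { pages.write(buf.as_ptr(), 0, buf.len())? };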

Signed-off-by: Andreas Hindborg <a.hindborg@samsung.com>
---
 rust/helpers.c       |  31 +++++
 rust/kernel/lib.rs   |   6 +
 rust/kernel/pages.rs | 284 +++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 321 insertions(+)
 create mode 100644 rust/kernel/pages.rs

Comments

Benno Lossin May 3, 2023, 12:38 p.m. UTC | #2

On 03.05.23 11:06, Andreas Hindborg wrote:
> From: Andreas Hindborg <a.hindborg@samsung.com>
>
> This patch adds support for working with pages of order 0. Support for pages
> with higher order is deferred. Page allocation flags are fixed in this patch.
> Future work might allow the user to specify allocation flags.
>
> This patch is a heavily modified version of code available in the rust tree [1],
> primarily adding support for multiple page mapping strategies.
>
> [1] https://github.com/rust-for-Linux/linux/tree/bc22545f38d74473cfef3e9fd65432733435b79f/rust/kernel/pages.rs
>
> Signed-off-by: Andreas Hindborg <a.hindborg@samsung.com>
> ---
>  rust/helpers.c       |  31 +++++
>  rust/kernel/lib.rs   |   6 +
>  rust/kernel/pages.rs | 284 +++++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 321 insertions(+)
>  create mode 100644 rust/kernel/pages.rs
>
> diff --git a/rust/helpers.c b/rust/helpers.c
> index 5dd5e325b7cc..9bd9d95da951 100644
> --- a/rust/helpers.c
> +++ b/rust/helpers.c
> @@ -27,6 +27,7 @@
>  #include <linux/sched/signal.h>
>  #include <linux/wait.h>
>  #include <linux/radix-tree.h>
> +#include <linux/highmem.h>
>
>  __noreturn void rust_helper_BUG(void)
>  {
> @@ -150,6 +151,36 @@ void **rust_helper_radix_tree_next_slot(void **slot,
>  }
>  EXPORT_SYMBOL_GPL(rust_helper_radix_tree_next_slot);
>
> +void *rust_helper_kmap(struct page *page)
> +{
> +	return kmap(page);
> +}
> +EXPORT_SYMBOL_GPL(rust_helper_kmap);
> +
> +void rust_helper_kunmap(struct page *page)
> +{
> +	return kunmap(page);
> +}
> +EXPORT_SYMBOL_GPL(rust_helper_kunmap);
> +
> +void *rust_helper_kmap_atomic(struct page *page)
> +{
> +	return kmap_atomic(page);
> +}
> +EXPORT_SYMBOL_GPL(rust_helper_kmap_atomic);
> +
> +void rust_helper_kunmap_atomic(void *address)
> +{
> +	kunmap_atomic(address);
> +}
> +EXPORT_SYMBOL_GPL(rust_helper_kunmap_atomic);
> +
> +struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
> +{
> +	return alloc_pages(gfp_mask, order);
> +}
> +EXPORT_SYMBOL_GPL(rust_helper_alloc_pages);
> +
>  /*
>   * We use `bindgen`'s `--size_t-is-usize` option to bind the C `size_t` type
>   * as the Rust `usize` type, so we can use it in contexts where Rust
> diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
> index a85cb6aae8d6..8bef6686504b 100644
> --- a/rust/kernel/lib.rs
> +++ b/rust/kernel/lib.rs
> @@ -38,6 +38,7 @@ mod build_assert;
>  pub mod error;
>  pub mod init;
>  pub mod ioctl;
> +pub mod pages;
>  pub mod prelude;
>  pub mod print;
>  pub mod radix_tree;
> @@ -57,6 +58,11 @@ pub use uapi;
>  #[doc(hidden)]
>  pub use build_error::build_error;
>
> +/// Page size defined in terms of the `PAGE_SHIFT` macro from C.

`PAGE_SHIFT` is not using a doc-link.

> +///
> +/// [`PAGE_SHIFT`]: ../../../include/asm-generic/page.h
> +pub const PAGE_SIZE: u32 = 1 << bindings::PAGE_SHIFT;

This should be of type `usize`.
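
I.e. something like this, which also turns `PAGE_SHIFT` into a doc-link:

    /// Page size defined in terms of the [`PAGE_SHIFT`] macro from C.
    ///
    /// [`PAGE_SHIFT`]: ../../../include/asm-generic/page.h
    pub const PAGE_SIZE: usize = 1 << bindings::PAGE_SHIFT;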

> +
>  /// Prefix to appear before log messages printed from within the `kernel` crate.
>  const __LOG_PREFIX: &[u8] = b"rust_kernel\0";
>
> diff --git a/rust/kernel/pages.rs b/rust/kernel/pages.rs
> new file mode 100644
> index 000000000000..ed51b053dd5d
> --- /dev/null
> +++ b/rust/kernel/pages.rs
> @@ -0,0 +1,284 @@
> +// SPDX-License-Identifier: GPL-2.0
> +
> +//! Kernel page allocation and management.
> +//!
> +//! This module currently provides limited support. It supports pages of order 0
> +//! for most operations. Page allocation flags are fixed.
> +
> +use crate::{bindings, error::code::*, error::Result, PAGE_SIZE};
> +use core::{marker::PhantomData, ptr};
> +
> +/// A set of physical pages.
> +///
> +/// `Pages` holds a reference to a set of pages of order `ORDER`. Having the order as a generic
> +/// const allows the struct to have the same size as a pointer.

I would remove the 'Having the order as a...' sentence, since that is
just an implementation detail.

> +///
> +/// # Invariants
> +///
> +/// The pointer `Pages::pages` is valid and points to 2^ORDER pages.

`Pages::pages` -> `pages`.

> +pub struct Pages<const ORDER: u32> {
> +    pub(crate) pages: *mut bindings::page,
> +}
> +
> +impl<const ORDER: u32> Pages<ORDER> {
> +    /// Allocates a new set of contiguous pages.
> +    pub fn new() -> Result<Self> {
> +        let pages = unsafe {
> +            bindings::alloc_pages(
> +                bindings::GFP_KERNEL | bindings::__GFP_ZERO | bindings::___GFP_HIGHMEM,
> +                ORDER,
> +            )
> +        };

Missing `SAFETY` comment.
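
Something like:

    // SAFETY: `alloc_pages` has no safety requirements on its arguments;
    // it returns either a valid page pointer or NULL, and NULL is
    // handled below.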

> +        if pages.is_null() {
> +            return Err(ENOMEM);
> +        }
> +        // INVARIANTS: We checked that the allocation above succeeded.
> +        // SAFETY: We allocated pages above
> +        Ok(unsafe { Self::from_raw(pages) })
> +    }
> +
> +    /// Create a `Pages` from a raw `struct page` pointer
> +    ///
> +    /// # Safety
> +    ///
> +    /// Caller must own the pages pointed to by `ptr` as these will be freed
> +    /// when the returned `Pages` is dropped.
> +    pub unsafe fn from_raw(ptr: *mut bindings::page) -> Self {
> +        Self { pages: ptr }
> +    }
> +}
> +
> +impl Pages<0> {
> +    #[inline(always)]

Is this really needed? I think this function should be inlined
automatically.

> +    fn check_offset_and_map<I: MappingInfo>(
> +        &self,
> +        offset: usize,
> +        len: usize,
> +    ) -> Result<PageMapping<'_, I>>
> +    where
> +        Pages<0>: MappingActions<I>,

Why not use `Self: MappingActions<I>`?
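
I.e.:

    where
        Self: MappingActions<I>,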

> +    {
> +        let end = offset.checked_add(len).ok_or(EINVAL)?;
> +        if end as u32 > PAGE_SIZE {

Remove the `as u32`, since `PAGE_SIZE` should be of type `usize`.

> +            return Err(EINVAL);

I think it would make sense to create a more descriptive Rust error with
a `From` impl to turn it into an `Error`. It is always better to know
from the signature exactly what can go wrong when calling a function.
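
For example (a rough sketch; the names are made up):

    /// The offset/length pair does not fit within a page.
    pub struct OutOfBounds;

    impl From<OutOfBounds> for Error {
        fn from(_: OutOfBounds) -> Error {
            EINVAL
        }
    }

`check_offset_and_map` could then return
`core::result::Result<PageMapping<'_, I>, OutOfBounds>`, making
out-of-bounds access the only failure mode visible in the signature.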

> +        }
> +
> +        let mapping = <Self as MappingActions<I>>::map(self);
> +
> +        Ok(mapping)

I would merge these lines.
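
I.e.:

    Ok(<Self as MappingActions<I>>::map(self))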

> +    }
> +
> +    #[inline(always)]
> +    unsafe fn read_internal<I: MappingInfo>(

Missing `# Safety` section.

> +        &self,
> +        dest: *mut u8,
> +        offset: usize,
> +        len: usize,
> +    ) -> Result
> +    where
> +        Pages<0>: MappingActions<I>,
> +    {
> +        let mapping = self.check_offset_and_map::<I>(offset, len)?;
> +
> +        unsafe { ptr::copy_nonoverlapping((mapping.ptr as *mut u8).add(offset), dest, len) };

Missing `SAFETY` comment. Replace `as *mut u8` with `.cast::<u8>()`.
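
I.e. something like:

    // SAFETY: `mapping.ptr` points to a mapped page of `PAGE_SIZE` bytes
    // and `offset + len <= PAGE_SIZE` was checked in
    // `check_offset_and_map`; the caller guarantees that `dest` is valid
    // for writes of `len` bytes and does not overlap the page.
    unsafe { ptr::copy_nonoverlapping(mapping.ptr.cast::<u8>().add(offset), dest, len) };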

> +        Ok(())
> +    }
> +
> +    /// Maps the pages and reads from them into the given buffer.
> +    ///
> +    /// # Safety
> +    ///
> +    /// Callers must ensure that the destination buffer is valid for the given
> +    /// length. Additionally, if the raw buffer is intended to be recast, they
> +    /// must ensure that the data can be safely cast;
> +    /// [`crate::io_buffer::ReadableFromBytes`] has more details about it.
> +    /// `dest` may not point to the source page.

- `dest` must be valid for writes of `len` bytes (suggested wording below).
- What is meant by 'the raw buffer is intended to be recast'?
- `io_buffer` does not yet exist in `rust-next`.
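
Suggested wording:

    /// # Safety
    ///
    /// - `dest` must be valid for writes of `len` bytes.
    /// - `dest` must not overlap the memory of the source page.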

> +    #[inline(always)]
> +    pub unsafe fn read(&self, dest: *mut u8, offset: usize, len: usize) -> Result {
> +        unsafe { self.read_internal::<NormalMappingInfo>(dest, offset, len) }

Missing `SAFETY` comment.

> +    }
> +
> +    /// Maps the pages and reads from them into the given buffer. The page is
> +    /// mapped atomically.
> +    ///
> +    /// # Safety
> +    ///
> +    /// Callers must ensure that the destination buffer is valid for the given
> +    /// length. Additionally, if the raw buffer is intended to be recast, they
> +    /// must ensure that the data can be safely cast;
> +    /// [`crate::io_buffer::ReadableFromBytes`] has more details about it.
> +    /// `dest` may not point to the source page.
> +    #[inline(always)]
> +    pub unsafe fn read_atomic(&self, dest: *mut u8, offset: usize, len: usize) -> Result {
> +        unsafe { self.read_internal::<AtomicMappingInfo>(dest, offset, len) }

Missing `SAFETY` comment.

> +    }
> +
> +    #[inline(always)]
> +    unsafe fn write_internal<I: MappingInfo>(

Missing `# Safety` section.

> +        &self,
> +        src: *const u8,
> +        offset: usize,
> +        len: usize,
> +    ) -> Result
> +    where
> +        Pages<0>: MappingActions<I>,
> +    {
> +        let mapping = self.check_offset_and_map::<I>(offset, len)?;
> +
> +        unsafe { ptr::copy_nonoverlapping(src, (mapping.ptr as *mut u8).add(offset), len) };

Missing `SAFETY` comment.

> +        Ok(())
> +    }
> +
> +    /// Maps the pages and writes into them from the given buffer.
> +    ///
> +    /// # Safety
> +    ///
> +    /// Callers must ensure that the buffer is valid for the given length.
> +    /// Additionally, if the page is (or will be) mapped by userspace, they must
> +    /// ensure that no kernel data is leaked through padding if it was cast from
> +    /// another type; [`crate::io_buffer::WritableToBytes`] has more details
> +    /// about it. `src` must not point to the destination page.

`src` must be valid for reads of `len` bytes.

> +    #[inline(always)]
> +    pub unsafe fn write(&self, src: *const u8, offset: usize, len: usize) -> Result {
> +        unsafe { self.write_internal::<NormalMappingInfo>(src, offset, len) }
> +    }
> +
> +    /// Maps the pages and writes into them from the given buffer. The page is
> +    /// mapped atomically.
> +    ///
> +    /// # Safety
> +    ///
> +    /// Callers must ensure that the buffer is valid for the given length.
> +    /// Additionally, if the page is (or will be) mapped by userspace, they must
> +    /// ensure that no kernel data is leaked through padding if it was cast from
> +    /// another type; [`crate::io_buffer::WritableToBytes`] has more details
> +    /// about it. `src` must not point to the destination page.
> +    #[inline(always)]
> +    pub unsafe fn write_atomic(&self, src: *const u8, offset: usize, len: usize) -> Result {
> +        unsafe { self.write_internal::<AtomicMappingInfo>(src, offset, len) }
> +    }
> +
> +    /// Maps the page at index 0.
> +    #[inline(always)]
> +    pub fn kmap(&self) -> PageMapping<'_, NormalMappingInfo> {
> +        let ptr = unsafe { bindings::kmap(self.pages) };

Missing `SAFETY` comment.

> +
> +        PageMapping {
> +            page: self.pages,
> +            ptr,
> +            _phantom: PhantomData,
> +            _phantom2: PhantomData,
> +        }
> +    }
> +
> +    /// Atomically Maps the page at index 0.
> +    #[inline(always)]
> +    pub fn kmap_atomic(&self) -> PageMapping<'_, AtomicMappingInfo> {
> +        let ptr = unsafe { bindings::kmap_atomic(self.pages) };

Missing `SAFETY` comment.

> +
> +        PageMapping {
> +            page: self.pages,
> +            ptr,
> +            _phantom: PhantomData,
> +            _phantom2: PhantomData,
> +        }
> +    }
> +}
> +
> +impl<const ORDER: u32> Drop for Pages<ORDER> {
> +    fn drop(&mut self) {
> +        // SAFETY: By the type invariants, we know the pages are allocated with the given order.
> +        unsafe { bindings::__free_pages(self.pages, ORDER) };
> +    }
> +}
> +
> +/// Specifies the type of page mapping
> +pub trait MappingInfo {}
> +
> +/// Encapsulates methods to map and unmap pages
> +pub trait MappingActions<I: MappingInfo>
> +where
> +    Pages<0>: MappingActions<I>,
> +{
> +    /// Map a page into the kernel address scpace

Typo.

> +    fn map(pages: &Pages<0>) -> PageMapping<'_, I>;
> +
> +    /// Unmap a page specified by `mapping`
> +    ///
> +    /// # Safety
> +    ///
> +    /// Must only be called by `PageMapping::drop()`.
> +    unsafe fn unmap(mapping: &PageMapping<'_, I>);
> +}
> +
> +/// A type state indicating that pages were mapped atomically
> +pub struct AtomicMappingInfo;
> +impl MappingInfo for AtomicMappingInfo {}
> +
> +/// A type state indicating that pages were not mapped atomically
> +pub struct NormalMappingInfo;
> +impl MappingInfo for NormalMappingInfo {}
> +
> +impl MappingActions<AtomicMappingInfo> for Pages<0> {
> +    #[inline(always)]
> +    fn map(pages: &Pages<0>) -> PageMapping<'_, AtomicMappingInfo> {
> +        pages.kmap_atomic()
> +    }
> +
> +    #[inline(always)]
> +    unsafe fn unmap(mapping: &PageMapping<'_, AtomicMappingInfo>) {
> +        // SAFETY: An instance of `PageMapping` is created only when `kmap` succeeded for the given
> +        // page, so it is safe to unmap it here.
> +        unsafe { bindings::kunmap_atomic(mapping.ptr) };
> +    }
> +}
> +
> +impl MappingActions<NormalMappingInfo> for Pages<0> {
> +    #[inline(always)]
> +    fn map(pages: &Pages<0>) -> PageMapping<'_, NormalMappingInfo> {
> +        pages.kmap()
> +    }
> +
> +    #[inline(always)]
> +    unsafe fn unmap(mapping: &PageMapping<'_, NormalMappingInfo>) {
> +        // SAFETY: An instance of `PageMapping` is created only when `kmap` succeeded for the given
> +        // page, so it is safe to unmap it here.
> +        unsafe { bindings::kunmap(mapping.page) };
> +    }
> +}

I am not sure if this is the best implementation. Why do the `kmap` and
`kmap_atomic` functions exist? Would it not make sense to implement
them entirely in `MappingActions::map`?
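
I.e. the mapping logic could live entirely in the impls, roughly:

    impl MappingActions<NormalMappingInfo> for Pages<0> {
        #[inline(always)]
        fn map(pages: &Pages<0>) -> PageMapping<'_, NormalMappingInfo> {
            // SAFETY: `pages.pages` is valid by the invariants of `Pages`.
            let ptr = unsafe { bindings::kmap(pages.pages) };
            PageMapping {
                page: pages.pages,
                ptr,
                _phantom: PhantomData,
                _phantom2: PhantomData,
            }
        }

        // `unmap` unchanged.
    }

with the `AtomicMappingInfo` impl done analogously via `kmap_atomic`, and
`Pages::kmap`/`Pages::kmap_atomic` either dropped or forwarding to
`<Self as MappingActions<_>>::map`.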

> +
> +/// An owned page mapping. When this struct is dropped, the page is unmapped.
> +pub struct PageMapping<'a, I: MappingInfo>
> +where
> +    Pages<0>: MappingActions<I>,
> +{
> +    page: *mut bindings::page,
> +    ptr: *mut core::ffi::c_void,
> +    _phantom: PhantomData<&'a i32>,
> +    _phantom2: PhantomData<I>,
> +}
> +
> +impl<'a, I: MappingInfo> PageMapping<'a, I>
> +where
> +    Pages<0>: MappingActions<I>,
> +{
> +    /// Return a pointer to the wrapped `struct page`
> +    #[inline(always)]
> +    pub fn get_ptr(&self) -> *mut core::ffi::c_void {
> +        self.ptr
> +    }
> +}
> +
> +// Because we do not have Drop specialization, we have to do this dance. Life
> +// would be much more simple if we could have `impl Drop for PageMapping<'_,
> +// Atomic>` and `impl Drop for PageMapping<'_, NotAtomic>`
> +impl<I: MappingInfo> Drop for PageMapping<'_, I>
> +where
> +    Pages<0>: MappingActions<I>,
> +{
> +    #[inline(always)]
> +    fn drop(&mut self) {
> +        // SAFETY: We are OK to call this because we are `PageMapping::drop()`
> +        unsafe { <Pages<0> as MappingActions<I>>::unmap(self) }
> +    }
> +}
> --
> 2.40.0

Here are some more general things:
- I think we could use this as an opportunity to add more docs about how
  paging works, or at least add some links to the C documentation.
- Can we improve the paging API? I have not given it any thought yet, but
  the current API looks very primitive. One possible direction is sketched
  below.
- Documentation comments should form complete sentences (so end with `.`).
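
Regarding the API, a closure-based accessor could avoid handing out raw
pointers entirely; unmapping would be enforced by scope. A rough,
hypothetical sketch (assuming the `usize` `PAGE_SIZE` suggested above,
and ignoring concurrent mappers):

    impl Pages<0> {
        /// Maps the page, passes a byte slice to `f`, unmaps on return.
        pub fn with_slice<R>(&self, f: impl FnOnce(&[u8]) -> R) -> R {
            let mapping = self.kmap();
            // SAFETY: `mapping` keeps the page mapped and valid for
            // `PAGE_SIZE` bytes until it is dropped, which happens after
            // `f` returns.
            let slice = unsafe {
                core::slice::from_raw_parts(
                    mapping.get_ptr().cast::<u8>(),
                    PAGE_SIZE,
                )
            };
            f(slice)
        }
    }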

--
Cheers,
Benno
Matthew Wilcox May 5, 2023, 4:09 a.m. UTC | #3
On Wed, May 03, 2023 at 11:06:59AM +0200, Andreas Hindborg wrote:
> From: Andreas Hindborg <a.hindborg@samsung.com>
> 
> This patch adds support for working with pages of order 0. Support for pages
> with higher order is deferred. Page allocation flags are fixed in this patch.
> Future work might allow the user to specify allocation flags.
> 
> This patch is a heavily modified version of code available in the rust tree [1],
> primarily adding support for multiple page mapping strategies.

This also seems misaligned with the direction of Linux development.
Folios are the future, pages are legacy.  Please, ask about what's
going on before wasting time on the past.
Andreas Hindborg May 5, 2023, 4:42 a.m. UTC | #4
Matthew Wilcox <willy@infradead.org> writes:

> On Wed, May 03, 2023 at 11:06:59AM +0200, Andreas Hindborg wrote:
>> From: Andreas Hindborg <a.hindborg@samsung.com>
>> 
>> This patch adds support for working with pages of order 0. Support for pages
>> with higher order is deferred. Page allocation flags are fixed in this patch.
>> Future work might allow the user to specify allocation flags.
>> 
>> This patch is a heavily modified version of code available in the rust tree [1],
>> primarily adding support for multiple page mapping strategies.
>
> This also seems misaligned with the direction of Linux development.
> Folios are the future, pages are legacy.  Please, ask about what's
> going on before wasting time on the past.

I see, thanks for the heads up! In this case I wanted to do an
apples-to-apples comparison with the C null_blk driver. Since that driver
uses kmap, I wanted to have that available. But let's bind to the folio_*
APIs in the future; that would make sense.

Best regards
Andreas
Matthew Wilcox May 5, 2023, 5:29 a.m. UTC | #5
On Fri, May 05, 2023 at 06:42:02AM +0200, Andreas Hindborg wrote:
> 
> Matthew Wilcox <willy@infradead.org> writes:
> 
> > On Wed, May 03, 2023 at 11:06:59AM +0200, Andreas Hindborg wrote:
> >> From: Andreas Hindborg <a.hindborg@samsung.com>
> >> 
> >> This patch adds support for working with pages of order 0. Support for pages
> >> with higher order is deferred. Page allocation flags are fixed in this patch.
> >> Future work might allow the user to specify allocation flags.
> >> 
> >> This patch is a heavily modified version of code available in the rust tree [1],
> >> primarily adding support for multiple page mapping strategies.
> >
> > This also seems misaligned with the direction of Linux development.
> > Folios are the future, pages are legacy.  Please, ask about what's
> > going on before wasting time on the past.
> 
> I see, thanks for the heads up! In this case I wanted to do an
> apples-apples comparison to the C null_blk driver. Since that is using
> kmap I wanted to have that. But let's bind to the folio_* APIs in the
> future, that would make sense.

Well, kmap() is essentially a no-op on 64-bit systems, so it's not
terribly relevant to doing a comparison.
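
(For reference: with !CONFIG_HIGHMEM, which covers all 64-bit
configurations, kmap() reduces to roughly

	static inline void *kmap(struct page *page)
	{
		might_sleep();
		return page_address(page);
	}

so the mapping itself is pure address arithmetic; kmap_atomic()
additionally disables pagefaults and preemption.)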

Patch

diff --git a/rust/helpers.c b/rust/helpers.c
index 5dd5e325b7cc..9bd9d95da951 100644
--- a/rust/helpers.c
+++ b/rust/helpers.c
@@ -27,6 +27,7 @@ 
 #include <linux/sched/signal.h>
 #include <linux/wait.h>
 #include <linux/radix-tree.h>
+#include <linux/highmem.h>
 
 __noreturn void rust_helper_BUG(void)
 {
@@ -150,6 +151,36 @@  void **rust_helper_radix_tree_next_slot(void **slot,
 }
 EXPORT_SYMBOL_GPL(rust_helper_radix_tree_next_slot);
 
+void *rust_helper_kmap(struct page *page)
+{
+	return kmap(page);
+}
+EXPORT_SYMBOL_GPL(rust_helper_kmap);
+
+void rust_helper_kunmap(struct page *page)
+{
+	return kunmap(page);
+}
+EXPORT_SYMBOL_GPL(rust_helper_kunmap);
+
+void *rust_helper_kmap_atomic(struct page *page)
+{
+	return kmap_atomic(page);
+}
+EXPORT_SYMBOL_GPL(rust_helper_kmap_atomic);
+
+void rust_helper_kunmap_atomic(void *address)
+{
+	kunmap_atomic(address);
+}
+EXPORT_SYMBOL_GPL(rust_helper_kunmap_atomic);
+
+struct page *rust_helper_alloc_pages(gfp_t gfp_mask, unsigned int order)
+{
+	return alloc_pages(gfp_mask, order);
+}
+EXPORT_SYMBOL_GPL(rust_helper_alloc_pages);
+
 /*
  * We use `bindgen`'s `--size_t-is-usize` option to bind the C `size_t` type
  * as the Rust `usize` type, so we can use it in contexts where Rust
diff --git a/rust/kernel/lib.rs b/rust/kernel/lib.rs
index a85cb6aae8d6..8bef6686504b 100644
--- a/rust/kernel/lib.rs
+++ b/rust/kernel/lib.rs
@@ -38,6 +38,7 @@  mod build_assert;
 pub mod error;
 pub mod init;
 pub mod ioctl;
+pub mod pages;
 pub mod prelude;
 pub mod print;
 pub mod radix_tree;
@@ -57,6 +58,11 @@  pub use uapi;
 #[doc(hidden)]
 pub use build_error::build_error;
 
+/// Page size defined in terms of the `PAGE_SHIFT` macro from C.
+///
+/// [`PAGE_SHIFT`]: ../../../include/asm-generic/page.h
+pub const PAGE_SIZE: u32 = 1 << bindings::PAGE_SHIFT;
+
 /// Prefix to appear before log messages printed from within the `kernel` crate.
 const __LOG_PREFIX: &[u8] = b"rust_kernel\0";
 
diff --git a/rust/kernel/pages.rs b/rust/kernel/pages.rs
new file mode 100644
index 000000000000..ed51b053dd5d
--- /dev/null
+++ b/rust/kernel/pages.rs
@@ -0,0 +1,284 @@ 
+// SPDX-License-Identifier: GPL-2.0
+
+//! Kernel page allocation and management.
+//!
+//! This module currently provides limited support. It supports pages of order 0
+//! for most operations. Page allocation flags are fixed.
+
+use crate::{bindings, error::code::*, error::Result, PAGE_SIZE};
+use core::{marker::PhantomData, ptr};
+
+/// A set of physical pages.
+///
+/// `Pages` holds a reference to a set of pages of order `ORDER`. Having the order as a generic
+/// const allows the struct to have the same size as a pointer.
+///
+/// # Invariants
+///
+/// The pointer `Pages::pages` is valid and points to 2^ORDER pages.
+pub struct Pages<const ORDER: u32> {
+    pub(crate) pages: *mut bindings::page,
+}
+
+impl<const ORDER: u32> Pages<ORDER> {
+    /// Allocates a new set of contiguous pages.
+    pub fn new() -> Result<Self> {
+        let pages = unsafe {
+            bindings::alloc_pages(
+                bindings::GFP_KERNEL | bindings::__GFP_ZERO | bindings::___GFP_HIGHMEM,
+                ORDER,
+            )
+        };
+        if pages.is_null() {
+            return Err(ENOMEM);
+        }
+        // INVARIANTS: We checked that the allocation above succeeded.
+        // SAFETY: We allocated pages above
+        Ok(unsafe { Self::from_raw(pages) })
+    }
+
+    /// Create a `Pages` from a raw `struct page` pointer
+    ///
+    /// # Safety
+    ///
+    /// Caller must own the pages pointed to by `ptr` as these will be freed
+    /// when the returned `Pages` is dropped.
+    pub unsafe fn from_raw(ptr: *mut bindings::page) -> Self {
+        Self { pages: ptr }
+    }
+}
+
+impl Pages<0> {
+    #[inline(always)]
+    fn check_offset_and_map<I: MappingInfo>(
+        &self,
+        offset: usize,
+        len: usize,
+    ) -> Result<PageMapping<'_, I>>
+    where
+        Pages<0>: MappingActions<I>,
+    {
+        let end = offset.checked_add(len).ok_or(EINVAL)?;
+        if end as u32 > PAGE_SIZE {
+            return Err(EINVAL);
+        }
+
+        let mapping = <Self as MappingActions<I>>::map(self);
+
+        Ok(mapping)
+    }
+
+    #[inline(always)]
+    unsafe fn read_internal<I: MappingInfo>(
+        &self,
+        dest: *mut u8,
+        offset: usize,
+        len: usize,
+    ) -> Result
+    where
+        Pages<0>: MappingActions<I>,
+    {
+        let mapping = self.check_offset_and_map::<I>(offset, len)?;
+
+        unsafe { ptr::copy_nonoverlapping((mapping.ptr as *mut u8).add(offset), dest, len) };
+        Ok(())
+    }
+
+    /// Maps the pages and reads from them into the given buffer.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that the destination buffer is valid for the given
+    /// length. Additionally, if the raw buffer is intended to be recast, they
+    /// must ensure that the data can be safely cast;
+    /// [`crate::io_buffer::ReadableFromBytes`] has more details about it.
+    /// `dest` may not point to the source page.
+    #[inline(always)]
+    pub unsafe fn read(&self, dest: *mut u8, offset: usize, len: usize) -> Result {
+        unsafe { self.read_internal::<NormalMappingInfo>(dest, offset, len) }
+    }
+
+    /// Maps the pages and reads from them into the given buffer. The page is
+    /// mapped atomically.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that the destination buffer is valid for the given
+    /// length. Additionally, if the raw buffer is intended to be recast, they
+    /// must ensure that the data can be safely cast;
+    /// [`crate::io_buffer::ReadableFromBytes`] has more details about it.
+    /// `dest` may not point to the source page.
+    #[inline(always)]
+    pub unsafe fn read_atomic(&self, dest: *mut u8, offset: usize, len: usize) -> Result {
+        unsafe { self.read_internal::<AtomicMappingInfo>(dest, offset, len) }
+    }
+
+    #[inline(always)]
+    unsafe fn write_internal<I: MappingInfo>(
+        &self,
+        src: *const u8,
+        offset: usize,
+        len: usize,
+    ) -> Result
+    where
+        Pages<0>: MappingActions<I>,
+    {
+        let mapping = self.check_offset_and_map::<I>(offset, len)?;
+
+        unsafe { ptr::copy_nonoverlapping(src, (mapping.ptr as *mut u8).add(offset), len) };
+        Ok(())
+    }
+
+    /// Maps the pages and writes into them from the given buffer.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that the buffer is valid for the given length.
+    /// Additionally, if the page is (or will be) mapped by userspace, they must
+    /// ensure that no kernel data is leaked through padding if it was cast from
+    /// another type; [`crate::io_buffer::WritableToBytes`] has more details
+    /// about it. `src` must not point to the destination page.
+    #[inline(always)]
+    pub unsafe fn write(&self, src: *const u8, offset: usize, len: usize) -> Result {
+        unsafe { self.write_internal::<NormalMappingInfo>(src, offset, len) }
+    }
+
+    /// Maps the pages and writes into them from the given buffer. The page is
+    /// mapped atomically.
+    ///
+    /// # Safety
+    ///
+    /// Callers must ensure that the buffer is valid for the given length.
+    /// Additionally, if the page is (or will be) mapped by userspace, they must
+    /// ensure that no kernel data is leaked through padding if it was cast from
+    /// another type; [`crate::io_buffer::WritableToBytes`] has more details
+    /// about it. `src` must not point to the destination page.
+    #[inline(always)]
+    pub unsafe fn write_atomic(&self, src: *const u8, offset: usize, len: usize) -> Result {
+        unsafe { self.write_internal::<AtomicMappingInfo>(src, offset, len) }
+    }
+
+    /// Maps the page at index 0.
+    #[inline(always)]
+    pub fn kmap(&self) -> PageMapping<'_, NormalMappingInfo> {
+        let ptr = unsafe { bindings::kmap(self.pages) };
+
+        PageMapping {
+            page: self.pages,
+            ptr,
+            _phantom: PhantomData,
+            _phantom2: PhantomData,
+        }
+    }
+
+    /// Atomically Maps the page at index 0.
+    #[inline(always)]
+    pub fn kmap_atomic(&self) -> PageMapping<'_, AtomicMappingInfo> {
+        let ptr = unsafe { bindings::kmap_atomic(self.pages) };
+
+        PageMapping {
+            page: self.pages,
+            ptr,
+            _phantom: PhantomData,
+            _phantom2: PhantomData,
+        }
+    }
+}
+
+impl<const ORDER: u32> Drop for Pages<ORDER> {
+    fn drop(&mut self) {
+        // SAFETY: By the type invariants, we know the pages are allocated with the given order.
+        unsafe { bindings::__free_pages(self.pages, ORDER) };
+    }
+}
+
+/// Specifies the type of page mapping
+pub trait MappingInfo {}
+
+/// Encapsulates methods to map and unmap pages
+pub trait MappingActions<I: MappingInfo>
+where
+    Pages<0>: MappingActions<I>,
+{
+    /// Map a page into the kernel address scpace
+    fn map(pages: &Pages<0>) -> PageMapping<'_, I>;
+
+    /// Unmap a page specified by `mapping`
+    ///
+    /// # Safety
+    ///
+    /// Must only be called by `PageMapping::drop()`.
+    unsafe fn unmap(mapping: &PageMapping<'_, I>);
+}
+
+/// A type state indicating that pages were mapped atomically
+pub struct AtomicMappingInfo;
+impl MappingInfo for AtomicMappingInfo {}
+
+/// A type state indicating that pages were not mapped atomically
+pub struct NormalMappingInfo;
+impl MappingInfo for NormalMappingInfo {}
+
+impl MappingActions<AtomicMappingInfo> for Pages<0> {
+    #[inline(always)]
+    fn map(pages: &Pages<0>) -> PageMapping<'_, AtomicMappingInfo> {
+        pages.kmap_atomic()
+    }
+
+    #[inline(always)]
+    unsafe fn unmap(mapping: &PageMapping<'_, AtomicMappingInfo>) {
+        // SAFETY: An instance of `PageMapping` is created only when `kmap` succeeded for the given
+        // page, so it is safe to unmap it here.
+        unsafe { bindings::kunmap_atomic(mapping.ptr) };
+    }
+}
+
+impl MappingActions<NormalMappingInfo> for Pages<0> {
+    #[inline(always)]
+    fn map(pages: &Pages<0>) -> PageMapping<'_, NormalMappingInfo> {
+        pages.kmap()
+    }
+
+    #[inline(always)]
+    unsafe fn unmap(mapping: &PageMapping<'_, NormalMappingInfo>) {
+        // SAFETY: An instance of `PageMapping` is created only when `kmap` succeeded for the given
+        // page, so it is safe to unmap it here.
+        unsafe { bindings::kunmap(mapping.page) };
+    }
+}
+
+/// An owned page mapping. When this struct is dropped, the page is unmapped.
+pub struct PageMapping<'a, I: MappingInfo>
+where
+    Pages<0>: MappingActions<I>,
+{
+    page: *mut bindings::page,
+    ptr: *mut core::ffi::c_void,
+    _phantom: PhantomData<&'a i32>,
+    _phantom2: PhantomData<I>,
+}
+
+impl<'a, I: MappingInfo> PageMapping<'a, I>
+where
+    Pages<0>: MappingActions<I>,
+{
+    /// Return a pointer to the wrapped `struct page`
+    #[inline(always)]
+    pub fn get_ptr(&self) -> *mut core::ffi::c_void {
+        self.ptr
+    }
+}
+
+// Because we do not have Drop specialization, we have to do this dance. Life
+// would be much more simple if we could have `impl Drop for PageMapping<'_,
+// Atomic>` and `impl Drop for PageMapping<'_, NotAtomic>`
+impl<I: MappingInfo> Drop for PageMapping<'_, I>
+where
+    Pages<0>: MappingActions<I>,
+{
+    #[inline(always)]
+    fn drop(&mut self) {
+        // SAFETY: We are OK to call this because we are `PageMapping::drop()`
+        unsafe { <Pages<0> as MappingActions<I>>::unmap(self) }
+    }
+}