@@ -44,91 +44,15 @@ struct pmem_device {
static int pmem_major;
static atomic_t pmem_index;
-/*
- * direct translation from (pmem,sector) => void*
- * We do not require that sector be page aligned.
- * The return value will point to the beginning of the page containing the
- * given sector, not to the sector itself.
- */
-static void *pmem_lookup_pg_addr(struct pmem_device *pmem, sector_t sector)
-{
- size_t page_offset = sector >> PAGE_SECTORS_SHIFT;
- size_t offset = page_offset << PAGE_SHIFT;
-
- BUG_ON(offset >= pmem->size);
- return pmem->virt_addr + offset;
-}
-
-/* sector must be page aligned */
-static unsigned long pmem_lookup_pfn(struct pmem_device *pmem, sector_t sector)
-{
- size_t page_offset = sector >> PAGE_SECTORS_SHIFT;
-
- BUG_ON(sector & (PAGE_SECTORS - 1));
- return (pmem->phys_addr >> PAGE_SHIFT) + page_offset;
-}
-
-/*
- * sector is not required to be page aligned.
- * n is at most a single page, but could be less.
- */
-static void copy_to_pmem(struct pmem_device *pmem, const void *src,
- sector_t sector, size_t n)
-{
- void *dst;
- unsigned int offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
- size_t copy;
-
- BUG_ON(n > PAGE_SIZE);
-
- copy = min_t(size_t, n, PAGE_SIZE - offset);
- dst = pmem_lookup_pg_addr(pmem, sector);
- memcpy(dst + offset, src, copy);
-
- if (copy < n) {
- src += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- dst = pmem_lookup_pg_addr(pmem, sector);
- memcpy(dst, src, copy);
- }
-}
-
-/*
- * sector is not required to be page aligned.
- * n is at most a single page, but could be less.
- */
-static void copy_from_pmem(void *dst, struct pmem_device *pmem,
- sector_t sector, size_t n)
-{
- void *src;
- unsigned int offset = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
- size_t copy;
-
- BUG_ON(n > PAGE_SIZE);
-
- copy = min_t(size_t, n, PAGE_SIZE - offset);
- src = pmem_lookup_pg_addr(pmem, sector);
-
- memcpy(dst, src + offset, copy);
-
- if (copy < n) {
- dst += copy;
- sector += copy >> SECTOR_SHIFT;
- copy = n - copy;
- src = pmem_lookup_pg_addr(pmem, sector);
- memcpy(dst, src, copy);
- }
-}
-
static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
unsigned int len, unsigned int off, int rw,
sector_t sector)
{
void *mem = kmap_atomic(page);
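+ /* the pmem mapping is linear, so a sector translates to a plain byte offset */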
+ size_t pmem_off = sector << 9;
if (rw == READ) {
- copy_from_pmem(mem + off, pmem, sector, len);
+ memcpy(mem + off, pmem->virt_addr + pmem_off, len);
flush_dcache_page(page);
} else {
/*
@@ -136,7 +60,7 @@ static void pmem_do_bvec(struct pmem_device *pmem, struct page *page,
* NVDIMMs are actually durable before returning.
*/
flush_dcache_page(page);
- copy_to_pmem(pmem, mem + off, sector, len);
+ memcpy(pmem->virt_addr + pmem_off, mem + off, len);
}
kunmap_atomic(mem);
@@ -152,25 +76,32 @@ static void pmem_make_request(struct request_queue *q, struct bio *bio)
struct bvec_iter iter;
int err = 0;
- sector = bio->bi_iter.bi_sector;
if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) {
err = -EIO;
goto out;
}
- BUG_ON(bio->bi_rw & REQ_DISCARD);
+ if (WARN_ON(bio->bi_rw & REQ_DISCARD)) {
+ err = -EINVAL;
+ goto out;
+ }
rw = bio_rw(bio);
if (rw == READA)
rw = READ;
+ sector = bio->bi_iter.bi_sector;
bio_for_each_segment(bvec, bio, iter) {
- unsigned int len = bvec.bv_len;
-
- BUG_ON(len > PAGE_SIZE);
- pmem_do_bvec(pmem, bvec.bv_page, len,
- bvec.bv_offset, rw, sector);
- sector += len >> SECTOR_SHIFT;
+ /* NOTE: bv_len can reportedly be bigger than PAGE_SIZE when bv_page
+ * points to a physically contiguous set of PFNs. That is fine for
+ * us, because it means the kernel virtual mapping is contiguous as
+ * well, and on the pmem side the mapping is always contiguous, both
+ * virtually and physically.
+ */
+ pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len, bvec.bv_offset,
+ rw, sector);
+ sector += bvec.bv_len >> 9;
}
out:
@@ -191,14 +122,15 @@ static long pmem_direct_access(struct block_device *bdev, sector_t sector,
void **kaddr, unsigned long *pfn, long size)
{
struct pmem_device *pmem = bdev->bd_disk->private_data;
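+ /* same linear translation as in pmem_do_bvec: sector N lives at byte N * 512 */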
+ size_t offset = sector << 9;
if (!pmem)
return -ENODEV;
- *kaddr = pmem_lookup_pg_addr(pmem, sector);
- *pfn = pmem_lookup_pfn(pmem, sector);
+ *kaddr = pmem->virt_addr + offset;
+ *pfn = (pmem->phys_addr + offset) >> PAGE_SHIFT;
- return pmem->size - (sector * 512);
+ return pmem->size - offset;
}
static const struct block_device_operations pmem_fops = {
Remove 89 lines of code to do a single memcpy. The reason it was done this way in brd (and done badly there, BTW) is that the destination memory is managed page by page. With pmem the destination is contiguous, so we can copy any size in one go.

[v2] Removed the BUG_ON checks on out-of-range IO. The block core already performs these checks, and I did not see them repeated in other drivers.

Signed-off-by: Boaz Harrosh <boaz@plexistor.com>
---
 drivers/block/pmem.c | 112 ++++++++++----------------------------------------
 1 file changed, 22 insertions(+), 90 deletions(-)
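
For reference, here is a stand-alone user-space sketch (not part of the patch; a calloc'd buffer merely stands in for pmem->virt_addr) contrasting the removed page-by-page copy with the single memcpy that replaces it. Because the pmem mapping is linear, both produce identical bytes even when the IO straddles a page boundary:

```c
/* sketch.c - illustration only; this is user-space code, not kernel code */
#include <assert.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define SECTOR_SHIFT		9
#define PAGE_SHIFT		12
#define PAGE_SIZE		(1ul << PAGE_SHIFT)
#define PAGE_SECTORS_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1ul << PAGE_SECTORS_SHIFT)

/* old scheme, as in the removed copy_to_pmem(): look up the page that
 * contains @sector, copy up to the page boundary, then look up the next
 * page and copy the remainder.
 */
static void copy_split(char *pmem, const char *src, uint64_t sector, size_t n)
{
	size_t off = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;
	size_t copy = n < PAGE_SIZE - off ? n : PAGE_SIZE - off;
	char *dst = pmem + ((sector >> PAGE_SECTORS_SHIFT) << PAGE_SHIFT);

	memcpy(dst + off, src, copy);
	if (copy < n) {		/* crossed a page boundary: second lookup */
		src += copy;
		sector += copy >> SECTOR_SHIFT;
		dst = pmem + ((sector >> PAGE_SECTORS_SHIFT) << PAGE_PAGE_SHIFT_UNUSED + 0 * 0 + PAGE_SHIFT);
		memcpy(dst, src, n - copy);
	}
}

/* new scheme: the mapping is contiguous, so one memcpy does it all */
static void copy_flat(char *pmem, const char *src, uint64_t sector, size_t n)
{
	memcpy(pmem + (sector << SECTOR_SHIFT), src, n);
}

int main(void)
{
	char *a = calloc(1, 8 * PAGE_SIZE);
	char *b = calloc(1, 8 * PAGE_SIZE);
	char src[1024];
	size_t i;

	for (i = 0; i < sizeof(src); i++)
		src[i] = (char)i;

	/* sector 7, 1024 bytes: straddles the first page boundary */
	copy_split(a, src, 7, sizeof(src));
	copy_flat(b, src, 7, sizeof(src));
	assert(memcmp(a, b, 8 * PAGE_SIZE) == 0);

	free(a);
	free(b);
	return 0;
}
```

brd cannot take this shortcut because its backing pages are allocated individually and looked up one at a time, so every page needs its own translation; pmem's single remapped range has no such seam.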