@@ -83,18 +83,49 @@ bl_write_pagelist(struct pnfs_layout_type *lo,
 	return PNFS_NOT_ATTEMPTED;
}
+/* STUB */
+static void
+release_extents(struct pnfs_block_layout *bl,
+		struct nfs4_pnfs_layout_segment *range)
+{
+	return;
+}
+
+/* STUB */
+static void
+release_inval_marks(void)
+{
+	return;
+}
+
+/* Note we are relying on caller locking to prevent nasty races. */
static void
bl_free_layout(void *p)
{
+	struct pnfs_block_layout *bl = p;
+
 	dprintk("%s enter\n", __func__);
+	release_extents(bl, NULL);
+	release_inval_marks();
+	kfree(bl);
 	return;
}
static void *
bl_alloc_layout(struct pnfs_mount_type *mtype, struct inode *inode)
{
+	struct pnfs_block_layout *bl;
+
 	dprintk("%s enter\n", __func__);
-	return NULL;
+	bl = kzalloc(sizeof(*bl), GFP_KERNEL);
+	if (!bl)
+		return NULL;
+	spin_lock_init(&bl->bl_ext_lock);
+	INIT_LIST_HEAD(&bl->bl_extents[0]);
+	INIT_LIST_HEAD(&bl->bl_extents[1]);
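+	/* pnfs_blksize is in bytes; >> 9 converts it to 512-byte sectors */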
+	bl->bl_blocksize = NFS_SERVER(inode)->pnfs_blksize >> 9;
+	INIT_INVAL_MARKS(&bl->bl_inval, bl->bl_blocksize);
+	return bl;
}
static void
@@ -99,6 +99,31 @@ struct pnfs_blk_sig {
 	struct pnfs_blk_sig_comp si_comps[PNFS_BLOCK_MAX_SIG_COMP];
};
+struct pnfs_inval_markings {
+	/* STUB */
+};
+
+static inline void
+INIT_INVAL_MARKS(struct pnfs_inval_markings *marks, sector_t blocksize)
+{
+	/* STUB */
+}
+
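+/* Extents are kept on two lists in struct pnfs_block_layout, indexed by class */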
+enum extentclass4 {
+	RW_EXTENT = 0, /* READWRITE and INVAL */
+	RO_EXTENT = 1, /* READ and NONE */
+	EXTENT_LISTS = 2,
+};
+
+struct pnfs_block_layout {
+	struct pnfs_inval_markings bl_inval; /* tracks INVAL->RW transition */
+	spinlock_t bl_ext_lock; /* Protects list manipulation */
+	struct list_head bl_extents[EXTENT_LISTS]; /* R and RW extents */
+	sector_t bl_blocksize; /* Server blocksize in sectors */
+};
+
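+/* Pick the block layout driver's private data out of the generic layout */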
+#define BLK_LO2EXT(lo) ((struct pnfs_block_layout *)(lo)->ld_data)
+
uint32_t *blk_overflow(uint32_t *p, uint32_t *end, size_t nbytes);
#define BLK_READBUF(p, e, nbytes) do { \