[v7,1/4] file-posix: add tracking of the zone write pointers

Message ID 20230323051907.5948-2-faithilikerun@gmail.com (mailing list archive)
State New, archived
Series Add zone append write for zoned device

Commit Message

Sam Li March 23, 2023, 5:19 a.m. UTC
Since Linux doesn't have a user API to issue zone append operations to
zoned devices from user space, the file-posix driver is modified to add
zone append emulation using regular writes. To do this, the file-posix
driver tracks the wp location of all zones of the device in an array of
uint64_t. The most significant bit of each wp location indicates whether
the zone is a conventional zone.

A zone's wp can change due to the following operations:
- zone reset: change the wp to the start offset of that zone
- zone finish: change the wp to the end location of that zone
- write to a zone
- zone append

Signed-off-by: Sam Li <faithilikerun@gmail.com>
---
 block/file-posix.c               | 168 ++++++++++++++++++++++++++++++-
 include/block/block-common.h     |  14 +++
 include/block/block_int-common.h |   5 +
 3 files changed, 183 insertions(+), 4 deletions(-)

Comments

Stefan Hajnoczi April 3, 2023, 5:04 p.m. UTC | #1
On Thu, Mar 23, 2023 at 01:19:04PM +0800, Sam Li wrote:
> Since Linux doesn't have a user API to issue zone append operations to
> zoned devices from user space, the file-posix driver is modified to add
> zone append emulation using regular writes. To do this, the file-posix
> driver tracks the wp location of all zones of the device in an array of
> uint64_t. The most significant bit of each wp location indicates whether
> the zone is a conventional zone.
> 
> A zone's wp can change due to the following operations:
> - zone reset: change the wp to the start offset of that zone
> - zone finish: change the wp to the end location of that zone
> - write to a zone
> - zone append
> 
> Signed-off-by: Sam Li <faithilikerun@gmail.com>
> ---
>  block/file-posix.c               | 168 ++++++++++++++++++++++++++++++-
>  include/block/block-common.h     |  14 +++
>  include/block/block_int-common.h |   5 +
>  3 files changed, 183 insertions(+), 4 deletions(-)
> 
> diff --git a/block/file-posix.c b/block/file-posix.c
> index 65efe5147e..0fb425dcae 100644
> --- a/block/file-posix.c
> +++ b/block/file-posix.c
> @@ -1324,6 +1324,85 @@ static int hdev_get_max_segments(int fd, struct stat *st)
>  #endif
>  }
>  
> +#if defined(CONFIG_BLKZONED)
> +/*
> + * If the ra (reset_all) flag > 0, then the wp of that zone should be reset to
> + * the start sector. Else, take the real wp of the device.
> + */
> +static int get_zones_wp(int fd, BlockZoneWps *wps, int64_t offset,
> +                        unsigned int nrz, int ra) {

Please use bool for true/false and use clear variable names:
int ra -> bool reset_all
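
For example (untested sketch):

  static int get_zones_wp(int fd, BlockZoneWps *wps, int64_t offset,
                          unsigned int nrz, bool reset_all)

and in the default case below:

  if (reset_all) {
      wps->wp[i] = blkz[i].start << BDRV_SECTOR_BITS;
  } else {
      wps->wp[i] = blkz[i].wp << BDRV_SECTOR_BITS;
  }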

> +    struct blk_zone *blkz;
> +    size_t rep_size;
> +    uint64_t sector = offset >> BDRV_SECTOR_BITS;
> +    int ret, n = 0, i = 0;
> +    rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone);
> +    g_autofree struct blk_zone_report *rep = NULL;
> +
> +    rep = g_malloc(rep_size);
> +    blkz = (struct blk_zone *)(rep + 1);
> +    while (n < nrz) {
> +        memset(rep, 0, rep_size);
> +        rep->sector = sector;
> +        rep->nr_zones = nrz - n;
> +
> +        do {
> +            ret = ioctl(fd, BLKREPORTZONE, rep);
> +        } while (ret != 0 && errno == EINTR);
> +        if (ret != 0) {
> +            error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d",
> +                    fd, offset, errno);
> +            return -errno;
> +        }
> +
> +        if (!rep->nr_zones) {
> +            break;
> +        }
> +
> +        for (i = 0; i < rep->nr_zones; i++, n++) {
> +            /*
> +             * The wp tracking cares only about sequential writes required and
> +             * sequential write preferred zones so that the wp can advance to
> +             * the right location.
> +             * Use the most significant bit of the wp location to indicate the
> +             * zone type: 0 for SWR/SWP zones and 1 for conventional zones.
> +             */
> +            if (blkz[i].type == BLK_ZONE_TYPE_CONVENTIONAL) {
> +                wps->wp[i] &= 1ULL << 63;
> +            } else {
> +                switch(blkz[i].cond) {
> +                case BLK_ZONE_COND_FULL:
> +                case BLK_ZONE_COND_READONLY:
> +                    /* Zone not writable */
> +                    wps->wp[i] = (blkz[i].start + blkz[i].len) << BDRV_SECTOR_BITS;
> +                    break;
> +                case BLK_ZONE_COND_OFFLINE:
> +                    /* Zone not writable nor readable */
> +                    wps->wp[i] = (blkz[i].start) << BDRV_SECTOR_BITS;
> +                    break;
> +                default:
> +                    if (ra > 0) {
> +                        wps->wp[i] = blkz[i].start << BDRV_SECTOR_BITS;
> +                    } else {
> +                        wps->wp[i] = blkz[i].wp << BDRV_SECTOR_BITS;
> +                    }
> +                    break;
> +                }
> +            }
> +        }
> +        sector = blkz[i - 1].start + blkz[i - 1].len;
> +    }
> +
> +    return 0;
> +}
> +
> +static void update_zones_wp(int fd, BlockZoneWps *wps, int64_t offset,
> +                            unsigned int nrz) {

QEMU coding style puts the opening curly bracket on a new line:

  static void update_zones_wp(int fd, BlockZoneWps *wps, int64_t offset,
                              unsigned int nrz)
  {

> +    if (get_zones_wp(fd, wps, offset, nrz, 0) < 0) {
> +        error_report("update zone wp failed");
> +    }
> +}
> +#endif
> +
>  static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
>  {
>      BDRVRawState *s = bs->opaque;
> @@ -1413,6 +1492,21 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
>          if (ret >= 0) {
>              bs->bl.max_active_zones = ret;
>          }
> +
> +        ret = get_sysfs_long_val(&st, "physical_block_size");
> +        if (ret >= 0) {
> +            bs->bl.write_granularity = ret;
> +        }
> +
> +        bs->bl.wps = g_malloc(sizeof(BlockZoneWps) +
> +                sizeof(int64_t) * bs->bl.nr_zones);

This function can be called multiple times, so the old bs->bl.wps needs
to be freed to avoid a memory leak here.

> +        ret = get_zones_wp(s->fd, bs->bl.wps, 0, bs->bl.nr_zones, 0);
> +        if (ret < 0) {
> +            error_setg_errno(errp, -ret, "report wps failed");
> +            g_free(bs->bl.wps);

Please set it to NULL to reduce the risk of a double-free.
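
i.e. something like (sketch):

  error_setg_errno(errp, -ret, "report wps failed");
  g_free(bs->bl.wps);
  bs->bl.wps = NULL;
  return;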

> +            return;
> +        }
> +        qemu_co_mutex_init(&bs->bl.wps->colock);

I just noticed there is a problem with keeping the mutex and
heap-allocated wps inside bs.bl. bdrv_refresh_limits does this:

  memset(&bs->bl, 0, sizeof(bs->bl));

It would be possible to exclude the wps and mutex from the memset, but
maybe they should be BlockDriverState fields instead. They are not
really limits.
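
For example (just a sketch of one option), BlockDriverState could grow:

  /* array of write pointers' location of each zone in the zoned device */
  BlockZoneWps *wps;

and callers would access it as bs->wps, so it survives the memset of bs->bl
done in bdrv_refresh_limits().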

>          return;
>      }
>  out:
> @@ -2338,9 +2432,15 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
>  {
>      BDRVRawState *s = bs->opaque;
>      RawPosixAIOData acb;
> +    int ret;
>  
>      if (fd_open(bs) < 0)
>          return -EIO;
> +#if defined(CONFIG_BLKZONED)
> +    if (type & QEMU_AIO_WRITE && bs->bl.wps) {
> +        qemu_co_mutex_lock(&bs->bl.wps->colock);
> +    }
> +#endif
>  
>      /*
>       * When using O_DIRECT, the request must be aligned to be able to use
> @@ -2354,14 +2454,16 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
>      } else if (s->use_linux_io_uring) {
>          LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
>          assert(qiov->size == bytes);
> -        return luring_co_submit(bs, aio, s->fd, offset, qiov, type);
> +        ret = luring_co_submit(bs, aio, s->fd, offset, qiov, type);
> +        goto out;
>  #endif
>  #ifdef CONFIG_LINUX_AIO
>      } else if (s->use_linux_aio) {
>          LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
>          assert(qiov->size == bytes);
> -        return laio_co_submit(bs, aio, s->fd, offset, qiov, type,
> +        ret = laio_co_submit(bs, aio, s->fd, offset, qiov, type,
>                                s->aio_max_batch);
> +        goto out;
>  #endif
>      }
>  
> @@ -2378,7 +2480,32 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
>      };
>  
>      assert(qiov->size == bytes);
> -    return raw_thread_pool_submit(bs, handle_aiocb_rw, &acb);
> +    ret = raw_thread_pool_submit(bs, handle_aiocb_rw, &acb);
> +
> +out:
> +#if defined(CONFIG_BLKZONED)
> +    BlockZoneWps *wps = bs->bl.wps;
> +    if (ret == 0) {
> +        if (type & QEMU_AIO_WRITE && wps && bs->bl.zone_size) {
> +            uint64_t *wp = &wps->wp[offset / bs->bl.zone_size];
> +            if (!BDRV_ZT_IS_CONV(*wp)) {
> +                /* Advance the wp if needed */
> +                if (offset + bytes > *wp) {
> +                    *wp = offset + bytes;
> +                }
> +            }
> +        }
> +    } else {
> +        if (type & QEMU_AIO_WRITE) {
> +            update_zones_wp(s->fd, bs->bl.wps, 0, 1);
> +        }
> +    }
> +
> +    if (type & QEMU_AIO_WRITE && wps) {
> +        qemu_co_mutex_unlock(&wps->colock);
> +    }
> +#endif
> +    return ret;
>  }
>  
>  static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
> @@ -2486,6 +2613,11 @@ static void raw_close(BlockDriverState *bs)
>      BDRVRawState *s = bs->opaque;
>  
>      if (s->fd >= 0) {
> +#if defined(CONFIG_BLKZONED)
> +        if (bs->bl.wps) {
> +            g_free(bs->bl.wps);
> +        }

The if statement can be replaced with an unconditional
g_free(bs->bl.wps) call. g_free(NULL) is valid and just returns
immediately.
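
i.e. simply:

  #if defined(CONFIG_BLKZONED)
          g_free(bs->bl.wps);
  #endif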

> +#endif
>          qemu_close(s->fd);
>          s->fd = -1;
>      }
> @@ -3283,6 +3415,7 @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
>      const char *op_name;
>      unsigned long zo;
>      int ret;
> +    BlockZoneWps *wps = bs->bl.wps;
>      int64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
>  
>      zone_size = bs->bl.zone_size;
> @@ -3300,6 +3433,15 @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
>          return -EINVAL;
>      }
>  
> +    qemu_co_mutex_lock(&wps->colock);

I suggest using:

  QEMU_LOCK_GUARD(&wps->colock);
  ...

or:

  WITH_QEMU_LOCK_GUARD(&wps->colock) {
      ...
  }

instead of qemu_co_mutex_lock/unlock().

That way the lock is guaranteed to be unlocked when the function returns
and you don't need to convert the error code paths to use goto.
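
For example, the check below could then just return directly (rough sketch):

  QEMU_LOCK_GUARD(&wps->colock);

  uint32_t i = offset / bs->bl.zone_size;
  uint64_t *wp = &wps->wp[i];
  if (BDRV_ZT_IS_CONV(*wp) && len != capacity) {
      error_report("zone mgmt operations are not allowed for conventional zones");
      return -EIO;    /* the lock guard unlocks on return */
  }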

> +    uint32_t i = offset / bs->bl.zone_size;
> +    uint64_t *wp = &wps->wp[i];
> +    if (BDRV_ZT_IS_CONV(*wp) && len != capacity) {
> +        error_report("zone mgmt operations are not allowed for conventional zones");
> +        ret = -EIO;
> +        goto out;
> +    }
> +
>      switch (op) {
>      case BLK_ZO_OPEN:
>          op_name = "BLKOPENZONE";
> @@ -3319,7 +3461,8 @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
>          break;
>      default:
>          error_report("Unsupported zone op: 0x%x", op);
> -        return -ENOTSUP;
> +        ret = -ENOTSUP;
> +        goto out;
>      }
>  
>      acb = (RawPosixAIOData) {
> @@ -3337,10 +3480,27 @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
>                          len >> BDRV_SECTOR_BITS);
>      ret = raw_thread_pool_submit(bs, handle_aiocb_zone_mgmt, &acb);
>      if (ret != 0) {
> +        update_zones_wp(s->fd, wps, offset, i);


>          ret = -errno;
>          error_report("ioctl %s failed %d", op_name, ret);
> +        goto out;
> +    }
> +
> +    if (zo == BLKRESETZONE && len == capacity) {
> +        ret = get_zones_wp(s->fd, wps, 0, bs->bl.nr_zones, 1);
> +        if (ret < 0) {
> +            error_report("reporting single wp failed");
> +            return ret;
> +        }
> +    } else if (zo == BLKRESETZONE) {
> +        *wp = offset;
> +    } else if (zo == BLKFINISHZONE) {
> +        /* The zoned device allows the last zone to be smaller than the zone size. */
> +        *wp = offset + len;
>      }

The BLKRESETZONE and BLKFINISHZONE only update one zone's wp, but
[offset, offset+len) can cover multiple zones.

A loop is needed to update wps for multiple zones:

  } else if (zo == BLKRESETZONE) {
      for each zone {
          wp[i] = offset + i * zone_size;
      }
  } else if (zo == BLKFINISHZONE) {
      for each zone {
          /* The last zone may be short */
          wp[i] = MIN(offset + (i + 1) * zone_size, offset + len);
      }
  }
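
Concretely, for the non-full-device cases, maybe something like this
(untested sketch, reusing the i, wps and zone_size locals from the patch
and assuming only the last zone in the range may be short):

  unsigned int nrz = DIV_ROUND_UP(len, zone_size);

  if (zo == BLKRESETZONE) {
      for (unsigned int j = 0; j < nrz; j++) {
          wps->wp[i + j] = offset + j * zone_size;
      }
  } else if (zo == BLKFINISHZONE) {
      for (unsigned int j = 0; j < nrz; j++) {
          /* The last zone may be smaller than zone_size */
          wps->wp[i + j] = MIN(offset + (j + 1) * zone_size, offset + len);
      }
  }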

>  
> +out:
> +    qemu_co_mutex_unlock(&wps->colock);
>      return ret;
>  }
>  #endif
> diff --git a/include/block/block-common.h b/include/block/block-common.h
> index 1576fcf2ed..93196229ac 100644
> --- a/include/block/block-common.h
> +++ b/include/block/block-common.h
> @@ -118,6 +118,14 @@ typedef struct BlockZoneDescriptor {
>      BlockZoneState state;
>  } BlockZoneDescriptor;
>  
> +/*
> + * Track write pointers of a zone in bytes.
> + */
> +typedef struct BlockZoneWps {
> +    CoMutex colock;
> +    uint64_t wp[];
> +} BlockZoneWps;
> +
>  typedef struct BlockDriverInfo {
>      /* in bytes, 0 if irrelevant */
>      int cluster_size;
> @@ -240,6 +248,12 @@ typedef enum {
>  #define BDRV_SECTOR_BITS   9
>  #define BDRV_SECTOR_SIZE   (1ULL << BDRV_SECTOR_BITS)
>  
> +/*
> + * Get the first most significant bit of wp. If it is zero, then
> + * the zone type is SWR.
> + */
> +#define BDRV_ZT_IS_CONV(wp)    (wp & (1ULL << 63))
> +
>  #define BDRV_REQUEST_MAX_SECTORS MIN_CONST(SIZE_MAX >> BDRV_SECTOR_BITS, \
>                                             INT_MAX >> BDRV_SECTOR_BITS)
>  #define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS)
> diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h
> index 1bd2aef4d5..69d1c3e6dd 100644
> --- a/include/block/block_int-common.h
> +++ b/include/block/block_int-common.h
> @@ -884,6 +884,11 @@ typedef struct BlockLimits {
>  
>      /* maximum number of active zones */
>      int64_t max_active_zones;
> +
> +    /* array of write pointers' location of each zone in the zoned device. */
> +    BlockZoneWps *wps;
> +
> +    int64_t write_granularity;

What is this limit? Is it specific to zones?

>  } BlockLimits;
>  
>  typedef struct BdrvOpBlocker BdrvOpBlocker;
> -- 
> 2.39.2
>
Sam Li April 4, 2023, 2:38 p.m. UTC | #2
Stefan Hajnoczi <stefanha@redhat.com> 于2023年4月4日周二 01:04写道:
>
> On Thu, Mar 23, 2023 at 01:19:04PM +0800, Sam Li wrote:
> > Since Linux doesn't have a user API to issue zone append operations to
> > zoned devices from user space, the file-posix driver is modified to add
> > zone append emulation using regular writes. To do this, the file-posix
> > driver tracks the wp location of all zones of the device in an array of
> > uint64_t. The most significant bit of each wp location indicates whether
> > the zone is a conventional zone.
> >
> > A zone's wp can change due to the following operations:
> > - zone reset: change the wp to the start offset of that zone
> > - zone finish: change the wp to the end location of that zone
> > - write to a zone
> > - zone append
> >
> > Signed-off-by: Sam Li <faithilikerun@gmail.com>
> > ---
> >  block/file-posix.c               | 168 ++++++++++++++++++++++++++++++-
> >  include/block/block-common.h     |  14 +++
> >  include/block/block_int-common.h |   5 +
> >  3 files changed, 183 insertions(+), 4 deletions(-)
> >
> > diff --git a/block/file-posix.c b/block/file-posix.c
> > index 65efe5147e..0fb425dcae 100644
> > --- a/block/file-posix.c
> > +++ b/block/file-posix.c
> > @@ -1324,6 +1324,85 @@ static int hdev_get_max_segments(int fd, struct stat *st)
> >  #endif
> >  }
> >
> > +#if defined(CONFIG_BLKZONED)
> > +/*
> > + * If the ra (reset_all) flag > 0, then the wp of that zone should be reset to
> > + * the start sector. Else, take the real wp of the device.
> > + */
> > +static int get_zones_wp(int fd, BlockZoneWps *wps, int64_t offset,
> > +                        unsigned int nrz, int ra) {
>
> Please use bool for true/false and use clear variable names:
> int ra -> bool reset_all
>
> > +    struct blk_zone *blkz;
> > +    size_t rep_size;
> > +    uint64_t sector = offset >> BDRV_SECTOR_BITS;
> > +    int ret, n = 0, i = 0;
> > +    rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone);
> > +    g_autofree struct blk_zone_report *rep = NULL;
> > +
> > +    rep = g_malloc(rep_size);
> > +    blkz = (struct blk_zone *)(rep + 1);
> > +    while (n < nrz) {
> > +        memset(rep, 0, rep_size);
> > +        rep->sector = sector;
> > +        rep->nr_zones = nrz - n;
> > +
> > +        do {
> > +            ret = ioctl(fd, BLKREPORTZONE, rep);
> > +        } while (ret != 0 && errno == EINTR);
> > +        if (ret != 0) {
> > +            error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d",
> > +                    fd, offset, errno);
> > +            return -errno;
> > +        }
> > +
> > +        if (!rep->nr_zones) {
> > +            break;
> > +        }
> > +
> > +        for (i = 0; i < rep->nr_zones; i++, n++) {
> > +            /*
> > +             * The wp tracking cares only about sequential writes required and
> > +             * sequential write preferred zones so that the wp can advance to
> > +             * the right location.
> > +             * Use the most significant bit of the wp location to indicate the
> > +             * zone type: 0 for SWR/SWP zones and 1 for conventional zones.
> > +             */
> > +            if (blkz[i].type == BLK_ZONE_TYPE_CONVENTIONAL) {
> > +                wps->wp[i] &= 1ULL << 63;
> > +            } else {
> > +                switch(blkz[i].cond) {
> > +                case BLK_ZONE_COND_FULL:
> > +                case BLK_ZONE_COND_READONLY:
> > +                    /* Zone not writable */
> > +                    wps->wp[i] = (blkz[i].start + blkz[i].len) << BDRV_SECTOR_BITS;
> > +                    break;
> > +                case BLK_ZONE_COND_OFFLINE:
> > +                    /* Zone not writable nor readable */
> > +                    wps->wp[i] = (blkz[i].start) << BDRV_SECTOR_BITS;
> > +                    break;
> > +                default:
> > +                    if (ra > 0) {
> > +                        wps->wp[i] = blkz[i].start << BDRV_SECTOR_BITS;
> > +                    } else {
> > +                        wps->wp[i] = blkz[i].wp << BDRV_SECTOR_BITS;
> > +                    }
> > +                    break;
> > +                }
> > +            }
> > +        }
> > +        sector = blkz[i - 1].start + blkz[i - 1].len;
> > +    }
> > +
> > +    return 0;
> > +}
> > +
> > +static void update_zones_wp(int fd, BlockZoneWps *wps, int64_t offset,
> > +                            unsigned int nrz) {
>
> QEMU coding style puts the opening curly bracket on a new line:
>
>   static void update_zones_wp(int fd, BlockZoneWps *wps, int64_t offset,
>                               unsigned int nrz)
>   {
>
> > +    if (get_zones_wp(fd, wps, offset, nrz, 0) < 0) {
> > +        error_report("update zone wp failed");
> > +    }
> > +}
> > +#endif
> > +
> >  static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
> >  {
> >      BDRVRawState *s = bs->opaque;
> > @@ -1413,6 +1492,21 @@ static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
> >          if (ret >= 0) {
> >              bs->bl.max_active_zones = ret;
> >          }
> > +
> > +        ret = get_sysfs_long_val(&st, "physical_block_size");
> > +        if (ret >= 0) {
> > +            bs->bl.write_granularity = ret;
> > +        }
> > +
> > +        bs->bl.wps = g_malloc(sizeof(BlockZoneWps) +
> > +                sizeof(int64_t) * bs->bl.nr_zones);
>
> This function can be called multiple times, so the old bs->bl.wps needs
> to be freed to avoid a memory leak here.
>
> > +        ret = get_zones_wp(s->fd, bs->bl.wps, 0, bs->bl.nr_zones, 0);
> > +        if (ret < 0) {
> > +            error_setg_errno(errp, -ret, "report wps failed");
> > +            g_free(bs->bl.wps);
>
> Please set it to NULL to reduce the risk of a double-free.
>
> > +            return;
> > +        }
> > +        qemu_co_mutex_init(&bs->bl.wps->colock);
>
> I just noticed there is a problem with keeping the mutex and
> heap-allocated wps inside bs.bl. bdrv_refresh_limits does this:
>
>   memset(&bs->bl, 0, sizeof(bs->bl));
>
> It would be possible to exclude the wps and mutex from the memset, but
> maybe they should be BlockDriverState fields instead. They are not
> really limits.
>
> >          return;
> >      }
> >  out:
> > @@ -2338,9 +2432,15 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
> >  {
> >      BDRVRawState *s = bs->opaque;
> >      RawPosixAIOData acb;
> > +    int ret;
> >
> >      if (fd_open(bs) < 0)
> >          return -EIO;
> > +#if defined(CONFIG_BLKZONED)
> > +    if (type & QEMU_AIO_WRITE && bs->bl.wps) {
> > +        qemu_co_mutex_lock(&bs->bl.wps->colock);
> > +    }
> > +#endif
> >
> >      /*
> >       * When using O_DIRECT, the request must be aligned to be able to use
> > @@ -2354,14 +2454,16 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
> >      } else if (s->use_linux_io_uring) {
> >          LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
> >          assert(qiov->size == bytes);
> > -        return luring_co_submit(bs, aio, s->fd, offset, qiov, type);
> > +        ret = luring_co_submit(bs, aio, s->fd, offset, qiov, type);
> > +        goto out;
> >  #endif
> >  #ifdef CONFIG_LINUX_AIO
> >      } else if (s->use_linux_aio) {
> >          LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
> >          assert(qiov->size == bytes);
> > -        return laio_co_submit(bs, aio, s->fd, offset, qiov, type,
> > +        ret = laio_co_submit(bs, aio, s->fd, offset, qiov, type,
> >                                s->aio_max_batch);
> > +        goto out;
> >  #endif
> >      }
> >
> > @@ -2378,7 +2480,32 @@ static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
> >      };
> >
> >      assert(qiov->size == bytes);
> > -    return raw_thread_pool_submit(bs, handle_aiocb_rw, &acb);
> > +    ret = raw_thread_pool_submit(bs, handle_aiocb_rw, &acb);
> > +
> > +out:
> > +#if defined(CONFIG_BLKZONED)
> > +    BlockZoneWps *wps = bs->bl.wps;
> > +    if (ret == 0) {
> > +        if (type & QEMU_AIO_WRITE && wps && bs->bl.zone_size) {
> > +            uint64_t *wp = &wps->wp[offset / bs->bl.zone_size];
> > +            if (!BDRV_ZT_IS_CONV(*wp)) {
> > +                /* Advance the wp if needed */
> > +                if (offset + bytes > *wp) {
> > +                    *wp = offset + bytes;
> > +                }
> > +            }
> > +        }
> > +    } else {
> > +        if (type & QEMU_AIO_WRITE) {
> > +            update_zones_wp(s->fd, bs->bl.wps, 0, 1);
> > +        }
> > +    }
> > +
> > +    if (type & QEMU_AIO_WRITE && wps) {
> > +        qemu_co_mutex_unlock(&wps->colock);
> > +    }
> > +#endif
> > +    return ret;
> >  }
> >
> >  static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
> > @@ -2486,6 +2613,11 @@ static void raw_close(BlockDriverState *bs)
> >      BDRVRawState *s = bs->opaque;
> >
> >      if (s->fd >= 0) {
> > +#if defined(CONFIG_BLKZONED)
> > +        if (bs->bl.wps) {
> > +            g_free(bs->bl.wps);
> > +        }
>
> The if statement can be replaced with an unconditional
> g_free(bs->bl.wps) call. g_free(NULL) is valid and just returns
> immediately.
>
> > +#endif
> >          qemu_close(s->fd);
> >          s->fd = -1;
> >      }
> > @@ -3283,6 +3415,7 @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
> >      const char *op_name;
> >      unsigned long zo;
> >      int ret;
> > +    BlockZoneWps *wps = bs->bl.wps;
> >      int64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
> >
> >      zone_size = bs->bl.zone_size;
> > @@ -3300,6 +3433,15 @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
> >          return -EINVAL;
> >      }
> >
> > +    qemu_co_mutex_lock(&wps->colock);
>
> I suggest using:
>
>   QEMU_LOCK_GUARD(&wps->colock);
>   ...
>
> or:
>
>   WITH_QEMU_LOCK_GUARD(&wps->colock) {
>       ...
>   }
>
> instead of qemu_co_mutex_lock/unlock().
>
> That way the lock is guaranteed to be unlocked when the function returns
> and you don't need to convert the error code paths to use goto.
>
> > +    uint32_t i = offset / bs->bl.zone_size;
> > +    uint64_t *wp = &wps->wp[i];
> > +    if (BDRV_ZT_IS_CONV(*wp) && len != capacity) {
> > +        error_report("zone mgmt operations are not allowed for conventional zones");
> > +        ret = -EIO;
> > +        goto out;
> > +    }
> > +
> >      switch (op) {
> >      case BLK_ZO_OPEN:
> >          op_name = "BLKOPENZONE";
> > @@ -3319,7 +3461,8 @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
> >          break;
> >      default:
> >          error_report("Unsupported zone op: 0x%x", op);
> > -        return -ENOTSUP;
> > +        ret = -ENOTSUP;
> > +        goto out;
> >      }
> >
> >      acb = (RawPosixAIOData) {
> > @@ -3337,10 +3480,27 @@ static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
> >                          len >> BDRV_SECTOR_BITS);
> >      ret = raw_thread_pool_submit(bs, handle_aiocb_zone_mgmt, &acb);
> >      if (ret != 0) {
> > +        update_zones_wp(s->fd, wps, offset, i);
>
>
> >          ret = -errno;
> >          error_report("ioctl %s failed %d", op_name, ret);
> > +        goto out;
> > +    }
> > +
> > +    if (zo == BLKRESETZONE && len == capacity) {
> > +        ret = get_zones_wp(s->fd, wps, 0, bs->bl.nr_zones, 1);
> > +        if (ret < 0) {
> > +            error_report("reporting single wp failed");
> > +            return ret;
> > +        }
> > +    } else if (zo == BLKRESETZONE) {
> > +        *wp = offset;
> > +    } else if (zo == BLKFINISHZONE) {
> > +        /* The zoned device allows the last zone to be smaller than the zone size. */
> > +        *wp = offset + len;
> >      }
>
> The BLKRESETZONE and BLKFINISHZONE only update one zone's wp, but
> [offset, offset+len) can cover multiple zones.
>
> A loop is needed to update wps for multiple zones:
>
>   } else if (zo == BLKRESETZONE) {
>       for each zone {
>           wp[i] = offset + i * zone_size;
>       }
>   } else if (zo == BLKFINISHZONE) {
>       for each zone {
>           /* The last zone may be short */
>           wp[i] = MIN(offset + (i + 1) * zone_size, offset + len);
>       }
>   }

Thanks! Indeed, only zone append and write cannot advance the wp into the
next zone, while zone reset and finish can cover multiple zones.

>
> >
> > +out:
> > +    qemu_co_mutex_unlock(&wps->colock);
> >      return ret;
> >  }
> >  #endif
> > diff --git a/include/block/block-common.h b/include/block/block-common.h
> > index 1576fcf2ed..93196229ac 100644
> > --- a/include/block/block-common.h
> > +++ b/include/block/block-common.h
> > @@ -118,6 +118,14 @@ typedef struct BlockZoneDescriptor {
> >      BlockZoneState state;
> >  } BlockZoneDescriptor;
> >
> > +/*
> > + * Track write pointers of a zone in bytes.
> > + */
> > +typedef struct BlockZoneWps {
> > +    CoMutex colock;
> > +    uint64_t wp[];
> > +} BlockZoneWps;
> > +
> >  typedef struct BlockDriverInfo {
> >      /* in bytes, 0 if irrelevant */
> >      int cluster_size;
> > @@ -240,6 +248,12 @@ typedef enum {
> >  #define BDRV_SECTOR_BITS   9
> >  #define BDRV_SECTOR_SIZE   (1ULL << BDRV_SECTOR_BITS)
> >
> > +/*
> > + * Get the first most significant bit of wp. If it is zero, then
> > + * the zone type is SWR.
> > + */
> > +#define BDRV_ZT_IS_CONV(wp)    (wp & (1ULL << 63))
> > +
> >  #define BDRV_REQUEST_MAX_SECTORS MIN_CONST(SIZE_MAX >> BDRV_SECTOR_BITS, \
> >                                             INT_MAX >> BDRV_SECTOR_BITS)
> >  #define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS)
> > diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h
> > index 1bd2aef4d5..69d1c3e6dd 100644
> > --- a/include/block/block_int-common.h
> > +++ b/include/block/block_int-common.h
> > @@ -884,6 +884,11 @@ typedef struct BlockLimits {
> >
> >      /* maximum number of active zones */
> >      int64_t max_active_zones;
> > +
> > +    /* array of write pointers' location of each zone in the zoned device. */
> > +    BlockZoneWps *wps;
> > +
> > +    int64_t write_granularity;
>
> What is this limit? Is it specific to zones?

It's the physical block size. Zone append writes need to check that their
I/O size is aligned to it.
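
For context, a hypothetical alignment check in the zone append path could
look roughly like this (not part of this patch):

  if (!QEMU_IS_ALIGNED(len, bs->bl.write_granularity)) {
      error_report("zone append: len %" PRId64 " is not aligned to the "
                   "physical block size %" PRId64,
                   len, bs->bl.write_granularity);
      return -EINVAL;
  }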

>
> >  } BlockLimits;
> >
> >  typedef struct BdrvOpBlocker BdrvOpBlocker;
> > --
> > 2.39.2
> >

Patch

diff --git a/block/file-posix.c b/block/file-posix.c
index 65efe5147e..0fb425dcae 100644
--- a/block/file-posix.c
+++ b/block/file-posix.c
@@ -1324,6 +1324,85 @@  static int hdev_get_max_segments(int fd, struct stat *st)
 #endif
 }
 
+#if defined(CONFIG_BLKZONED)
+/*
+ * If the ra (reset_all) flag > 0, then the wp of that zone should be reset to
+ * the start sector. Else, take the real wp of the device.
+ */
+static int get_zones_wp(int fd, BlockZoneWps *wps, int64_t offset,
+                        unsigned int nrz, int ra) {
+    struct blk_zone *blkz;
+    size_t rep_size;
+    uint64_t sector = offset >> BDRV_SECTOR_BITS;
+    int ret, n = 0, i = 0;
+    rep_size = sizeof(struct blk_zone_report) + nrz * sizeof(struct blk_zone);
+    g_autofree struct blk_zone_report *rep = NULL;
+
+    rep = g_malloc(rep_size);
+    blkz = (struct blk_zone *)(rep + 1);
+    while (n < nrz) {
+        memset(rep, 0, rep_size);
+        rep->sector = sector;
+        rep->nr_zones = nrz - n;
+
+        do {
+            ret = ioctl(fd, BLKREPORTZONE, rep);
+        } while (ret != 0 && errno == EINTR);
+        if (ret != 0) {
+            error_report("%d: ioctl BLKREPORTZONE at %" PRId64 " failed %d",
+                    fd, offset, errno);
+            return -errno;
+        }
+
+        if (!rep->nr_zones) {
+            break;
+        }
+
+        for (i = 0; i < rep->nr_zones; i++, n++) {
+            /*
+             * The wp tracking cares only about sequential writes required and
+             * sequential write preferred zones so that the wp can advance to
+             * the right location.
+             * Use the most significant bit of the wp location to indicate the
+             * zone type: 0 for SWR/SWP zones and 1 for conventional zones.
+             */
+            if (blkz[i].type == BLK_ZONE_TYPE_CONVENTIONAL) {
+                wps->wp[i] &= 1ULL << 63;
+            } else {
+                switch(blkz[i].cond) {
+                case BLK_ZONE_COND_FULL:
+                case BLK_ZONE_COND_READONLY:
+                    /* Zone not writable */
+                    wps->wp[i] = (blkz[i].start + blkz[i].len) << BDRV_SECTOR_BITS;
+                    break;
+                case BLK_ZONE_COND_OFFLINE:
+                    /* Zone not writable nor readable */
+                    wps->wp[i] = (blkz[i].start) << BDRV_SECTOR_BITS;
+                    break;
+                default:
+                    if (ra > 0) {
+                        wps->wp[i] = blkz[i].start << BDRV_SECTOR_BITS;
+                    } else {
+                        wps->wp[i] = blkz[i].wp << BDRV_SECTOR_BITS;
+                    }
+                    break;
+                }
+            }
+        }
+        sector = blkz[i - 1].start + blkz[i - 1].len;
+    }
+
+    return 0;
+}
+
+static void update_zones_wp(int fd, BlockZoneWps *wps, int64_t offset,
+                            unsigned int nrz) {
+    if (get_zones_wp(fd, wps, offset, nrz, 0) < 0) {
+        error_report("update zone wp failed");
+    }
+}
+#endif
+
 static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
 {
     BDRVRawState *s = bs->opaque;
@@ -1413,6 +1492,21 @@  static void raw_refresh_limits(BlockDriverState *bs, Error **errp)
         if (ret >= 0) {
             bs->bl.max_active_zones = ret;
         }
+
+        ret = get_sysfs_long_val(&st, "physical_block_size");
+        if (ret >= 0) {
+            bs->bl.write_granularity = ret;
+        }
+
+        bs->bl.wps = g_malloc(sizeof(BlockZoneWps) +
+                sizeof(int64_t) * bs->bl.nr_zones);
+        ret = get_zones_wp(s->fd, bs->bl.wps, 0, bs->bl.nr_zones, 0);
+        if (ret < 0) {
+            error_setg_errno(errp, -ret, "report wps failed");
+            g_free(bs->bl.wps);
+            return;
+        }
+        qemu_co_mutex_init(&bs->bl.wps->colock);
         return;
     }
 out:
@@ -2338,9 +2432,15 @@  static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
 {
     BDRVRawState *s = bs->opaque;
     RawPosixAIOData acb;
+    int ret;
 
     if (fd_open(bs) < 0)
         return -EIO;
+#if defined(CONFIG_BLKZONED)
+    if (type & QEMU_AIO_WRITE && bs->bl.wps) {
+        qemu_co_mutex_lock(&bs->bl.wps->colock);
+    }
+#endif
 
     /*
      * When using O_DIRECT, the request must be aligned to be able to use
@@ -2354,14 +2454,16 @@  static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
     } else if (s->use_linux_io_uring) {
         LuringState *aio = aio_get_linux_io_uring(bdrv_get_aio_context(bs));
         assert(qiov->size == bytes);
-        return luring_co_submit(bs, aio, s->fd, offset, qiov, type);
+        ret = luring_co_submit(bs, aio, s->fd, offset, qiov, type);
+        goto out;
 #endif
 #ifdef CONFIG_LINUX_AIO
     } else if (s->use_linux_aio) {
         LinuxAioState *aio = aio_get_linux_aio(bdrv_get_aio_context(bs));
         assert(qiov->size == bytes);
-        return laio_co_submit(bs, aio, s->fd, offset, qiov, type,
+        ret = laio_co_submit(bs, aio, s->fd, offset, qiov, type,
                               s->aio_max_batch);
+        goto out;
 #endif
     }
 
@@ -2378,7 +2480,32 @@  static int coroutine_fn raw_co_prw(BlockDriverState *bs, uint64_t offset,
     };
 
     assert(qiov->size == bytes);
-    return raw_thread_pool_submit(bs, handle_aiocb_rw, &acb);
+    ret = raw_thread_pool_submit(bs, handle_aiocb_rw, &acb);
+
+out:
+#if defined(CONFIG_BLKZONED)
+    BlockZoneWps *wps = bs->bl.wps;
+    if (ret == 0) {
+        if (type & QEMU_AIO_WRITE && wps && bs->bl.zone_size) {
+            uint64_t *wp = &wps->wp[offset / bs->bl.zone_size];
+            if (!BDRV_ZT_IS_CONV(*wp)) {
+                /* Advance the wp if needed */
+                if (offset + bytes > *wp) {
+                    *wp = offset + bytes;
+                }
+            }
+        }
+    } else {
+        if (type & QEMU_AIO_WRITE) {
+            update_zones_wp(s->fd, bs->bl.wps, 0, 1);
+        }
+    }
+
+    if (type & QEMU_AIO_WRITE && wps) {
+        qemu_co_mutex_unlock(&wps->colock);
+    }
+#endif
+    return ret;
 }
 
 static int coroutine_fn raw_co_preadv(BlockDriverState *bs, int64_t offset,
@@ -2486,6 +2613,11 @@  static void raw_close(BlockDriverState *bs)
     BDRVRawState *s = bs->opaque;
 
     if (s->fd >= 0) {
+#if defined(CONFIG_BLKZONED)
+        if (bs->bl.wps) {
+            g_free(bs->bl.wps);
+        }
+#endif
         qemu_close(s->fd);
         s->fd = -1;
     }
@@ -3283,6 +3415,7 @@  static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
     const char *op_name;
     unsigned long zo;
     int ret;
+    BlockZoneWps *wps = bs->bl.wps;
     int64_t capacity = bs->total_sectors << BDRV_SECTOR_BITS;
 
     zone_size = bs->bl.zone_size;
@@ -3300,6 +3433,15 @@  static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
         return -EINVAL;
     }
 
+    qemu_co_mutex_lock(&wps->colock);
+    uint32_t i = offset / bs->bl.zone_size;
+    uint64_t *wp = &wps->wp[i];
+    if (BDRV_ZT_IS_CONV(*wp) && len != capacity) {
+        error_report("zone mgmt operations are not allowed for conventional zones");
+        ret = -EIO;
+        goto out;
+    }
+
     switch (op) {
     case BLK_ZO_OPEN:
         op_name = "BLKOPENZONE";
@@ -3319,7 +3461,8 @@  static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
         break;
     default:
         error_report("Unsupported zone op: 0x%x", op);
-        return -ENOTSUP;
+        ret = -ENOTSUP;
+        goto out;
     }
 
     acb = (RawPosixAIOData) {
@@ -3337,10 +3480,27 @@  static int coroutine_fn raw_co_zone_mgmt(BlockDriverState *bs, BlockZoneOp op,
                         len >> BDRV_SECTOR_BITS);
     ret = raw_thread_pool_submit(bs, handle_aiocb_zone_mgmt, &acb);
     if (ret != 0) {
+        update_zones_wp(s->fd, wps, offset, i);
         ret = -errno;
         error_report("ioctl %s failed %d", op_name, ret);
+        goto out;
+    }
+
+    if (zo == BLKRESETZONE && len == capacity) {
+        ret = get_zones_wp(s->fd, wps, 0, bs->bl.nr_zones, 1);
+        if (ret < 0) {
+            error_report("reporting single wp failed");
+            return ret;
+        }
+    } else if (zo == BLKRESETZONE) {
+        *wp = offset;
+    } else if (zo == BLKFINISHZONE) {
+        /* The zoned device allows the last zone to be smaller than the zone size. */
+        *wp = offset + len;
     }
 
+out:
+    qemu_co_mutex_unlock(&wps->colock);
     return ret;
 }
 #endif
diff --git a/include/block/block-common.h b/include/block/block-common.h
index 1576fcf2ed..93196229ac 100644
--- a/include/block/block-common.h
+++ b/include/block/block-common.h
@@ -118,6 +118,14 @@  typedef struct BlockZoneDescriptor {
     BlockZoneState state;
 } BlockZoneDescriptor;
 
+/*
+ * Track write pointers of a zone in bytes.
+ */
+typedef struct BlockZoneWps {
+    CoMutex colock;
+    uint64_t wp[];
+} BlockZoneWps;
+
 typedef struct BlockDriverInfo {
     /* in bytes, 0 if irrelevant */
     int cluster_size;
@@ -240,6 +248,12 @@  typedef enum {
 #define BDRV_SECTOR_BITS   9
 #define BDRV_SECTOR_SIZE   (1ULL << BDRV_SECTOR_BITS)
 
+/*
+ * Get the first most significant bit of wp. If it is zero, then
+ * the zone type is SWR.
+ */
+#define BDRV_ZT_IS_CONV(wp)    (wp & (1ULL << 63))
+
 #define BDRV_REQUEST_MAX_SECTORS MIN_CONST(SIZE_MAX >> BDRV_SECTOR_BITS, \
                                            INT_MAX >> BDRV_SECTOR_BITS)
 #define BDRV_REQUEST_MAX_BYTES (BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS)
diff --git a/include/block/block_int-common.h b/include/block/block_int-common.h
index 1bd2aef4d5..69d1c3e6dd 100644
--- a/include/block/block_int-common.h
+++ b/include/block/block_int-common.h
@@ -884,6 +884,11 @@  typedef struct BlockLimits {
 
     /* maximum number of active zones */
     int64_t max_active_zones;
+
+    /* array of write pointers' location of each zone in the zoned device. */
+    BlockZoneWps *wps;
+
+    int64_t write_granularity;
 } BlockLimits;
 
 typedef struct BdrvOpBlocker BdrvOpBlocker;