@@ -490,10 +490,42 @@ when hardware support is available. This works in the following way:
keys being passed to these methods.
blk-crypto-fallback doesn't support hardware-wrapped keys. Therefore,
hardware-wrapped keys can only be used with actual inline encryption hardware.
+All of the above deals with hardware-wrapped keys in ephemerally-wrapped form
+only. To get such keys in the first place, new block device ioctls have been
+added to provide a generic interface for creating and preparing such keys:
+
+- ``BLKCRYPTOIMPORTKEY`` converts a raw key to long-term wrapped form. It takes
+ in a pointer to a ``struct blk_crypto_import_key_arg``. The caller must set
+ ``raw_key_ptr`` and ``raw_key_size`` to the pointer and size (in bytes) of the
+ raw key to import. On success, ``BLKCRYPTOIMPORTKEY`` returns 0 and writes
+ the resulting long-term wrapped key blob to the buffer pointed to by
+ ``lt_key_ptr``, which is of maximum size ``lt_key_size``. It also updates
+ ``lt_key_size`` to be the actual size of the key. On failure, it returns -1
+ and sets errno.
+
+- ``BLKCRYPTOGENERATEKEY`` is like ``BLKCRYPTOIMPORTKEY``, but it has the
+ hardware generate the key instead of importing one. It takes in a pointer to
+ a ``struct blk_crypto_generate_key_arg``.
+
+- ``BLKCRYPTOPREPAREKEY`` converts a key from long-term wrapped form to
+ ephemerally-wrapped form. It takes in a pointer to a ``struct
+ blk_crypto_prepare_key_arg``. The caller must set ``lt_key_ptr`` and
+ ``lt_key_size`` to the pointer and size (in bytes) of the long-term wrapped
+ key blob to convert. On success, ``BLKCRYPTOPREPAREKEY`` returns 0 and writes
+ the resulting ephemerally-wrapped key blob to the buffer pointed to by
+ ``eph_key_ptr``, which is of maximum size ``eph_key_size``. It also updates
+ ``eph_key_size`` to be the actual size of the key. On failure, it returns -1
+ and sets errno.
+
+Userspace needs to use either ``BLKCRYPTOIMPORTKEY`` or ``BLKCRYPTOGENERATEKEY``
+once to create a key, and then ``BLKCRYPTOPREPAREKEY`` each time the key is
+unlocked and added to the kernel. Note that these ioctls have no relevance for
+raw keys; they are only for hardware-wrapped keys.
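+
+For illustration, here is a minimal sketch of how userspace might drive these
+ioctls. The ``create_and_prepare()`` helper and the buffer sizes are
+hypothetical, ``fd`` is assumed to be an open file descriptor for the block
+device, and error handling is kept to a minimum::
+
+	#include <stddef.h>
+	#include <stdint.h>
+	#include <sys/ioctl.h>
+	#include <linux/blk-crypto.h>
+
+	/* Illustrative buffer sizes; must be large enough for the blobs. */
+	static __u8 lt_key[128], eph_key[128];
+
+	/* Import a raw key once, then get its ephemerally-wrapped form. */
+	static int create_and_prepare(int fd, const __u8 *raw_key,
+				      size_t raw_key_size)
+	{
+		struct blk_crypto_import_key_arg imp = {
+			.raw_key_ptr = (uintptr_t)raw_key,
+			.raw_key_size = raw_key_size,
+			.lt_key_ptr = (uintptr_t)lt_key,
+			.lt_key_size = sizeof(lt_key),
+		};
+		struct blk_crypto_prepare_key_arg prep = {
+			.lt_key_ptr = (uintptr_t)lt_key,
+			.eph_key_ptr = (uintptr_t)eph_key,
+			.eph_key_size = sizeof(eph_key),
+		};
+
+		/* Done once per key; the long-term blob is what gets stored. */
+		if (ioctl(fd, BLKCRYPTOIMPORTKEY, &imp) != 0)
+			return -1;
+
+		/* Done on each unlock, using the stored long-term blob. */
+		prep.lt_key_size = imp.lt_key_size;
+		if (ioctl(fd, BLKCRYPTOPREPAREKEY, &prep) != 0)
+			return -1;
+
+		/* eph_key[0..prep.eph_key_size) is what gets passed on. */
+		return 0;
+	}
+
+Note that the ``reserved`` fields of these structs must be zeroed; the
+designated initializers in the sketch above take care of that implicitly.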
+
Testability
-----------
Both the hardware KDF and the inline encryption itself are well-defined
algorithms that don't depend on any secrets other than the unwrapped key.
@@ -83,10 +83,12 @@ Code Seq# Include File Comments
0x10  00-0F  drivers/char/s390/vmcp.h
0x10  10-1F  arch/s390/include/uapi/sclp_ctl.h
0x10  20-2F  arch/s390/include/uapi/asm/hypfs.h
0x12  all    linux/fs.h                                          BLK* ioctls
             linux/blkpg.h
+            linux/blkzoned.h
+            linux/blk-crypto.h
0x15  all    linux/fs.h                                          FS_IOC_* ioctls
0x1b  all    InfiniBand Subsystem
             <http://infiniband.sourceforge.net/>
0x20  all    drivers/cdrom/cm206.h
0x22  all    scsi/sg.h
@@ -81,10 +81,13 @@ int __blk_crypto_evict_key(struct blk_crypto_profile *profile,
const struct blk_crypto_key *key);
bool __blk_crypto_cfg_supported(struct blk_crypto_profile *profile,
const struct blk_crypto_config *cfg);
+int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
+		     void __user *argp);
+
#else /* CONFIG_BLK_INLINE_ENCRYPTION */
static inline int blk_crypto_sysfs_register(struct gendisk *disk)
{
return 0;
@@ -128,10 +131,16 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
static inline bool blk_crypto_rq_has_keyslot(struct request *rq)
{
return false;
}
+static inline int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
+				   void __user *argp)
+{
+	return -ENOTTY;
+}
+
#endif /* CONFIG_BLK_INLINE_ENCRYPTION */
void __bio_crypt_advance(struct bio *bio, unsigned int bytes);
static inline void bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
@@ -500,10 +500,67 @@ int blk_crypto_derive_sw_secret(struct block_device *bdev,
sw_secret);
blk_crypto_hw_exit(profile);
return err;
}
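+
+/**
+ * blk_crypto_import_key() - Import a raw key into hardware-wrapped form
+ * @profile: the crypto profile of the device
+ * @raw_key: the raw key to import
+ * @raw_key_size: size of @raw_key in bytes
+ * @lt_key: buffer for the long-term wrapped key blob
+ *
+ * Return: the size of the long-term wrapped key blob in bytes on success, or
+ *	   an -errno code on failure (including -EOPNOTSUPP if the device
+ *	   doesn't support hardware-wrapped keys).
+ */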
+int blk_crypto_import_key(struct blk_crypto_profile *profile,
+			  const u8 *raw_key, size_t raw_key_size,
+			  u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+	int ret;
+
+	if (!profile)
+		return -EOPNOTSUPP;
+	if (!(profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED))
+		return -EOPNOTSUPP;
+	if (!profile->ll_ops.import_key)
+		return -EOPNOTSUPP;
+
+	blk_crypto_hw_enter(profile);
+	ret = profile->ll_ops.import_key(profile, raw_key, raw_key_size,
+					 lt_key);
+	blk_crypto_hw_exit(profile);
+	return ret;
+}
+
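+/**
+ * blk_crypto_generate_key() - Have the hardware generate a new wrapped key
+ * @profile: the crypto profile of the device
+ * @lt_key: buffer for the long-term wrapped key blob
+ *
+ * Return: the size of the long-term wrapped key blob in bytes on success, or
+ *	   an -errno code on failure (including -EOPNOTSUPP if the device
+ *	   doesn't support hardware-wrapped keys).
+ */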
+int blk_crypto_generate_key(struct blk_crypto_profile *profile,
+			    u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+	int ret;
+
+	if (!profile)
+		return -EOPNOTSUPP;
+	if (!(profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED))
+		return -EOPNOTSUPP;
+	if (!profile->ll_ops.generate_key)
+		return -EOPNOTSUPP;
+
+	blk_crypto_hw_enter(profile);
+	ret = profile->ll_ops.generate_key(profile, lt_key);
+	blk_crypto_hw_exit(profile);
+	return ret;
+}
+
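+/**
+ * blk_crypto_prepare_key() - Prepare a hardware-wrapped key for use
+ * @profile: the crypto profile of the device
+ * @lt_key: the long-term wrapped key blob
+ * @lt_key_size: size of @lt_key in bytes
+ * @eph_key: buffer for the ephemerally-wrapped key blob
+ *
+ * Return: the size of the ephemerally-wrapped key blob in bytes on success, or
+ *	   an -errno code on failure (including -EOPNOTSUPP if the device
+ *	   doesn't support hardware-wrapped keys).
+ */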
+int blk_crypto_prepare_key(struct blk_crypto_profile *profile,
+			   const u8 *lt_key, size_t lt_key_size,
+			   u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE])
+{
+	int ret;
+
+	if (!profile)
+		return -EOPNOTSUPP;
+	if (!(profile->key_types_supported & BLK_CRYPTO_KEY_TYPE_HW_WRAPPED))
+		return -EOPNOTSUPP;
+	if (!profile->ll_ops.prepare_key)
+		return -EOPNOTSUPP;
+
+	blk_crypto_hw_enter(profile);
+	ret = profile->ll_ops.prepare_key(profile, lt_key, lt_key_size,
+					  eph_key);
+	blk_crypto_hw_exit(profile);
+	return ret;
+}
+
/**
* blk_crypto_intersect_capabilities() - restrict supported crypto capabilities
* by child device
* @parent: the crypto profile for the parent device
* @child: the crypto profile for the child device, or NULL
@@ -465,5 +465,148 @@ void blk_crypto_evict_key(struct block_device *bdev,
*/
if (err)
pr_warn_ratelimited("%pg: error %d evicting key\n", bdev, err);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);
+
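+/*
+ * Implementation of the BLKCRYPTOIMPORTKEY ioctl.  Copies of the key material
+ * only ever live on the stack and are zeroized before returning.
+ */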
+static int blk_crypto_ioctl_import_key(struct blk_crypto_profile *profile,
+				       void __user *argp)
+{
+	struct blk_crypto_import_key_arg arg;
+	u8 raw_key[BLK_CRYPTO_MAX_RAW_KEY_SIZE];
+	u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
+	int ret;
+
+	if (copy_from_user(&arg, argp, sizeof(arg)))
+		return -EFAULT;
+
+	if (memchr_inv(arg.reserved, 0, sizeof(arg.reserved)))
+		return -EINVAL;
+
+	if (arg.raw_key_size < 16 || arg.raw_key_size > sizeof(raw_key))
+		return -EINVAL;
+
+	if (copy_from_user(raw_key, u64_to_user_ptr(arg.raw_key_ptr),
+			   arg.raw_key_size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	ret = blk_crypto_import_key(profile, raw_key, arg.raw_key_size, lt_key);
+	if (ret < 0)
+		goto out;
+	if (ret > arg.lt_key_size) {
+		ret = -EOVERFLOW;
+		goto out;
+	}
+	arg.lt_key_size = ret;
+	if (copy_to_user(u64_to_user_ptr(arg.lt_key_ptr), lt_key,
+			 arg.lt_key_size) ||
+	    copy_to_user(argp, &arg, sizeof(arg))) {
+		ret = -EFAULT;
+		goto out;
+	}
+	ret = 0;
+
+out:
+	memzero_explicit(raw_key, sizeof(raw_key));
+	memzero_explicit(lt_key, sizeof(lt_key));
+	return ret;
+}
+
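+/* Implementation of the BLKCRYPTOGENERATEKEY ioctl. */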
+static int blk_crypto_ioctl_generate_key(struct blk_crypto_profile *profile,
+					 void __user *argp)
+{
+	struct blk_crypto_generate_key_arg arg;
+	u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
+	int ret;
+
+	if (copy_from_user(&arg, argp, sizeof(arg)))
+		return -EFAULT;
+
+	if (memchr_inv(arg.reserved, 0, sizeof(arg.reserved)))
+		return -EINVAL;
+
+	ret = blk_crypto_generate_key(profile, lt_key);
+	if (ret < 0)
+		goto out;
+	if (ret > arg.lt_key_size) {
+		ret = -EOVERFLOW;
+		goto out;
+	}
+	arg.lt_key_size = ret;
+	if (copy_to_user(u64_to_user_ptr(arg.lt_key_ptr), lt_key,
+			 arg.lt_key_size) ||
+	    copy_to_user(argp, &arg, sizeof(arg))) {
+		ret = -EFAULT;
+		goto out;
+	}
+	ret = 0;
+
+out:
+	memzero_explicit(lt_key, sizeof(lt_key));
+	return ret;
+}
+
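+/*
+ * Implementation of the BLKCRYPTOPREPAREKEY ioctl.  As with key import, the
+ * on-stack copies of the key blobs are zeroized before returning.
+ */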
+static int blk_crypto_ioctl_prepare_key(struct blk_crypto_profile *profile,
+					 void __user *argp)
+{
+	struct blk_crypto_prepare_key_arg arg;
+	u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
+	u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
+	int ret;
+
+	if (copy_from_user(&arg, argp, sizeof(arg)))
+		return -EFAULT;
+
+	if (memchr_inv(arg.reserved, 0, sizeof(arg.reserved)))
+		return -EINVAL;
+
+	if (arg.lt_key_size > sizeof(lt_key))
+		return -EINVAL;
+
+	if (copy_from_user(lt_key, u64_to_user_ptr(arg.lt_key_ptr),
+			   arg.lt_key_size)) {
+		ret = -EFAULT;
+		goto out;
+	}
+	ret = blk_crypto_prepare_key(profile, lt_key, arg.lt_key_size, eph_key);
+	if (ret < 0)
+		goto out;
+	if (ret > arg.eph_key_size) {
+		ret = -EOVERFLOW;
+		goto out;
+	}
+	arg.eph_key_size = ret;
+	if (copy_to_user(u64_to_user_ptr(arg.eph_key_ptr), eph_key,
+			 arg.eph_key_size) ||
+	    copy_to_user(argp, &arg, sizeof(arg))) {
+		ret = -EFAULT;
+		goto out;
+	}
+	ret = 0;
+
+out:
+	memzero_explicit(lt_key, sizeof(lt_key));
+	memzero_explicit(eph_key, sizeof(eph_key));
+	return ret;
+}
+
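+/* Dispatcher for the BLKCRYPTO* ioctls, called from blkdev_common_ioctl(). */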
+int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
+		     void __user *argp)
+{
+	struct blk_crypto_profile *profile =
+		bdev_get_queue(bdev)->crypto_profile;
+
+	if (!profile)
+		return -EOPNOTSUPP;
+
+	switch (cmd) {
+	case BLKCRYPTOIMPORTKEY:
+		return blk_crypto_ioctl_import_key(profile, argp);
+	case BLKCRYPTOGENERATEKEY:
+		return blk_crypto_ioctl_generate_key(profile, argp);
+	case BLKCRYPTOPREPAREKEY:
+		return blk_crypto_ioctl_prepare_key(profile, argp);
+	default:
+		return -ENOTTY;
+	}
+}
@@ -13,10 +13,11 @@
#include <linux/uaccess.h>
#include <linux/pagemap.h>
#include <linux/io_uring/cmd.h>
#include <uapi/linux/blkdev.h>
#include "blk.h"
+#include "blk-crypto-internal.h"
static int blkpg_do_ioctl(struct block_device *bdev,
struct blkpg_partition __user *upart, int op)
{
struct gendisk *disk = bdev->bd_disk;
@@ -618,10 +619,14 @@ static int blkdev_common_ioctl(struct block_device *bdev, blk_mode_t mode,
mode | BLK_OPEN_STRICT_SCAN);
case BLKTRACESTART:
case BLKTRACESTOP:
case BLKTRACETEARDOWN:
return blk_trace_ioctl(bdev, cmd, argp);
+	case BLKCRYPTOIMPORTKEY:
+	case BLKCRYPTOGENERATEKEY:
+	case BLKCRYPTOPREPAREKEY:
+		return blk_crypto_ioctl(bdev, cmd, argp);
case IOC_PR_REGISTER:
return blkdev_pr_register(bdev, mode, argp);
case IOC_PR_RESERVE:
return blkdev_pr_reserve(bdev, mode, argp);
case IOC_PR_RELEASE:
@@ -69,10 +69,52 @@ struct blk_crypto_ll_ops {
* -errno code on other errors.
*/
int (*derive_sw_secret)(struct blk_crypto_profile *profile,
const u8 *eph_key, size_t eph_key_size,
u8 sw_secret[BLK_CRYPTO_SW_SECRET_SIZE]);
+
+	/**
+	 * @import_key: Create a hardware-wrapped key by importing a raw key.
+	 *
+	 * This only needs to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED
+	 * is supported.
+	 *
+	 * On success, must write the new key in long-term wrapped form to
+	 * @lt_key and return its size in bytes. On failure, must return a
+	 * -errno value.
+	 */
+	int (*import_key)(struct blk_crypto_profile *profile,
+			  const u8 *raw_key, size_t raw_key_size,
+			  u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+	/**
+	 * @generate_key: Generate a hardware-wrapped key.
+	 *
+	 * This only needs to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED
+	 * is supported.
+	 *
+	 * On success, must write the new key in long-term wrapped form to
+	 * @lt_key and return its size in bytes. On failure, must return a
+	 * -errno value.
+	 */
+	int (*generate_key)(struct blk_crypto_profile *profile,
+			    u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+	/**
+	 * @prepare_key: Prepare a hardware-wrapped key to be used.
+	 *
+	 * Prepare a hardware-wrapped key to be used by converting it from
+	 * long-term wrapped form to ephemerally-wrapped form. This only needs
+	 * to be implemented if BLK_CRYPTO_KEY_TYPE_HW_WRAPPED is supported.
+	 *
+	 * On success, must write the key in ephemerally-wrapped form to
+	 * @eph_key and return its size in bytes. On failure, must return a
+	 * -errno value.
+	 */
+	int (*prepare_key)(struct blk_crypto_profile *profile,
+			   const u8 *lt_key, size_t lt_key_size,
+			   u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
};
/**
* struct blk_crypto_profile - inline encryption profile for a device
*
@@ -161,10 +203,21 @@ unsigned int blk_crypto_keyslot_index(struct blk_crypto_keyslot *slot);
void blk_crypto_reprogram_all_keys(struct blk_crypto_profile *profile);
void blk_crypto_profile_destroy(struct blk_crypto_profile *profile);
+int blk_crypto_import_key(struct blk_crypto_profile *profile,
+			  const u8 *raw_key, size_t raw_key_size,
+			  u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+int blk_crypto_generate_key(struct blk_crypto_profile *profile,
+			    u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
+int blk_crypto_prepare_key(struct blk_crypto_profile *profile,
+			   const u8 *lt_key, size_t lt_key_size,
+			   u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE]);
+
void blk_crypto_intersect_capabilities(struct blk_crypto_profile *parent,
const struct blk_crypto_profile *child);
bool blk_crypto_has_capabilities(const struct blk_crypto_profile *target,
const struct blk_crypto_profile *reference);
@@ -6,10 +6,11 @@
#ifndef __LINUX_BLK_CRYPTO_H
#define __LINUX_BLK_CRYPTO_H
#include <linux/minmax.h>
#include <linux/types.h>
+#include <uapi/linux/blk-crypto.h>
enum blk_crypto_mode_num {
BLK_ENCRYPTION_MODE_INVALID,
BLK_ENCRYPTION_MODE_AES_256_XTS,
BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV,
new file mode 100644
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_BLK_CRYPTO_H
+#define _UAPI_LINUX_BLK_CRYPTO_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+struct blk_crypto_import_key_arg {
+	/* Raw key (input) */
+	__u64 raw_key_ptr;
+	__u64 raw_key_size;
+	/* Long-term wrapped key blob (output) */
+	__u64 lt_key_ptr;
+	__u64 lt_key_size;
+	__u64 reserved[4];
+};
+
+struct blk_crypto_generate_key_arg {
+	/* Long-term wrapped key blob (output) */
+	__u64 lt_key_ptr;
+	__u64 lt_key_size;
+	__u64 reserved[4];
+};
+
+struct blk_crypto_prepare_key_arg {
+	/* Long-term wrapped key blob (input) */
+	__u64 lt_key_ptr;
+	__u64 lt_key_size;
+	/* Ephemerally-wrapped key blob (output) */
+	__u64 eph_key_ptr;
+	__u64 eph_key_size;
+	__u64 reserved[4];
+};
+
+/*
+ * These ioctls share the block device ioctl space; see uapi/linux/fs.h.
+ * 140-141 are reserved for future blk-crypto ioctls; any more than that would
+ * require an additional allocation from the block device ioctl space.
+ */
+#define BLKCRYPTOIMPORTKEY _IOWR(0x12, 137, struct blk_crypto_import_key_arg)
+#define BLKCRYPTOGENERATEKEY _IOWR(0x12, 138, struct blk_crypto_generate_key_arg)
+#define BLKCRYPTOPREPAREKEY _IOWR(0x12, 139, struct blk_crypto_prepare_key_arg)
+
+#endif /* _UAPI_LINUX_BLK_CRYPTO_H */
@@ -201,14 +201,12 @@ struct fsxattr {
#define BLKDISCARDZEROES _IO(0x12,124)
#define BLKSECDISCARD _IO(0x12,125)
#define BLKROTATIONAL _IO(0x12,126)
#define BLKZEROOUT _IO(0x12,127)
#define BLKGETDISKSEQ _IOR(0x12,128,__u64)
-/*
- * A jump here: 130-136 are reserved for zoned block devices
- * (see uapi/linux/blkzoned.h)
- */
+/* 130-136 are used by zoned block device ioctls (uapi/linux/blkzoned.h) */
+/* 137-141 are used by blk-crypto ioctls (uapi/linux/blk-crypto.h) */
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
#define FIGETBSZ _IO(0x00,2) /* get the block size used for bmap */
#define FIFREEZE _IOWR('X', 119, int) /* Freeze */