@@ -86,21 +86,29 @@ static void mktme_program_package(void *hw_program_info)
/* Program a KeyID across the entire system. */
static int mktme_program_system(struct mktme_key_program *key_program,
- cpumask_var_t mktme_cpumask)
+ cpumask_var_t mktme_cpumask, int hotplug)
{
struct mktme_hw_program_info info = {
.key_program = key_program,
.status = MKTME_PROG_SUCCESS,
};
- get_online_cpus();
- on_each_cpu_mask(mktme_cpumask, mktme_program_package, &info, 1);
- put_online_cpus();
+
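+ /*
+  * In the hotplug path the CPU hotplug lock is already held by the
+  * hotplug machinery, so get_online_cpus() must not be taken again.
+  */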
+ if (!hotplug) {
+ get_online_cpus();
+ on_each_cpu_mask(mktme_cpumask, mktme_program_package,
+ &info, 1);
+ put_online_cpus();
+ } else {
+ on_each_cpu_mask(mktme_cpumask, mktme_program_package,
+ &info, 1);
+ }
return info.status;
}
/* Copy the payload to the HW programming structure and program this KeyID */
-static int mktme_program_keyid(int keyid, struct mktme_payload *payload)
+static int mktme_program_keyid(int keyid, struct mktme_payload *payload,
+ cpumask_var_t mask, int hotplug)
{
struct mktme_key_program *kprog = NULL;
u8 kern_entropy[MKTME_AES_XTS_SIZE];
@@ -124,7 +132,7 @@ static int mktme_program_keyid(int keyid, struct mktme_payload *payload)
kprog->key_field_2[i] ^= kern_entropy[i];
}
}
- ret = mktme_program_system(kprog, mktme_leadcpus);
+ ret = mktme_program_system(kprog, mktme_leadcpus, hotplug);
kmem_cache_free(mktme_prog_cache, kprog);
return ret;
}
@@ -173,7 +181,7 @@ static int mktme_update_key(struct key *key,
/* Forget if key was user type. */
clear_bit(keyid, mktme_bitmap_user_type);
- ret = mktme_program_keyid(keyid, payload);
+ ret = mktme_program_keyid(keyid, payload, mktme_leadcpus, 0);
if (ret != MKTME_PROG_SUCCESS) {
pr_debug("%s: %s\n", __func__, mktme_program_err[ret]);
ret = -ENOKEY;
@@ -202,7 +210,7 @@ int mktme_instantiate_key(struct key *key, struct key_preparsed_payload *prep)
ret = -ENOKEY;
goto out;
}
- ret = mktme_program_keyid(keyid, payload);
+ ret = mktme_program_keyid(keyid, payload, mktme_leadcpus, 0);
if (ret != MKTME_PROG_SUCCESS) {
pr_debug("%s: %s\n", __func__, mktme_program_err[ret]);
ret = -ENOKEY;
@@ -375,6 +383,10 @@ struct key_type key_type_mktme = {
.destroy = mktme_destroy_key,
};
+/*
+ * Build mktme_leadcpus mask to include one cpu per physical package.
+ * The mask is used to program the Key Table on each physical package.
+ */
static int mktme_build_leadcpus_mask(void)
{
int online_cpu, mktme_cpu;
@@ -397,6 +409,102 @@ static int mktme_build_leadcpus_mask(void)
return 0;
}
+/* A new package's Key Table is programmed with data saved in mktme_vault. */
+static int mktme_program_new_package(cpumask_var_t mask)
+{
+ struct key *key;
+ int hotplug = 1;
+ int keyid, ret = 0;
+
+ /* When a KeyID slot is freed, its corresponding key is NULL; skip it. */
+ for (keyid = 1; keyid <= mktme_nr_keyids; keyid++) {
+ key = mktme_map_key_from_keyid(keyid);
+ if (!key)
+ continue;
+ /* If one key fails to program, fail the entire package. */
+ ret = mktme_program_keyid(keyid, &mktme_vault[keyid],
+ mask, hotplug);
+ if (ret != MKTME_PROG_SUCCESS) {
+ pr_debug("%s: %s\n", __func__, mktme_program_err[ret]);
+ ret = -ENOKEY;
+ break;
+ }
+ }
+ return ret;
+}
+
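+/*
+ * Runs as each CPU comes online. If the CPU's physical package has no
+ * lead CPU yet, program that package's Key Table from mktme_vault and
+ * make this CPU the package's lead CPU.
+ */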
+static int mktme_hotplug_cpu_startup(unsigned int cpu)
+{
+ int lead_cpu, ret = 0;
+ cpumask_var_t newmask;
+ int pkgid = topology_physical_package_id(cpu);
+
+ mktme_map_lock();
+
+ /* Nothing to do if a lead CPU exists for this package. */
+ for_each_cpu(lead_cpu, mktme_leadcpus)
+ if (topology_physical_package_id(lead_cpu) == pkgid)
+ goto out_unlock;
+
+ /* No keys to program. Just add the new lead CPU to the mask. */
+ if (!mktme_map_mapped_keyids())
+ goto out_add_cpu;
+
+ /*
+  * Keys must be reprogrammed, but user type key data is not
+  * retained unless mktme_savekeys is set.
+  */
+ if (!mktme_savekeys &&
+ (bitmap_weight(mktme_bitmap_user_type, mktme_nr_keyids))) {
+ ret = -EPERM;
+ goto out_unlock;
+ }
+
+ /* Program only this package's Key Table, not all Key Tables. */
+ if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
+ cpumask_set_cpu(cpu, newmask);
+ ret = mktme_program_new_package(newmask);
+ free_cpumask_var(newmask);
+ if (ret < 0)
+ goto out_unlock;
+out_add_cpu:
+ /* Make this cpu a lead cpu for all future Key programming requests. */
+ cpumask_set_cpu(cpu, mktme_leadcpus);
+out_unlock:
+ mktme_map_unlock();
+ return ret;
+}
+
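+/*
+ * Runs as each CPU goes offline. If the CPU was a lead CPU, hand the
+ * lead role to another online CPU in the same package, if one remains.
+ */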
+static int mktme_hotplug_cpu_teardown(unsigned int cpu)
+{
+ int pkgid, online_cpu;
+
+ mktme_map_lock();
+ /* The teardown cpu is not a lead cpu; nothing to do. */
+ if (!cpumask_test_and_clear_cpu(cpu, mktme_leadcpus))
+ goto out;
+ /*
+ * Teardown cpu is a lead cpu. If the physical package
+ * is still present, pick a new lead cpu. Beware: the
+ * teardown cpu is still in the online_cpu mask. Do
+ * not pick it again.
+ */
+ pkgid = topology_physical_package_id(cpu);
+ for_each_online_cpu(online_cpu)
+ if (online_cpu != cpu &&
+ pkgid == topology_physical_package_id(online_cpu)) {
+ cpumask_set_cpu(online_cpu, mktme_leadcpus);
+ break;
+ }
+out:
+ mktme_map_unlock();
+ /* Teardowns always succeed. */
+ return 0;
+}
+
/*
* Allocate the global mktme_map structure based on the available keyids.
* Create a cache for the hardware structure. Initialize the encrypt_count
@@ -405,7 +513,7 @@ static int mktme_build_leadcpus_mask(void)
*/
static int __init init_mktme(void)
{
- int ret;
+ int ret, cpuhp;
/* Verify keys are present */
if (!(mktme_nr_keyids > 0))
@@ -433,10 +541,19 @@ static int __init init_mktme(void)
if (!mktme_vault)
goto free_bitmap;
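+ /*
+  * Register the hotplug callbacks. The _nocalls variant does not
+  * invoke the startup callback for CPUs that are already online.
+  */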
+ cpuhp = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN,
+ "keys/mktme_keys:online",
+ mktme_hotplug_cpu_startup,
+ mktme_hotplug_cpu_teardown);
+ if (cpuhp < 0)
+ goto free_vault;
+
ret = register_key_type(&key_type_mktme);
if (!ret)
return ret; /* SUCCESS */
+ cpuhp_remove_state_nocalls(cpuhp);
+free_vault:
kfree(mktme_vault);
free_bitmap:
bitmap_free(mktme_bitmap_user_type);