diff mbox series

[v2,09/14] arm64: Enable memory encrypt for Realms

Message ID 20240412084213.1733764-10-steven.price@arm.com (mailing list archive)
State New, archived
Headers show
Series arm64: Support for running as a guest in Arm CCA | expand

Commit Message

Steven Price April 12, 2024, 8:42 a.m. UTC
From: Suzuki K Poulose <suzuki.poulose@arm.com>

Use the memory encryption APIs to trigger an RSI call to request a
transition between protected memory and shared memory (or vice versa),
and update the kernel's linear map of the modified pages by flipping the
top bit of the IPA. This requires that block mappings are not used in
the direct map for realm guests.
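
For illustration, a rough sketch of the shape such a transition helper
could take in arch/arm64/mm/pageattr.c (this is not the literal patch
contents; rsi_set_addr_range_state(), the RSI_RIPAS_* constants and
PROT_NS_SHARED are assumptions based on the rest of this series):

/*
 * Illustrative sketch only: the RSI helper name and the RIPAS/PROT
 * constants are assumptions, not the patch as posted.
 */
static int __set_memory_enc_dec(unsigned long addr, int numpages, bool encrypt)
{
	unsigned long set_prot = 0, clear_prot = 0;
	phys_addr_t start, end;

	if (!is_realm_world())
		return 0;

	if (!__is_lm_address(addr))
		return -EINVAL;

	start = __virt_to_phys(addr);
	end = start + numpages * PAGE_SIZE;

	if (encrypt)
		clear_prot = PROT_NS_SHARED;	/* protected: clear the top IPA bit */
	else
		set_prot = PROT_NS_SHARED;	/* shared: set the top IPA bit */

	/*
	 * Ask the RMM (via an RSI call) to change the state of the IPA
	 * range; the real code must also handle failure of this call.
	 */
	rsi_set_addr_range_state(start, end,
				 encrypt ? RSI_RIPAS_RAM : RSI_RIPAS_EMPTY);

	/* ...then update the linear-map PTEs to match the new IPA. */
	return __change_memory_common(addr, PAGE_SIZE * numpages,
				      __pgprot(set_prot),
				      __pgprot(clear_prot));
}

int set_memory_encrypted(unsigned long addr, int numpages)
{
	return __set_memory_enc_dec(addr, numpages, true);
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
	return __set_memory_enc_dec(addr, numpages, false);
}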

Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Co-developed-by: Steven Price <steven.price@arm.com>
Signed-off-by: Steven Price <steven.price@arm.com>
---
 arch/arm64/Kconfig                   |  3 ++
 arch/arm64/include/asm/mem_encrypt.h | 19 +++++++++++
 arch/arm64/kernel/rsi.c              | 12 +++++++
 arch/arm64/mm/pageattr.c             | 48 ++++++++++++++++++++++++++--
 4 files changed, 79 insertions(+), 3 deletions(-)
 create mode 100644 arch/arm64/include/asm/mem_encrypt.h

Comments

kernel test robot April 15, 2024, 3:13 a.m. UTC | #1
Hi Steven,

kernel test robot noticed the following build errors:

[auto build test ERROR on arm64/for-next/core]
[also build test ERROR on kvmarm/next efi/next tip/irq/core linus/master v6.9-rc3 next-20240412]
[cannot apply to arnd-asm-generic/master]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch#_base_tree_information]

url:    https://github.com/intel-lab-lkp/linux/commits/Steven-Price/arm64-rsi-Add-RSI-definitions/20240412-164852
base:   https://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git for-next/core
patch link:    https://lore.kernel.org/r/20240412084213.1733764-10-steven.price%40arm.com
patch subject: [PATCH v2 09/14] arm64: Enable memory encrypt for Realms
config: arm64-allyesconfig (https://download.01.org/0day-ci/archive/20240415/202404151003.vkNApJiS-lkp@intel.com/config)
compiler: clang version 19.0.0git (https://github.com/llvm/llvm-project 8b3b4a92adee40483c27f26c478a384cd69c6f05)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240415/202404151003.vkNApJiS-lkp@intel.com/reproduce)

If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202404151003.vkNApJiS-lkp@intel.com/

All errors (new ones prefixed by >>):

   In file included from drivers/hv/hv.c:13:
   In file included from include/linux/mm.h:2208:
   include/linux/vmstat.h:508:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     508 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     509 |                            item];
         |                            ~~~~
   include/linux/vmstat.h:515:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     515 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     516 |                            NR_VM_NUMA_EVENT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~~
   include/linux/vmstat.h:522:36: warning: arithmetic between different enumeration types ('enum node_stat_item' and 'enum lru_list') [-Wenum-enum-conversion]
     522 |         return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
         |                               ~~~~~~~~~~~ ^ ~~~
   include/linux/vmstat.h:527:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     527 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     528 |                            NR_VM_NUMA_EVENT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~~
   include/linux/vmstat.h:536:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     536 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     537 |                            NR_VM_NUMA_EVENT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~~
>> drivers/hv/hv.c:132:10: error: call to undeclared function 'set_memory_decrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
     132 |                         ret = set_memory_decrypted((unsigned long)hv_cpu->post_msg_page, 1);
         |                               ^
   drivers/hv/hv.c:168:10: error: call to undeclared function 'set_memory_decrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
     168 |                         ret = set_memory_decrypted((unsigned long)
         |                               ^
>> drivers/hv/hv.c:218:11: error: call to undeclared function 'set_memory_encrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
     218 |                                 ret = set_memory_encrypted((unsigned long)
         |                                       ^
   drivers/hv/hv.c:230:11: error: call to undeclared function 'set_memory_encrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
     230 |                                 ret = set_memory_encrypted((unsigned long)
         |                                       ^
   drivers/hv/hv.c:239:11: error: call to undeclared function 'set_memory_encrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
     239 |                                 ret = set_memory_encrypted((unsigned long)
         |                                       ^
   5 warnings and 5 errors generated.
--
   In file included from drivers/hv/connection.c:16:
   In file included from include/linux/mm.h:2208:
   include/linux/vmstat.h:508:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     508 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     509 |                            item];
         |                            ~~~~
   include/linux/vmstat.h:515:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     515 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     516 |                            NR_VM_NUMA_EVENT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~~
   include/linux/vmstat.h:522:36: warning: arithmetic between different enumeration types ('enum node_stat_item' and 'enum lru_list') [-Wenum-enum-conversion]
     522 |         return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
         |                               ~~~~~~~~~~~ ^ ~~~
   include/linux/vmstat.h:527:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     527 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     528 |                            NR_VM_NUMA_EVENT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~~
   include/linux/vmstat.h:536:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     536 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     537 |                            NR_VM_NUMA_EVENT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~~
>> drivers/hv/connection.c:236:8: error: call to undeclared function 'set_memory_decrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
     236 |         ret = set_memory_decrypted((unsigned long)
         |               ^
>> drivers/hv/connection.c:340:2: error: call to undeclared function 'set_memory_encrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
     340 |         set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[0], 1);
         |         ^
   5 warnings and 2 errors generated.
--
   In file included from drivers/hv/channel.c:14:
   In file included from include/linux/mm.h:2208:
   include/linux/vmstat.h:508:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     508 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     509 |                            item];
         |                            ~~~~
   include/linux/vmstat.h:515:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     515 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     516 |                            NR_VM_NUMA_EVENT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~~
   include/linux/vmstat.h:522:36: warning: arithmetic between different enumeration types ('enum node_stat_item' and 'enum lru_list') [-Wenum-enum-conversion]
     522 |         return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
         |                               ~~~~~~~~~~~ ^ ~~~
   include/linux/vmstat.h:527:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     527 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     528 |                            NR_VM_NUMA_EVENT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~~
   include/linux/vmstat.h:536:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
     536 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~ ^
     537 |                            NR_VM_NUMA_EVENT_ITEMS +
         |                            ~~~~~~~~~~~~~~~~~~~~~~
>> drivers/hv/channel.c:442:8: error: call to undeclared function 'set_memory_decrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
     442 |         ret = set_memory_decrypted((unsigned long)kbuffer,
         |               ^
>> drivers/hv/channel.c:531:3: error: call to undeclared function 'set_memory_encrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
     531 |                 set_memory_encrypted((unsigned long)kbuffer,
         |                 ^
   drivers/hv/channel.c:848:8: error: call to undeclared function 'set_memory_encrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
     848 |         ret = set_memory_encrypted((unsigned long)gpadl->buffer,
         |               ^
   5 warnings and 3 errors generated.


vim +/set_memory_decrypted +132 drivers/hv/hv.c

3e7ee4902fe699 drivers/staging/hv/Hv.c Hank Janssen      2009-07-13   96  
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19   97  int hv_synic_alloc(void)
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19   98  {
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18   99  	int cpu, ret = -ENOMEM;
f25a7ece08bdb1 drivers/hv/hv.c         Michael Kelley    2018-08-10  100  	struct hv_per_cpu_context *hv_cpu;
f25a7ece08bdb1 drivers/hv/hv.c         Michael Kelley    2018-08-10  101  
f25a7ece08bdb1 drivers/hv/hv.c         Michael Kelley    2018-08-10  102  	/*
f25a7ece08bdb1 drivers/hv/hv.c         Michael Kelley    2018-08-10  103  	 * First, zero all per-cpu memory areas so hv_synic_free() can
f25a7ece08bdb1 drivers/hv/hv.c         Michael Kelley    2018-08-10  104  	 * detect what memory has been allocated and cleanup properly
f25a7ece08bdb1 drivers/hv/hv.c         Michael Kelley    2018-08-10  105  	 * after any failures.
f25a7ece08bdb1 drivers/hv/hv.c         Michael Kelley    2018-08-10  106  	 */
f25a7ece08bdb1 drivers/hv/hv.c         Michael Kelley    2018-08-10  107  	for_each_present_cpu(cpu) {
f25a7ece08bdb1 drivers/hv/hv.c         Michael Kelley    2018-08-10  108  		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
f25a7ece08bdb1 drivers/hv/hv.c         Michael Kelley    2018-08-10  109  		memset(hv_cpu, 0, sizeof(*hv_cpu));
f25a7ece08bdb1 drivers/hv/hv.c         Michael Kelley    2018-08-10  110  	}
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  111  
6396bb221514d2 drivers/hv/hv.c         Kees Cook         2018-06-12  112  	hv_context.hv_numa_map = kcalloc(nr_node_ids, sizeof(struct cpumask),
597ff72f3de850 drivers/hv/hv.c         Jia-Ju Bai        2018-03-04  113  					 GFP_KERNEL);
9f01ec53458d9e drivers/hv/hv.c         K. Y. Srinivasan  2015-08-05  114  	if (hv_context.hv_numa_map == NULL) {
9f01ec53458d9e drivers/hv/hv.c         K. Y. Srinivasan  2015-08-05  115  		pr_err("Unable to allocate NUMA map\n");
9f01ec53458d9e drivers/hv/hv.c         K. Y. Srinivasan  2015-08-05  116  		goto err;
9f01ec53458d9e drivers/hv/hv.c         K. Y. Srinivasan  2015-08-05  117  	}
9f01ec53458d9e drivers/hv/hv.c         K. Y. Srinivasan  2015-08-05  118  
421b8f20d3c381 drivers/hv/hv.c         Vitaly Kuznetsov  2016-12-07  119  	for_each_present_cpu(cpu) {
f25a7ece08bdb1 drivers/hv/hv.c         Michael Kelley    2018-08-10  120  		hv_cpu = per_cpu_ptr(hv_context.cpu_context, cpu);
37cdd991fac810 drivers/hv/hv.c         Stephen Hemminger 2017-02-11  121  
37cdd991fac810 drivers/hv/hv.c         Stephen Hemminger 2017-02-11  122  		tasklet_init(&hv_cpu->msg_dpc,
37cdd991fac810 drivers/hv/hv.c         Stephen Hemminger 2017-02-11  123  			     vmbus_on_msg_dpc, (unsigned long) hv_cpu);
37cdd991fac810 drivers/hv/hv.c         Stephen Hemminger 2017-02-11  124  
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  125  		if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  126  			hv_cpu->post_msg_page = (void *)get_zeroed_page(GFP_ATOMIC);
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  127  			if (hv_cpu->post_msg_page == NULL) {
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  128  				pr_err("Unable to allocate post msg page\n");
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  129  				goto err;
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  130  			}
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  131  
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24 @132  			ret = set_memory_decrypted((unsigned long)hv_cpu->post_msg_page, 1);
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  133  			if (ret) {
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  134  				pr_err("Failed to decrypt post msg page: %d\n", ret);
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  135  				/* Just leak the page, as it's unsafe to free the page. */
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  136  				hv_cpu->post_msg_page = NULL;
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  137  				goto err;
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  138  			}
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  139  
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  140  			memset(hv_cpu->post_msg_page, 0, PAGE_SIZE);
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  141  		}
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  142  
faff44069ff538 drivers/hv/hv.c         Tianyu Lan        2021-10-25  143  		/*
faff44069ff538 drivers/hv/hv.c         Tianyu Lan        2021-10-25  144  		 * Synic message and event pages are allocated by paravisor.
faff44069ff538 drivers/hv/hv.c         Tianyu Lan        2021-10-25  145  		 * Skip these pages allocation here.
faff44069ff538 drivers/hv/hv.c         Tianyu Lan        2021-10-25  146  		 */
d3a9d7e49d1531 drivers/hv/hv.c         Dexuan Cui        2023-08-24  147  		if (!ms_hyperv.paravisor_present && !hv_root_partition) {
37cdd991fac810 drivers/hv/hv.c         Stephen Hemminger 2017-02-11  148  			hv_cpu->synic_message_page =
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  149  				(void *)get_zeroed_page(GFP_ATOMIC);
37cdd991fac810 drivers/hv/hv.c         Stephen Hemminger 2017-02-11  150  			if (hv_cpu->synic_message_page == NULL) {
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  151  				pr_err("Unable to allocate SYNIC message page\n");
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  152  				goto err;
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  153  			}
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  154  
faff44069ff538 drivers/hv/hv.c         Tianyu Lan        2021-10-25  155  			hv_cpu->synic_event_page =
faff44069ff538 drivers/hv/hv.c         Tianyu Lan        2021-10-25  156  				(void *)get_zeroed_page(GFP_ATOMIC);
37cdd991fac810 drivers/hv/hv.c         Stephen Hemminger 2017-02-11  157  			if (hv_cpu->synic_event_page == NULL) {
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  158  				pr_err("Unable to allocate SYNIC event page\n");
68f2f2bc163d44 drivers/hv/hv.c         Dexuan Cui        2023-08-24  159  
68f2f2bc163d44 drivers/hv/hv.c         Dexuan Cui        2023-08-24  160  				free_page((unsigned long)hv_cpu->synic_message_page);
68f2f2bc163d44 drivers/hv/hv.c         Dexuan Cui        2023-08-24  161  				hv_cpu->synic_message_page = NULL;
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  162  				goto err;
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  163  			}
faff44069ff538 drivers/hv/hv.c         Tianyu Lan        2021-10-25  164  		}
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  165  
68f2f2bc163d44 drivers/hv/hv.c         Dexuan Cui        2023-08-24  166  		if (!ms_hyperv.paravisor_present &&
e3131f1c81448a drivers/hv/hv.c         Dexuan Cui        2023-08-24  167  		    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  168  			ret = set_memory_decrypted((unsigned long)
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  169  				hv_cpu->synic_message_page, 1);
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  170  			if (ret) {
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  171  				pr_err("Failed to decrypt SYNIC msg page: %d\n", ret);
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  172  				hv_cpu->synic_message_page = NULL;
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  173  
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  174  				/*
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  175  				 * Free the event page here so that hv_synic_free()
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  176  				 * won't later try to re-encrypt it.
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  177  				 */
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  178  				free_page((unsigned long)hv_cpu->synic_event_page);
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  179  				hv_cpu->synic_event_page = NULL;
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  180  				goto err;
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  181  			}
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  182  
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  183  			ret = set_memory_decrypted((unsigned long)
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  184  				hv_cpu->synic_event_page, 1);
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  185  			if (ret) {
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  186  				pr_err("Failed to decrypt SYNIC event page: %d\n", ret);
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  187  				hv_cpu->synic_event_page = NULL;
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  188  				goto err;
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  189  			}
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  190  
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  191  			memset(hv_cpu->synic_message_page, 0, PAGE_SIZE);
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  192  			memset(hv_cpu->synic_event_page, 0, PAGE_SIZE);
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  193  		}
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  194  	}
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  195  
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  196  	return 0;
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  197  
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  198  err:
572086325ce9a9 drivers/hv/hv.c         Michael Kelley    2018-08-02  199  	/*
572086325ce9a9 drivers/hv/hv.c         Michael Kelley    2018-08-02  200  	 * Any memory allocations that succeeded will be freed when
572086325ce9a9 drivers/hv/hv.c         Michael Kelley    2018-08-02  201  	 * the caller cleans up by calling hv_synic_free()
572086325ce9a9 drivers/hv/hv.c         Michael Kelley    2018-08-02  202  	 */
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  203  	return ret;
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  204  }
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  205  
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  206  
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  207  void hv_synic_free(void)
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  208  {
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  209  	int cpu, ret;
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  210  
37cdd991fac810 drivers/hv/hv.c         Stephen Hemminger 2017-02-11  211  	for_each_present_cpu(cpu) {
37cdd991fac810 drivers/hv/hv.c         Stephen Hemminger 2017-02-11  212  		struct hv_per_cpu_context *hv_cpu
37cdd991fac810 drivers/hv/hv.c         Stephen Hemminger 2017-02-11  213  			= per_cpu_ptr(hv_context.cpu_context, cpu);
37cdd991fac810 drivers/hv/hv.c         Stephen Hemminger 2017-02-11  214  
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  215  		/* It's better to leak the page if the encryption fails. */
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  216  		if (ms_hyperv.paravisor_present && hv_isolation_type_tdx()) {
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  217  			if (hv_cpu->post_msg_page) {
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24 @218  				ret = set_memory_encrypted((unsigned long)
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  219  					hv_cpu->post_msg_page, 1);
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  220  				if (ret) {
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  221  					pr_err("Failed to encrypt post msg page: %d\n", ret);
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  222  					hv_cpu->post_msg_page = NULL;
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  223  				}
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  224  			}
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  225  		}
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  226  
68f2f2bc163d44 drivers/hv/hv.c         Dexuan Cui        2023-08-24  227  		if (!ms_hyperv.paravisor_present &&
e3131f1c81448a drivers/hv/hv.c         Dexuan Cui        2023-08-24  228  		    (hv_isolation_type_snp() || hv_isolation_type_tdx())) {
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  229  			if (hv_cpu->synic_message_page) {
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  230  				ret = set_memory_encrypted((unsigned long)
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  231  					hv_cpu->synic_message_page, 1);
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  232  				if (ret) {
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  233  					pr_err("Failed to encrypt SYNIC msg page: %d\n", ret);
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  234  					hv_cpu->synic_message_page = NULL;
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  235  				}
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  236  			}
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  237  
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  238  			if (hv_cpu->synic_event_page) {
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  239  				ret = set_memory_encrypted((unsigned long)
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  240  					hv_cpu->synic_event_page, 1);
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  241  				if (ret) {
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  242  					pr_err("Failed to encrypt SYNIC event page: %d\n", ret);
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  243  					hv_cpu->synic_event_page = NULL;
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  244  				}
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  245  			}
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  246  		}
193061ea0a50c1 drivers/hv/hv.c         Tianyu Lan        2023-08-18  247  
23378295042a4b drivers/hv/hv.c         Dexuan Cui        2023-08-24  248  		free_page((unsigned long)hv_cpu->post_msg_page);
37cdd991fac810 drivers/hv/hv.c         Stephen Hemminger 2017-02-11  249  		free_page((unsigned long)hv_cpu->synic_event_page);
37cdd991fac810 drivers/hv/hv.c         Stephen Hemminger 2017-02-11  250  		free_page((unsigned long)hv_cpu->synic_message_page);
37cdd991fac810 drivers/hv/hv.c         Stephen Hemminger 2017-02-11  251  	}
37cdd991fac810 drivers/hv/hv.c         Stephen Hemminger 2017-02-11  252  
9f01ec53458d9e drivers/hv/hv.c         K. Y. Srinivasan  2015-08-05  253  	kfree(hv_context.hv_numa_map);
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  254  }
2608fb65310341 drivers/hv/hv.c         Jason Wang        2013-06-19  255
Suzuki K Poulose April 25, 2024, 1:42 p.m. UTC | #2
On 15/04/2024 04:13, kernel test robot wrote:
> Hi Steven,
> 
> kernel test robot noticed the following build errors:
> 
> [auto build test ERROR on arm64/for-next/core]
> [also build test ERROR on kvmarm/next efi/next tip/irq/core linus/master v6.9-rc3 next-20240412]
> [cannot apply to arnd-asm-generic/master]
> [If your patch is applied to the wrong git tree, kindly drop us a note.
> And when submitting patch, we suggest to use '--base' as documented in
> https://git-scm.com/docs/git-format-patch#_base_tree_information]
> 
> url:    https://github.com/intel-lab-lkp/linux/commits/Steven-Price/arm64-rsi-Add-RSI-definitions/20240412-164852
> base:   https://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux.git for-next/core
> patch link:    https://lore.kernel.org/r/20240412084213.1733764-10-steven.price%40arm.com
> patch subject: [PATCH v2 09/14] arm64: Enable memory encrypt for Realms
> config: arm64-allyesconfig (https://download.01.org/0day-ci/archive/20240415/202404151003.vkNApJiS-lkp@intel.com/config)
> compiler: clang version 19.0.0git (https://github.com/llvm/llvm-project 8b3b4a92adee40483c27f26c478a384cd69c6f05)
> reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20240415/202404151003.vkNApJiS-lkp@intel.com/reproduce)
> 
> If you fix the issue in a separate patch/commit (i.e. not just a new version of
> the same patch/commit), kindly add following tags
> | Reported-by: kernel test robot <lkp@intel.com>
> | Closes: https://lore.kernel.org/oe-kbuild-all/202404151003.vkNApJiS-lkp@intel.com/
> 
> All errors (new ones prefixed by >>):
> 
>     In file included from drivers/hv/hv.c:13:
>     In file included from include/linux/mm.h:2208:
>     include/linux/vmstat.h:508:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
>       508 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~ ^
>       509 |                            item];
>           |                            ~~~~
>     include/linux/vmstat.h:515:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
>       515 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~ ^
>       516 |                            NR_VM_NUMA_EVENT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~~
>     include/linux/vmstat.h:522:36: warning: arithmetic between different enumeration types ('enum node_stat_item' and 'enum lru_list') [-Wenum-enum-conversion]
>       522 |         return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
>           |                               ~~~~~~~~~~~ ^ ~~~
>     include/linux/vmstat.h:527:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
>       527 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~ ^
>       528 |                            NR_VM_NUMA_EVENT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~~
>     include/linux/vmstat.h:536:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
>       536 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~ ^
>       537 |                            NR_VM_NUMA_EVENT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~~
>>> drivers/hv/hv.c:132:10: error: call to undeclared function 'set_memory_decrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
>       132 |                         ret = set_memory_decrypted((unsigned long)hv_cpu->post_msg_page, 1);
>           |                               ^
>     drivers/hv/hv.c:168:10: error: call to undeclared function 'set_memory_decrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
>       168 |                         ret = set_memory_decrypted((unsigned long)
>           |                               ^
>>> drivers/hv/hv.c:218:11: error: call to undeclared function 'set_memory_encrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
>       218 |                                 ret = set_memory_encrypted((unsigned long)
>           |                                       ^
>     drivers/hv/hv.c:230:11: error: call to undeclared function 'set_memory_encrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
>       230 |                                 ret = set_memory_encrypted((unsigned long)
>           |                                       ^
>     drivers/hv/hv.c:239:11: error: call to undeclared function 'set_memory_encrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
>       239 |                                 ret = set_memory_encrypted((unsigned long)
>           |                                       ^
>     5 warnings and 5 errors generated.
> --
>     In file included from drivers/hv/connection.c:16:
>     In file included from include/linux/mm.h:2208:
>     include/linux/vmstat.h:508:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
>       508 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~ ^
>       509 |                            item];
>           |                            ~~~~
>     include/linux/vmstat.h:515:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
>       515 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~ ^
>       516 |                            NR_VM_NUMA_EVENT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~~
>     include/linux/vmstat.h:522:36: warning: arithmetic between different enumeration types ('enum node_stat_item' and 'enum lru_list') [-Wenum-enum-conversion]
>       522 |         return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
>           |                               ~~~~~~~~~~~ ^ ~~~
>     include/linux/vmstat.h:527:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
>       527 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~ ^
>       528 |                            NR_VM_NUMA_EVENT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~~
>     include/linux/vmstat.h:536:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
>       536 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~ ^
>       537 |                            NR_VM_NUMA_EVENT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~~
>>> drivers/hv/connection.c:236:8: error: call to undeclared function 'set_memory_decrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
>       236 |         ret = set_memory_decrypted((unsigned long)
>           |               ^
>>> drivers/hv/connection.c:340:2: error: call to undeclared function 'set_memory_encrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
>       340 |         set_memory_encrypted((unsigned long)vmbus_connection.monitor_pages[0], 1);
>           |         ^
>     5 warnings and 2 errors generated.
> --
>     In file included from drivers/hv/channel.c:14:
>     In file included from include/linux/mm.h:2208:
>     include/linux/vmstat.h:508:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
>       508 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~ ^
>       509 |                            item];
>           |                            ~~~~
>     include/linux/vmstat.h:515:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
>       515 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~ ^
>       516 |                            NR_VM_NUMA_EVENT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~~
>     include/linux/vmstat.h:522:36: warning: arithmetic between different enumeration types ('enum node_stat_item' and 'enum lru_list') [-Wenum-enum-conversion]
>       522 |         return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
>           |                               ~~~~~~~~~~~ ^ ~~~
>     include/linux/vmstat.h:527:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
>       527 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~ ^
>       528 |                            NR_VM_NUMA_EVENT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~~
>     include/linux/vmstat.h:536:43: warning: arithmetic between different enumeration types ('enum zone_stat_item' and 'enum numa_stat_item') [-Wenum-enum-conversion]
>       536 |         return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~ ^
>       537 |                            NR_VM_NUMA_EVENT_ITEMS +
>           |                            ~~~~~~~~~~~~~~~~~~~~~~
>>> drivers/hv/channel.c:442:8: error: call to undeclared function 'set_memory_decrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
>       442 |         ret = set_memory_decrypted((unsigned long)kbuffer,
>           |               ^
>>> drivers/hv/channel.c:531:3: error: call to undeclared function 'set_memory_encrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
>       531 |                 set_memory_encrypted((unsigned long)kbuffer,
>           |                 ^
>     drivers/hv/channel.c:848:8: error: call to undeclared function 'set_memory_encrypted'; ISO C99 and later do not support implicit function declarations [-Wimplicit-function-declaration]
>       848 |         ret = set_memory_encrypted((unsigned long)gpadl->buffer,
>           |               ^
>     5 warnings and 3 errors generated.

That's my mistake. The correct place for declaring set_memory_*crypted()
is asm/set_memory.h, not asm/mem_encrypt.h.

Steven, could you please fold in the patch below:


diff --git a/arch/arm64/include/asm/mem_encrypt.h b/arch/arm64/include/asm/mem_encrypt.h
index 7381f9585321..e47265cd180a 100644
--- a/arch/arm64/include/asm/mem_encrypt.h
+++ b/arch/arm64/include/asm/mem_encrypt.h
@@ -14,6 +14,4 @@ static inline bool force_dma_unencrypted(struct device *dev)
 	return is_realm_world();
 }
 
-int set_memory_encrypted(unsigned long addr, int numpages);
-int set_memory_decrypted(unsigned long addr, int numpages);
 #endif
diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h
index 0f740b781187..9561b90fb43c 100644
--- a/arch/arm64/include/asm/set_memory.h
+++ b/arch/arm64/include/asm/set_memory.h
@@ -14,4 +14,6 @@ int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 bool kernel_page_present(struct page *page);
 
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
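
For reference, the reason this placement matters is that generic callers
only include the wrapper header, which pulls in the arch header when
ARCH_HAS_SET_MEMORY is selected; declarations left only in
asm/mem_encrypt.h are never seen by them. A heavily simplified sketch
(not part of this patch):

/* include/linux/set_memory.h, heavily simplified */
#ifdef CONFIG_ARCH_HAS_SET_MEMORY
#include <asm/set_memory.h>	/* the arm64 declarations must land here */
#endif

/* a caller then only needs the wrapper, e.g. (from the build log above): */
#include <linux/set_memory.h>

	ret = set_memory_decrypted((unsigned long)hv_cpu->post_msg_page, 1);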



Suzuki
Steven Price April 25, 2024, 3:52 p.m. UTC | #3
On 25/04/2024 14:42, Suzuki K Poulose wrote:
> On 15/04/2024 04:13, kernel test robot wrote:
>> Hi Steven,
>>
>> kernel test robot noticed the following build errors:
>>

<snip>

>>>> drivers/hv/channel.c:442:8: error: call to undeclared function
>>>> 'set_memory_decrypted'; ISO C99 and later do not support implicit
>>>> function declarations [-Wimplicit-function-declaration]
>>       442 |         ret = set_memory_decrypted((unsigned long)kbuffer,
>>           |               ^
>>>> drivers/hv/channel.c:531:3: error: call to undeclared function
>>>> 'set_memory_encrypted'; ISO C99 and later do not support implicit
>>>> function declarations [-Wimplicit-function-declaration]
>>       531 |                 set_memory_encrypted((unsigned long)kbuffer,
>>           |                 ^
>>     drivers/hv/channel.c:848:8: error: call to undeclared function
>> 'set_memory_encrypted'; ISO C99 and later do not support implicit
>> function declarations [-Wimplicit-function-declaration]
>>       848 |         ret = set_memory_encrypted((unsigned
>> long)gpadl->buffer,
>>           |               ^
>>     5 warnings and 3 errors generated.
> 
> That's my mistake. The correct place for declaring set_memory_*crypted()
> is asm/set_memory.h, not asm/mem_encrypt.h.
> 
> Steven, please could you fold this patch below :

Sure, I've folded it into my local branch. Thanks for looking into the error.

Steve

> 
> diff --git a/arch/arm64/include/asm/mem_encrypt.h b/arch/arm64/include/asm/mem_encrypt.h
> index 7381f9585321..e47265cd180a 100644
> --- a/arch/arm64/include/asm/mem_encrypt.h
> +++ b/arch/arm64/include/asm/mem_encrypt.h
> @@ -14,6 +14,4 @@ static inline bool force_dma_unencrypted(struct device *dev)
>         return is_realm_world();
>  }
> 
> -int set_memory_encrypted(unsigned long addr, int numpages);
> -int set_memory_decrypted(unsigned long addr, int numpages);
>  #endif
> diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h
> index 0f740b781187..9561b90fb43c 100644
> --- a/arch/arm64/include/asm/set_memory.h
> +++ b/arch/arm64/include/asm/set_memory.h
> @@ -14,4 +14,6 @@ int set_direct_map_invalid_noflush(struct page *page);
>  int set_direct_map_default_noflush(struct page *page);
>  bool kernel_page_present(struct page *page);
> 
> +int set_memory_encrypted(unsigned long addr, int numpages);
> +int set_memory_decrypted(unsigned long addr, int numpages);
> 
> 
> 
> Suzuki
Suzuki K Poulose April 25, 2024, 4:29 p.m. UTC | #4
On 25/04/2024 14:42, Suzuki K Poulose wrote:
> On 15/04/2024 04:13, kernel test robot wrote:
>> Hi Steven,
>>
>> kernel test robot noticed the following build errors:
>>

<snip>

> 
> That's my mistake. The correct place for declaring set_memory_*crypted()
> is asm/set_memory.h, not asm/mem_encrypt.h.
> 
> Steven, please could you fold this patch below :
> 
> 
> diff --git a/arch/arm64/include/asm/mem_encrypt.h b/arch/arm64/include/asm/mem_encrypt.h
> index 7381f9585321..e47265cd180a 100644
> --- a/arch/arm64/include/asm/mem_encrypt.h
> +++ b/arch/arm64/include/asm/mem_encrypt.h
> @@ -14,6 +14,4 @@ static inline bool force_dma_unencrypted(struct device *dev)
>          return is_realm_world();
>   }
> 
> -int set_memory_encrypted(unsigned long addr, int numpages);
> -int set_memory_decrypted(unsigned long addr, int numpages);
>   #endif
> diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h
> index 0f740b781187..9561b90fb43c 100644
> --- a/arch/arm64/include/asm/set_memory.h
> +++ b/arch/arm64/include/asm/set_memory.h
> @@ -14,4 +14,6 @@ int set_direct_map_invalid_noflush(struct page *page);
>   int set_direct_map_default_noflush(struct page *page);
>   bool kernel_page_present(struct page *page);
> 
> +int set_memory_encrypted(unsigned long addr, int numpages);
> +int set_memory_decrypted(unsigned long addr, int numpages);
> 
> 

Emanuele reports that these need to be exported as well, something
like:


diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 229b6d9990f5..de3843ce2aea 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -228,11 +228,13 @@ int set_memory_encrypted(unsigned long addr, int numpages)
  {
         return __set_memory_encrypted(addr, numpages, true);
  }
+EXPORT_SYMBOL_GPL(set_memory_encrypted);

  int set_memory_decrypted(unsigned long addr, int numpages)
  {
         return __set_memory_encrypted(addr, numpages, false);
  }
+EXPORT_SYMBOL_GPL(set_memory_decrypted);

  #ifdef CONFIG_DEBUG_PAGEALLOC
  void __kernel_map_pages(struct page *page, int numpages, int enable)


> 
> Suzuki



Emanuele Rocca April 25, 2024, 6:16 p.m. UTC | #5
Hi,

On 2024-04-25 05:29, Suzuki K Poulose wrote:
> Emanuele reports that these need to be exported as well, something
> like:
> 
> 
> diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
> index 229b6d9990f5..de3843ce2aea 100644
> --- a/arch/arm64/mm/pageattr.c
> +++ b/arch/arm64/mm/pageattr.c
> @@ -228,11 +228,13 @@ int set_memory_encrypted(unsigned long addr, int numpages)
>  {
>         return __set_memory_encrypted(addr, numpages, true);
>  }
> +EXPORT_SYMBOL_GPL(set_memory_encrypted);
> 
>  int set_memory_decrypted(unsigned long addr, int numpages)
>  {
>         return __set_memory_encrypted(addr, numpages, false);
>  }
> +EXPORT_SYMBOL_GPL(set_memory_decrypted);
> 
>  #ifdef CONFIG_DEBUG_PAGEALLOC
>  void __kernel_map_pages(struct page *page, int numpages, int enable)

Indeed, without exporting the symbols I was getting this build failure:

 ERROR: modpost: "set_memory_encrypted" [drivers/hv/hv_vmbus.ko] undefined!
 ERROR: modpost: "set_memory_decrypted" [drivers/hv/hv_vmbus.ko] undefined!

I can now build 6.9-rc1 w/ CCA guest patches if I apply Suzuki's
changes:

1) move set_memory_encrypted/decrypted from asm/mem_encrypt.h to
   asm/set_memory.h
2) export both symbols in mm/pageattr.c

See diff below.

Thanks,
  Emanuele

diff --git a/arch/arm64/include/asm/mem_encrypt.h b/arch/arm64/include/asm/mem_encrypt.h
index 7381f9585321..e47265cd180a 100644
--- a/arch/arm64/include/asm/mem_encrypt.h
+++ b/arch/arm64/include/asm/mem_encrypt.h
@@ -14,6 +14,4 @@ static inline bool force_dma_unencrypted(struct device *dev)
        return is_realm_world();
 }

-int set_memory_encrypted(unsigned long addr, int numpages);
-int set_memory_decrypted(unsigned long addr, int numpages);
 #endif
diff --git a/arch/arm64/include/asm/set_memory.h b/arch/arm64/include/asm/set_memory.h
index 0f740b781187..9561b90fb43c 100644
--- a/arch/arm64/include/asm/set_memory.h
+++ b/arch/arm64/include/asm/set_memory.h
@@ -14,4 +14,6 @@ int set_direct_map_invalid_noflush(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 bool kernel_page_present(struct page *page);

+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
 #endif /* _ASM_ARM64_SET_MEMORY_H */
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 229b6d9990f5..de3843ce2aea 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -228,11 +228,13 @@ int set_memory_encrypted(unsigned long addr, int numpages)
 {
        return __set_memory_encrypted(addr, numpages, true);
 }
+EXPORT_SYMBOL_GPL(set_memory_encrypted);

 int set_memory_decrypted(unsigned long addr, int numpages)
 {
        return __set_memory_encrypted(addr, numpages, false);
 }
+EXPORT_SYMBOL_GPL(set_memory_decrypted);

 #ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
Catalin Marinas May 14, 2024, 6 p.m. UTC | #6
On Fri, Apr 12, 2024 at 09:42:08AM +0100, Steven Price wrote:
>  static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
> @@ -41,6 +45,7 @@ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
>  	pte = clear_pte_bit(pte, cdata->clear_mask);
>  	pte = set_pte_bit(pte, cdata->set_mask);
>  
> +	/* TODO: Break before make for PROT_NS_SHARED updates */
>  	__set_pte(ptep, pte);
>  	return 0;

Oh, this TODO is problematic, not sure we can do it safely. There are
some patches on the list to trap faults from other CPUs if they happen
to access the page when broken but so far we pushed back as complex and
at risk of getting the logic wrong.

From an architecture perspective, you are changing the output address
and D8.16.1 requires a break-before-make sequence (FEAT_BBM doesn't
help). So we either come up with a way to do BBM safely (stop_machine()
maybe if it's not too expensive or some way to guarantee no accesses to
this page while being changed) or we get the architecture clarified on
the possible side-effects here ("unpredictable" doesn't help).

>  }
> @@ -192,6 +197,43 @@ int set_direct_map_default_noflush(struct page *page)
>  				   PAGE_SIZE, change_page_range, &data);
>  }
>  
> +static int __set_memory_encrypted(unsigned long addr,
> +				  int numpages,
> +				  bool encrypt)
> +{
> +	unsigned long set_prot = 0, clear_prot = 0;
> +	phys_addr_t start, end;
> +
> +	if (!is_realm_world())
> +		return 0;
> +
> +	WARN_ON(!__is_lm_address(addr));

Just return from this function if it's not a linear map address. No
point in corrupting other areas since __virt_to_phys() will get it
wrong.
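
i.e. something like this (just a sketch of the suggestion, the error
value here is picked arbitrarily):

	if (WARN_ON(!__is_lm_address(addr)))
		return -EINVAL;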

> +	start = __virt_to_phys(addr);
> +	end = start + numpages * PAGE_SIZE;
> +
> +	if (encrypt) {
> +		clear_prot = PROT_NS_SHARED;
> +		set_memory_range_protected(start, end);
> +	} else {
> +		set_prot = PROT_NS_SHARED;
> +		set_memory_range_shared(start, end);
> +	}
> +
> +	return __change_memory_common(addr, PAGE_SIZE * numpages,
> +				      __pgprot(set_prot),
> +				      __pgprot(clear_prot));
> +}

Can someone summarise what the point of this protection bit is? The IPA
memory is marked as protected/unprotected already via the RSI call and
presumably the RMM disables/permits sharing with a non-secure hypervisor
accordingly irrespective of which alias the realm guest has the linear
mapping mapped to. What does it do with the top bit of the IPA? Is it
that the RMM will prevent (via Stage 2) access if the IPA does not match
the requested protection? IOW, it unmaps one or the other at Stage 2?

Also, the linear map is not the only one that points to this IPA. What
if this is a buffer mapped in user-space or remapped as non-cacheable
(upgraded to cacheable via FWB) in the kernel, the code above does not
(and cannot) change the user mappings.

It needs some digging into dma_direct_alloc() as well: it uses
pgprot_decrypted() but that's not implemented by your patches. Not sure
it helps; it looks like the remap path in this function does not have a
dma_set_decrypted() call (or maybe I missed it).
Suzuki K Poulose May 15, 2024, 10:47 a.m. UTC | #7
On 14/05/2024 19:00, Catalin Marinas wrote:
> On Fri, Apr 12, 2024 at 09:42:08AM +0100, Steven Price wrote:
>>   static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
>> @@ -41,6 +45,7 @@ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
>>   	pte = clear_pte_bit(pte, cdata->clear_mask);
>>   	pte = set_pte_bit(pte, cdata->set_mask);
>>   
>> +	/* TODO: Break before make for PROT_NS_SHARED updates */
>>   	__set_pte(ptep, pte);
>>   	return 0;
> 
> Oh, this TODO is problematic, not sure we can do it safely. There are
> some patches on the list to trap faults from other CPUs if they happen
> to access the page when broken but so far we pushed back as complex and
> at risk of getting the logic wrong.
> 
>  From an architecture perspective, you are changing the output address
> and D8.16.1 requires a break-before-make sequence (FEAT_BBM doesn't
> help). So we either come up with a way to do BBM safely (stop_machine()
> maybe if it's not too expensive or some way to guarantee no accesses to
> this page while being changed) or we get the architecture clarified on
> the possible side-effects here ("unpredictable" doesn't help).

Thanks, we need to sort this out.


> 
>>   }
>> @@ -192,6 +197,43 @@ int set_direct_map_default_noflush(struct page *page)
>>   				   PAGE_SIZE, change_page_range, &data);
>>   }
>>   
>> +static int __set_memory_encrypted(unsigned long addr,
>> +				  int numpages,
>> +				  bool encrypt)
>> +{
>> +	unsigned long set_prot = 0, clear_prot = 0;
>> +	phys_addr_t start, end;
>> +
>> +	if (!is_realm_world())
>> +		return 0;
>> +
>> +	WARN_ON(!__is_lm_address(addr));
> 
> Just return from this function if it's not a linear map address. No
> point in corrupting other areas since __virt_to_phys() will get it
> wrong.
> 
>> +	start = __virt_to_phys(addr);
>> +	end = start + numpages * PAGE_SIZE;
>> +
>> +	if (encrypt) {
>> +		clear_prot = PROT_NS_SHARED;
>> +		set_memory_range_protected(start, end);
>> +	} else {
>> +		set_prot = PROT_NS_SHARED;
>> +		set_memory_range_shared(start, end);
>> +	}
>> +
>> +	return __change_memory_common(addr, PAGE_SIZE * numpages,
>> +				      __pgprot(set_prot),
>> +				      __pgprot(clear_prot));
>> +}
> 
> Can someone summarise what the point of this protection bit is? The IPA
> memory is marked as protected/unprotected already via the RSI call and
> presumably the RMM disables/permits sharing with a non-secure hypervisor
> accordingly irrespective of which alias the realm guest has the linear
> mapping mapped to. What does it do with the top bit of the IPA? Is it
> that the RMM will prevent (via Stage 2) access if the IPA does not match
> the requested protection? IOW, it unmaps one or the other at Stage 2?

The Realm's IPA space is split in half. The lower half is "protected"
and all pages backing a "protected" IPA are in the Realm world and
thus cannot be shared with the hypervisor. The upper half of the IPA
space is "unprotected" (backed by Non-secure PAS pages) and can be
accessed by the Host/hyp.

The RSI call (RSI_IPA_STATE_SET) doesn't make an IPA unprotected. It
simply "invalidates" a (protected) IPA to "EMPTY" implying the Realm 
doesn't intend to use the "ipa" as RAM anymore and any access to it from
the Realm would trigger an SEA into the Realm. The RSI call triggers an 
exit to the host with the information and is a hint to the hypervisor to 
reclaim the page backing the IPA.

Now, given we need dynamic "sharing" of pages (instead of a dedicated
set of shared pages), "aliasing" of an IPA gives us shared pages.
i.e., if the OS wants to share a page "x" (protected IPA) with the host,
we mark it as EMPTY via the RSI call and then access "x" with the top bit
set (aliasing the IPA x). The resulting stage2 fault allows the hyp to map
the page backing IPA "x" as "unprotected" at the ALIAS(x) address.

Thus we treat the "top" bit as an attribute in the Realm.
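
To make the aliasing concrete, here is a minimal sketch (illustrative
names only, not code from this series; it assumes the Realm discovers
its IPA width at boot, e.g. via RSI_REALM_CONFIG):

	/* Illustration: a protected IPA "x" and its unprotected alias. */
	static unsigned long example_ipa_width = 48;	/* assumed value */

	static unsigned long example_unprotected_alias(unsigned long ipa)
	{
		/* Setting the top IPA bit selects the unprotected half. */
		return ipa | (1UL << (example_ipa_width - 1));
	}

	static unsigned long example_protected_ipa(unsigned long ipa)
	{
		/* Clearing the top IPA bit gives back the protected IPA. */
		return ipa & ~(1UL << (example_ipa_width - 1));
	}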

> 
> Also, the linear map is not the only one that points to this IPA. What
> if this is a buffer mapped in user-space or remapped as non-cacheable
> (upgraded to cacheable via FWB) in the kernel, the code above does not
> (and cannot) change the user mappings.
> 
> It needs some digging into dma_direct_alloc() as well, it uses a
> pgprot_decrypted() but that's not implemented by your patches. Not sure
> it helps, it looks like the remap path in this function does not have a
> dma_set_decrypted() call (or maybe I missed it).

Good point. Will take a look.

Suzuki
Catalin Marinas May 16, 2024, 7:48 a.m. UTC | #8
On Wed, May 15, 2024 at 11:47:02AM +0100, Suzuki K Poulose wrote:
> On 14/05/2024 19:00, Catalin Marinas wrote:
> > On Fri, Apr 12, 2024 at 09:42:08AM +0100, Steven Price wrote:
> > Can someone summarise what the point of this protection bit is? The IPA
> > memory is marked as protected/unprotected already via the RSI call and
> > presumably the RMM disables/permits sharing with a non-secure hypervisor
> > accordingly irrespective of which alias the realm guest has the linear
> > mapping mapped to. What does it do with the top bit of the IPA? Is it
> > that the RMM will prevent (via Stage 2) access if the IPA does not match
> > the requested protection? IOW, it unmaps one or the other at Stage 2?
> 
> The Realm's IPA space is split in half. The lower half is "protected"
> and all pages backing the "protected" IPA is in the Realm world and
> thus cannot be shared with the hypervisor. The upper half IPA is
> "unprotected" (backed by Non-secure PAS pages) and can be accessed
> by the Host/hyp.

What about emulated device I/O where there's no backing RAM at an IPA?
Does it need to have the top bit set?

> The RSI call (RSI_IPA_STATE_SET) doesn't make an IPA unprotected. It
> simply "invalidates" a (protected) IPA to "EMPTY" implying the Realm doesn't
> intend to use the "ipa" as RAM anymore and any access to it from
> the Realm would trigger an SEA into the Realm. The RSI call triggers an exit
> to the host with the information and is a hint to the hypervisor to reclaim
> the page backing the IPA.
> 
> Now, given we need dynamic "sharing" of pages (instead of a dedicated
> set of shared pages), "aliasing" of an IPA gives us shared pages.
> i.e., If OS wants to share a page "x" (protected IPA) with the host,
> we mark that as EMPTY via RSI call and then access the "x" with top-bit
> set (aliasing the IPA x). This fault allows the hyp to map the page backing
> IPA "x" as "unprotected" at ALIAS(x) address.

Does the RMM sanity-check that the NS hyp mappings are protected or
unprotected depending on the IPA range?

I assume that's also the case if the NS hyp is the first one to access a
page before the realm (e.g. inbound virtio transfer; no page allocated
yet because of a realm access).
Suzuki K Poulose May 16, 2024, 9:06 a.m. UTC | #9
Hi Catalin

On 16/05/2024 08:48, Catalin Marinas wrote:
> On Wed, May 15, 2024 at 11:47:02AM +0100, Suzuki K Poulose wrote:
>> On 14/05/2024 19:00, Catalin Marinas wrote:
>>> On Fri, Apr 12, 2024 at 09:42:08AM +0100, Steven Price wrote:
>>> Can someone summarise what the point of this protection bit is? The IPA
>>> memory is marked as protected/unprotected already via the RSI call and
>>> presumably the RMM disables/permits sharing with a non-secure hypervisor
>>> accordingly irrespective of which alias the realm guest has the linear
>>> mapping mapped to. What does it do with the top bit of the IPA? Is it
>>> that the RMM will prevent (via Stage 2) access if the IPA does not match
>>> the requested protection? IOW, it unmaps one or the other at Stage 2?
>>
>> The Realm's IPA space is split in half. The lower half is "protected"
>> and all pages backing the "protected" IPA is in the Realm world and
>> thus cannot be shared with the hypervisor. The upper half IPA is
>> "unprotected" (backed by Non-secure PAS pages) and can be accessed
>> by the Host/hyp.
> 
> What about emulated device I/O where there's no backing RAM at an IPA.
> Does it need to have the top bit set?

The behaviour depends on the IPA (i.e., protected vs unprotected).

1. Unprotected: All stage2 faults in the Unprotected half are serviced
by the Host, including areas that may be "Memory" backed. The RMM
doesn't provide any guarantees on accesses to the unprotected half,
i.e., the host could even inject a Synchronous External Abort if an MMIO
region is not understood by it.

2. Protected: The behaviour depends on the "IPA State" (only applicable
to protected IPAs). The possible states are RIPAS_RAM, RIPAS_EMPTY
and RIPAS_DESTROYED (the last is not visible to the Realm).

The default IPA state is RIPAS_EMPTY for all IPAs and the state is
controlled by the Realm with the help of the RMM (serviced by the Host),
except during Realm initialisation, i.e., the Host is allowed to "mark"
some IPAs as RIPAS_RAM (e.g. initial images loaded into the Realm)
during Realm initialisation (via RMI_RTT_INIT_RIPAS), which gets
measured by the RMM (and affects the Realm Initial Measurement). After
the Realm is activated, the Host can only perform the changes requested
by the Realm (the RMM monitors this). Hence, the Realm, at boot, marks all
of its "Described RAM" areas as RIPAS_RAM (via RSI_IPA_STATE_SET), so
that it can go ahead and access the RAM as normal.


a) RIPAS_EMPTY: Any access to an IPA in RIPAS_EMPTY generates a 
Synchronous External Abort back to the Realm. (In the future, this may
be serviced by another entity in the Realm).

b) RIPAS_RAM: A stage2 fault at an unmapped entry triggers a Realm Exit to
the Host (except Instruction Aborts) and the host is allowed to map a
page (that is "scrubbed" by the RMM) at stage2 and continue the execution.

[ ---->8  You may skip this ---
  c) RIPAS_DESTROYED: An IPA is turned to RIPAS_DESTROYED, if the host
     "unmaps" a protected IPA in RIPAS_RAM (via RMI_DATA_DESTROY). This
     implies that the Realm contents were destroyed with no way of
     restoring back. Any access to such IPA from the Realm also causes
     a Realm EXIT, however, the Host is not allowed to map anything back
     there and thus the vCPU cannot proceed with the execution.
----<8----
]
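
In code form, the three states described above behave roughly as follows
(purely illustrative, summarising this thread rather than quoting the
spec):

	enum example_ripas {
		EXAMPLE_RIPAS_EMPTY,		/* realm access => SEA back into the realm */
		EXAMPLE_RIPAS_RAM,		/* stage2 fault => realm exit, host maps a scrubbed page */
		EXAMPLE_RIPAS_DESTROYED,	/* realm exit, but the host may not map anything back */
	};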

> 
>> The RSI call (RSI_IPA_STATE_SET) doesn't make an IPA unprotected. It
>> simply "invalidates" a (protected) IPA to "EMPTY" implying the Realm doesn't
>> intend to use the "ipa" as RAM anymore and any access to it from
>> the Realm would trigger an SEA into the Realm. The RSI call triggers an exit
>> to the host with the information and is a hint to the hypervisor to reclaim
>> the page backing the IPA.
>>
>> Now, given we need dynamic "sharing" of pages (instead of a dedicated
>> set of shared pages), "aliasing" of an IPA gives us shared pages.
>> i.e., If OS wants to share a page "x" (protected IPA) with the host,
>> we mark that as EMPTY via RSI call and then access the "x" with top-bit
>> set (aliasing the IPA x). This fault allows the hyp to map the page backing
>> IPA "x" as "unprotected" at ALIAS(x) address.
> 
> Does the RMM sanity-check that the NS hyp mappings are protected or
> unprotected depending on the IPA range?

RMM moderates the mappings in the protected half (as mentioned above).
Any mapping in the unprotected half is not monitored. The host is
allowed to unmap and remap anything in the unprotected half.

> 
> I assume that's also the case if the NS hyp is the first one to access a
> page before the realm (e.g. inbound virtio transfer; no page allocated
> yet because of a realm access).
> 

Correct. The host need not map anything upfront in the Unprotected half
as it is allowed to map a page "with contents" at any time. A stage2 
fault can be serviced by the host to load a page with contents.

Suzuki
Catalin Marinas May 20, 2024, 4:53 p.m. UTC | #10
On Wed, May 15, 2024 at 11:47:02AM +0100, Suzuki K Poulose wrote:
> On 14/05/2024 19:00, Catalin Marinas wrote:
> > On Fri, Apr 12, 2024 at 09:42:08AM +0100, Steven Price wrote:
> > >   static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
> > > @@ -41,6 +45,7 @@ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
> > >   	pte = clear_pte_bit(pte, cdata->clear_mask);
> > >   	pte = set_pte_bit(pte, cdata->set_mask);
> > > +	/* TODO: Break before make for PROT_NS_SHARED updates */
> > >   	__set_pte(ptep, pte);
> > >   	return 0;
> > 
> > Oh, this TODO is problematic, not sure we can do it safely. There are
> > some patches on the list to trap faults from other CPUs if they happen
> > to access the page when broken but so far we pushed back as complex and
> > at risk of getting the logic wrong.
> > 
> >  From an architecture perspective, you are changing the output address
> > and D8.16.1 requires a break-before-make sequence (FEAT_BBM doesn't
> > help). So we either come up with a way to do BBM safely (stop_machine()
> > maybe if it's not too expensive or some way to guarantee no accesses to
> > this page while being changed) or we get the architecture clarified on
> > the possible side-effects here ("unpredictable" doesn't help).
> 
> Thanks, we need to sort this out.

Thanks for the clarification on RIPAS states and behaviour in one of
your replies. Thinking about this, since the page is marked as
RIPAS_EMPTY prior to changing the PTE, the address is going to fault
anyway as SEA if accessed. So actually breaking the PTE, TLBI, setting
the new PTE would not add any new behaviour. Of course, this assumes
that set_memory_decrypted() is never called on memory being currently
accessed (can we guarantee this?).

So, we need to make sure that there is no access to the linear map
between set_memory_range_shared() and the actual pte update with
__change_memory_common() in set_memory_decrypted(). At a quick look,
this seems to be the case (ignoring memory scrubbers, though dummy ones
just accessing memory are not safe anyway and unlikely to run in a
guest).

(I did come across the hv_uio_probe() which, if I read correctly, it
ends up calling set_memory_decrypted() with a vmalloc() address; let's
pretend this code doesn't exist ;))
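
For illustration, the sequence being discussed would look roughly like
this (a sketch only, not code from this series, and it relies on the
assumption above that nothing accesses the range while its RIPAS is
EMPTY):

	static int example_change_page_range(pte_t *ptep, unsigned long addr,
					     void *data)
	{
		struct page_change_data *cdata = data;
		pte_t pte = __ptep_get(ptep);

		pte = clear_pte_bit(pte, cdata->clear_mask);
		pte = set_pte_bit(pte, cdata->set_mask);

		/* Break: remove the old entry and invalidate the TLB... */
		__set_pte(ptep, __pte(0));
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

		/* ...make: install the entry with the new output address. */
		__set_pte(ptep, pte);
		return 0;
	}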
Michael Kelley May 20, 2024, 8:32 p.m. UTC | #11
From: Catalin Marinas <catalin.marinas@arm.com> Sent: Monday, May 20, 2024 9:53 AM
> 
> On Wed, May 15, 2024 at 11:47:02AM +0100, Suzuki K Poulose wrote:
> > On 14/05/2024 19:00, Catalin Marinas wrote:
> > > On Fri, Apr 12, 2024 at 09:42:08AM +0100, Steven Price wrote:
> > > >   static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
> > > > @@ -41,6 +45,7 @@ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
> > > >   	pte = clear_pte_bit(pte, cdata->clear_mask);
> > > >   	pte = set_pte_bit(pte, cdata->set_mask);
> > > > +	/* TODO: Break before make for PROT_NS_SHARED updates */
> > > >   	__set_pte(ptep, pte);
> > > >   	return 0;
> > >
> > > Oh, this TODO is problematic, not sure we can do it safely. There are
> > > some patches on the list to trap faults from other CPUs if they happen
> > > to access the page when broken but so far we pushed back as complex and
> > > at risk of getting the logic wrong.
> > >
> > >  From an architecture perspective, you are changing the output address
> > > and D8.16.1 requires a break-before-make sequence (FEAT_BBM doesn't
> > > help). So we either come up with a way to do BBM safely (stop_machine()
> > > maybe if it's not too expensive or some way to guarantee no accesses to
> > > this page while being changed) or we get the architecture clarified on
> > > the possible side-effects here ("unpredictable" doesn't help).
> >
> > Thanks, we need to sort this out.
> 
> Thanks for the clarification on RIPAS states and behaviour in one of
> your replies. Thinking about this, since the page is marked as
> RIPAS_EMPTY prior to changing the PTE, the address is going to fault
> anyway as SEA if accessed. So actually breaking the PTE, TLBI, setting
> the new PTE would not add any new behaviour. Of course, this assumes
> that set_memory_decrypted() is never called on memory being currently
> accessed (can we guarantee this?).

While I worked on CoCo VM support on Hyper-V for x86 -- both AMD
SEV-SNP and Intel TDX, I haven't ramped up on the ARM64 CoCo
VM architecture yet.  With that caveat in mind, the assumption is that callers
of set_memory_decrypted() and set_memory_encrypted() ensure that
the target memory isn't currently being accessed.   But there's a big
exception:  load_unaligned_zeropad() can generate accesses that the
caller can't control.  If load_unaligned_zeropad() touches a page that is
in transition between decrypted and encrypted, a SEV-SNP or TDX architectural
fault could occur.  On x86, those fault handlers detect this case, and
fix things up.  The Hyper-V case requires a different approach, and marks
the PTEs as "not present" before initiating a transition between decrypted
and encrypted, and marks the PTEs "present" again after the transition.
This approach causes a reference generated by load_unaligned_zeropad() 
to take the normal page fault route, and use the page-fault-based fixup for
load_unaligned_zeropad(). See commit 0f34d11234868 for the Hyper-V case.
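
For illustration only, that ordering looks roughly like the sketch below.
The two helpers marked "hypothetical" stand in for the real Hyper-V
hypercall wrappers; see the commit above for the actual implementation.

	static int transition_to_decrypted(unsigned long addr, int numpages)
	{
		int ret;

		/*
		 * 1. Mark the PTEs not-present (this also flushes the TLB),
		 *    so a stray access, e.g. from load_unaligned_zeropad(),
		 *    takes the normal page fault path and can be fixed up.
		 */
		ret = set_memory_np(addr, numpages);
		if (ret)
			return ret;

		/* 2. Ask the hypervisor to make the pages host-visible. */
		ret = hv_mark_range_shared(addr, numpages);	/* hypothetical */
		if (ret)
			return ret;

		/* 3. Re-establish the PTEs with the decrypted attributes. */
		return set_memory_p_decrypted(addr, numpages);	/* hypothetical */
	}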

> 
> So, we need to make sure that there is no access to the linear map
> between set_memory_range_shared() and the actual pte update with
> __change_memory_common() in set_memory_decrypted(). At a quick look,
> this seems to be the case (ignoring memory scrubbers, though dummy ones
> just accessing memory are not safe anyway and unlikely to run in a
> guest).
> 
> (I did come across the hv_uio_probe() which, if I read correctly, it
> ends up calling set_memory_decrypted() with a vmalloc() address; let's
> pretend this code doesn't exist ;))

While the Hyper-V UIO driver is perhaps a bit of an outlier, the Hyper-V
netvsc driver also does set_memory_decrypted() on 16 Mbyte vmalloc()
allocations, and there's not really a viable way to avoid this. The
SEV-SNP and TDX code handles this case.   Support for this case will
probably also be needed for CoCo guests on Hyper-V on ARM64.

Michael
Catalin Marinas May 21, 2024, 10:14 a.m. UTC | #12
On Mon, May 20, 2024 at 08:32:43PM +0000, Michael Kelley wrote:
> From: Catalin Marinas <catalin.marinas@arm.com> Sent: Monday, May 20, 2024 9:53 AM
> > > > On Fri, Apr 12, 2024 at 09:42:08AM +0100, Steven Price wrote:
> > > > >   static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
> > > > > @@ -41,6 +45,7 @@ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
> > > > >   	pte = clear_pte_bit(pte, cdata->clear_mask);
> > > > >   	pte = set_pte_bit(pte, cdata->set_mask);
> > > > > +	/* TODO: Break before make for PROT_NS_SHARED updates */
> > > > >   	__set_pte(ptep, pte);
> > > > >   	return 0;
[...]
> > Thanks for the clarification on RIPAS states and behaviour in one of
> > your replies. Thinking about this, since the page is marked as
> > RIPAS_EMPTY prior to changing the PTE, the address is going to fault
> > anyway as SEA if accessed. So actually breaking the PTE, TLBI, setting
> > the new PTE would not add any new behaviour. Of course, this assumes
> > that set_memory_decrypted() is never called on memory being currently
> > accessed (can we guarantee this?).
> 
> While I worked on CoCo VM support on Hyper-V for x86 -- both AMD
> SEV-SNP and Intel TDX, I haven't ramped up on the ARM64 CoCo
> VM architecture yet.  With that caveat in mind, the assumption is that callers
> of set_memory_decrypted() and set_memory_encrypted() ensure that
> the target memory isn't currently being accessed.   But there's a big
> exception:  load_unaligned_zeropad() can generate accesses that the
> caller can't control.  If load_unaligned_zeropad() touches a page that is
> in transition between decrypted and encrypted, a SEV-SNP or TDX architectural
> fault could occur.  On x86, those fault handlers detect this case, and
> fix things up.  The Hyper-V case requires a different approach, and marks
> the PTEs as "not present" before initiating a transition between decrypted
> and encrypted, and marks the PTEs "present" again after the transition.

Thanks. The load_unaligned_zeropad() case is a good point. I thought
we'd get away with this on arm64 since accessing such a decrypted page
would trigger a synchronous exception but looking at the code, the
do_sea() path never calls fixup_exception(), so we just kill the whole
kernel.

> This approach causes a reference generated by load_unaligned_zeropad() 
> to take the normal page fault route, and use the page-fault-based fixup for
> load_unaligned_zeropad(). See commit 0f34d11234868 for the Hyper-V case.

I think for arm64 set_memory_decrypted() (and encrypted) would have to
first make the PTE invalid, TLBI, set the RIPAS_EMPTY state, set the new
PTE. Any page fault due to invalid PTE would be handled by the exception
fixup in load_unaligned_zeropad(). This way we wouldn't get any
synchronous external abort (SEA) in standard uses. Not sure we need to
do anything hyper-v specific as in the commit above.
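
As a sketch only (untested, reusing the helpers added by this series and
ignoring error handling), the decrypt side could then become something
like:

	static int __set_memory_decrypted_sketch(unsigned long addr, int numpages)
	{
		phys_addr_t start = __virt_to_phys(addr);
		phys_addr_t end = start + numpages * PAGE_SIZE;
		int ret;

		/*
		 * 1. Invalidate the PTEs and flush the TLB; any access now
		 *    takes a translation fault that the exception fixup in
		 *    load_unaligned_zeropad() can handle.
		 */
		ret = set_memory_valid(addr, numpages, 0);
		if (ret)
			return ret;

		/* 2. RSI call: mark the range RIPAS_EMPTY / shared. */
		set_memory_range_shared(start, end);

		/* 3. Write the new PTEs: valid again, top IPA bit set. */
		return __change_memory_common(addr, numpages * PAGE_SIZE,
					      __pgprot(PROT_NS_SHARED | PTE_VALID),
					      __pgprot(0));
	}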

> > (I did come across the hv_uio_probe() which, if I read correctly, it
> > ends up calling set_memory_decrypted() with a vmalloc() address; let's
> > pretend this code doesn't exist ;))
> 
> While the Hyper-V UIO driver is perhaps a bit of an outlier, the Hyper-V
> netvsc driver also does set_memory_decrypted() on 16 Mbyte vmalloc()
> allocations, and there's not really a viable way to avoid this. The
> SEV-SNP and TDX code handles this case.   Support for this case will
> probably also be needed for CoCo guests on Hyper-V on ARM64.

Ah, I was hoping we can ignore it. So the arm64 set_memory_*() code will
have to detect and change both the vmalloc map and the linear map.
Currently this patchset assumes the latter only.
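
Something along these lines might do for the vmalloc alias (a
hypothetical sketch only, not part of this series; the function name is
made up, and the RIPAS change happens via the linear map alias of each
backing page):

	static int set_memory_decrypted_vmalloc(unsigned long addr, int numpages)
	{
		int i, ret;

		/* Linear map alias of each backing page: RIPAS + PTE change. */
		for (i = 0; i < numpages; i++) {
			void *vaddr = (void *)(addr + i * PAGE_SIZE);
			unsigned long lm_addr;

			lm_addr = (unsigned long)page_address(vmalloc_to_page(vaddr));
			ret = set_memory_decrypted(lm_addr, 1);
			if (ret)
				return ret;
		}

		/*
		 * The physical pages are already shared; the vmalloc PTEs
		 * only need the top IPA bit flipped.
		 */
		return __change_memory_common(addr, numpages * PAGE_SIZE,
					      __pgprot(PROT_NS_SHARED),
					      __pgprot(0));
	}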

Such buffers may end up in user space as well but I think at the
set_memory_decrypted() call there aren't any such mappings and
subsequent remap_pfn_range() etc. would handle the permissions properly
through the vma->vm_page_prot attributes (assuming that someone set
those pgprot attributes).
Michael Kelley May 21, 2024, 3:58 p.m. UTC | #13
From: Catalin Marinas <catalin.marinas@arm.com> Sent: Tuesday, May 21, 2024 3:14 AM
> 
> On Mon, May 20, 2024 at 08:32:43PM +0000, Michael Kelley wrote:
> > From: Catalin Marinas <catalin.marinas@arm.com> Sent: Monday, May 20, 2024 9:53 AM
> > > > > On Fri, Apr 12, 2024 at 09:42:08AM +0100, Steven Price wrote:
> > > > > >   static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
> > > > > > @@ -41,6 +45,7 @@ static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
> > > > > >   	pte = clear_pte_bit(pte, cdata->clear_mask);
> > > > > >   	pte = set_pte_bit(pte, cdata->set_mask);
> > > > > > +	/* TODO: Break before make for PROT_NS_SHARED updates */
> > > > > >   	__set_pte(ptep, pte);
> > > > > >   	return 0;
> [...]
> > > Thanks for the clarification on RIPAS states and behaviour in one of
> > > your replies. Thinking about this, since the page is marked as
> > > RIPAS_EMPTY prior to changing the PTE, the address is going to fault
> > > anyway as SEA if accessed. So actually breaking the PTE, TLBI, setting
> > > the new PTE would not add any new behaviour. Of course, this assumes
> > > that set_memory_decrypted() is never called on memory being currently
> > > accessed (can we guarantee this?).
> >
> > While I worked on CoCo VM support on Hyper-V for x86 -- both AMD
> > SEV-SNP and Intel TDX, I haven't ramped up on the ARM64 CoCo
> > VM architecture yet.  With that caveat in mind, the assumption is that callers
> > of set_memory_decrypted() and set_memory_encrypted() ensure that
> > the target memory isn't currently being accessed.   But there's a big
> > exception:  load_unaligned_zeropad() can generate accesses that the
> > caller can't control.  If load_unaligned_zeropad() touches a page that is
> > in transition between decrypted and encrypted, a SEV-SNP or TDX architectural
> > fault could occur.  On x86, those fault handlers detect this case, and
> > fix things up.  The Hyper-V case requires a different approach, and marks
> > the PTEs as "not present" before initiating a transition between decrypted
> > and encrypted, and marks the PTEs "present" again after the transition.
> 
> Thanks. The load_unaligned_zeropad() case is a good point. I thought
> we'd get away with this on arm64 since accessing such a decrypted page
> would trigger a synchronous exception but looking at the code, the
> do_sea() path never calls fixup_exception(), so we just kill the whole
> kernel.
> 
> > This approach causes a reference generated by load_unaligned_zeropad()
> > to take the normal page fault route, and use the page-fault-based fixup for
> > load_unaligned_zeropad(). See commit 0f34d11234868 for the Hyper-V case.
> 
> I think for arm64 set_memory_decrypted() (and encrypted) would have to
> first make the PTE invalid, TLBI, set the RIPAS_EMPTY state, set the new
> PTE. Any page fault due to invalid PTE would be handled by the exception
> fixup in load_unaligned_zeropad(). This way we wouldn't get any
> synchronous external abort (SEA) in standard uses. Not sure we need to
> do anything hyper-v specific as in the commit above.

Sounds good to me. I tried to do the same for all the x86 cases (instead of
just handling the Hyper-V paravisor), since that would completely decouple
TDX/SEV-SNP from load_unaligned_zeropad(). It worked for TDX. But
SEV-SNP does the PVALIDATE instruction during a decrypted<->encrypted
transition, and PVALIDATE inconveniently requires the virtual address as
input. It only uses the vaddr to translate to the paddr, but with the vaddr
PTE "not present", PVALIDATE fails. Sigh. This problem will probably come
back again when/if Coconut or any other paravisor redirects #VC/#VE to
the paravisor. But I digress ....

> 
> > > (I did come across the hv_uio_probe() which, if I read correctly, it
> > > ends up calling set_memory_decrypted() with a vmalloc() address; let's
> > > pretend this code doesn't exist ;))
> >
> > While the Hyper-V UIO driver is perhaps a bit of an outlier, the Hyper-V
> > netvsc driver also does set_memory_decrypted() on 16 Mbyte vmalloc()
> > allocations, and there's not really a viable way to avoid this. The
> > SEV-SNP and TDX code handles this case.   Support for this case will
> > probably also be needed for CoCo guests on Hyper-V on ARM64.
> 
> Ah, I was hoping we can ignore it. So the arm64 set_memory_*() code will
> have to detect and change both the vmalloc map and the linear map.

Yep.

> Currently this patchset assumes the latter only.
> 
> Such buffers may end up in user space as well but I think at the
> set_memory_decrypted() call there aren't any such mappings and
> subsequent remap_pfn_range() etc. would handle the permissions properly
> through the vma->vm_page_prot attributes (assuming that someone set
> those pgprot attributes).

Yes, I'm pretty sure that's what we've seen on the x86 side.

Michael
diff mbox series

Patch

diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7b11c98b3e84..ffd4685a3029 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -20,6 +20,7 @@  config ARM64
 	select ARCH_ENABLE_SPLIT_PMD_PTLOCK if PGTABLE_LEVELS > 2
 	select ARCH_ENABLE_THP_MIGRATION if TRANSPARENT_HUGEPAGE
 	select ARCH_HAS_CACHE_LINE_SIZE
+	select ARCH_HAS_CC_PLATFORM
 	select ARCH_HAS_CURRENT_STACK_POINTER
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEBUG_VM_PGTABLE
@@ -40,6 +41,8 @@  config ARM64
 	select ARCH_HAS_SETUP_DMA_OPS
 	select ARCH_HAS_SET_DIRECT_MAP
 	select ARCH_HAS_SET_MEMORY
+	select ARCH_HAS_MEM_ENCRYPT
+	select ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	select ARCH_STACKWALK
 	select ARCH_HAS_STRICT_KERNEL_RWX
 	select ARCH_HAS_STRICT_MODULE_RWX
diff --git a/arch/arm64/include/asm/mem_encrypt.h b/arch/arm64/include/asm/mem_encrypt.h
new file mode 100644
index 000000000000..7381f9585321
--- /dev/null
+++ b/arch/arm64/include/asm/mem_encrypt.h
@@ -0,0 +1,19 @@ 
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 ARM Ltd.
+ */
+
+#ifndef __ASM_MEM_ENCRYPT_H
+#define __ASM_MEM_ENCRYPT_H
+
+#include <asm/rsi.h>
+
+/* All DMA must be to non-secure memory for now */
+static inline bool force_dma_unencrypted(struct device *dev)
+{
+	return is_realm_world();
+}
+
+int set_memory_encrypted(unsigned long addr, int numpages);
+int set_memory_decrypted(unsigned long addr, int numpages);
+#endif
diff --git a/arch/arm64/kernel/rsi.c b/arch/arm64/kernel/rsi.c
index 5c8ed3aaa35f..ba3f346e7a91 100644
--- a/arch/arm64/kernel/rsi.c
+++ b/arch/arm64/kernel/rsi.c
@@ -6,6 +6,7 @@ 
 #include <linux/jump_label.h>
 #include <linux/memblock.h>
 #include <linux/swiotlb.h>
+#include <linux/cc_platform.h>
 
 #include <asm/rsi.h>
 
@@ -19,6 +20,17 @@  unsigned int phys_mask_shift = CONFIG_ARM64_PA_BITS;
 DEFINE_STATIC_KEY_FALSE_RO(rsi_present);
 EXPORT_SYMBOL(rsi_present);
 
+bool cc_platform_has(enum cc_attr attr)
+{
+	switch (attr) {
+	case CC_ATTR_MEM_ENCRYPT:
+		return is_realm_world();
+	default:
+		return false;
+	}
+}
+EXPORT_SYMBOL_GPL(cc_platform_has);
+
 static bool rsi_version_matches(void)
 {
 	unsigned long ver;
diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c
index 0c4e3ecf989d..229b6d9990f5 100644
--- a/arch/arm64/mm/pageattr.c
+++ b/arch/arm64/mm/pageattr.c
@@ -5,10 +5,12 @@ 
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/module.h>
+#include <linux/mem_encrypt.h>
 #include <linux/sched.h>
 #include <linux/vmalloc.h>
 
 #include <asm/cacheflush.h>
+#include <asm/pgtable-prot.h>
 #include <asm/set_memory.h>
 #include <asm/tlbflush.h>
 #include <asm/kfence.h>
@@ -23,14 +25,16 @@  bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED
 bool can_set_direct_map(void)
 {
 	/*
-	 * rodata_full and DEBUG_PAGEALLOC require linear map to be
-	 * mapped at page granularity, so that it is possible to
+	 * rodata_full, DEBUG_PAGEALLOC and a Realm guest all require linear
+	 * map to be mapped at page granularity, so that it is possible to
 	 * protect/unprotect single pages.
 	 *
 	 * KFENCE pool requires page-granular mapping if initialized late.
+	 *
+	 * Realms need to make pages shared/protected at page granularity.
 	 */
 	return rodata_full || debug_pagealloc_enabled() ||
-	       arm64_kfence_can_set_direct_map();
+		arm64_kfence_can_set_direct_map() || is_realm_world();
 }
 
 static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
@@ -41,6 +45,7 @@  static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
 	pte = clear_pte_bit(pte, cdata->clear_mask);
 	pte = set_pte_bit(pte, cdata->set_mask);
 
+	/* TODO: Break before make for PROT_NS_SHARED updates */
 	__set_pte(ptep, pte);
 	return 0;
 }
@@ -192,6 +197,43 @@  int set_direct_map_default_noflush(struct page *page)
 				   PAGE_SIZE, change_page_range, &data);
 }
 
+static int __set_memory_encrypted(unsigned long addr,
+				  int numpages,
+				  bool encrypt)
+{
+	unsigned long set_prot = 0, clear_prot = 0;
+	phys_addr_t start, end;
+
+	if (!is_realm_world())
+		return 0;
+
+	WARN_ON(!__is_lm_address(addr));
+	start = __virt_to_phys(addr);
+	end = start + numpages * PAGE_SIZE;
+
+	if (encrypt) {
+		clear_prot = PROT_NS_SHARED;
+		set_memory_range_protected(start, end);
+	} else {
+		set_prot = PROT_NS_SHARED;
+		set_memory_range_shared(start, end);
+	}
+
+	return __change_memory_common(addr, PAGE_SIZE * numpages,
+				      __pgprot(set_prot),
+				      __pgprot(clear_prot));
+}
+
+int set_memory_encrypted(unsigned long addr, int numpages)
+{
+	return __set_memory_encrypted(addr, numpages, true);
+}
+
+int set_memory_decrypted(unsigned long addr, int numpages)
+{
+	return __set_memory_encrypted(addr, numpages, false);
+}
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {