@@ -173,8 +173,52 @@ void __init sme_early_init(void)
/* Update the protection map with memory encryption mask */
for (i = 0; i < ARRAY_SIZE(protection_map); i++)
protection_map[i] = __pgprot(pgprot_val(protection_map[i]) | sme_me_mask);
+
+ if (sev_active)
+ swiotlb_force = 1;
}

+static void *sme_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+ gfp_t gfp, unsigned long attrs)
+{
+ void *vaddr;
+
+ vaddr = x86_swiotlb_alloc_coherent(dev, size, dma_handle, gfp, attrs);
+ if (!vaddr)
+ return NULL;
+
+ /* Clear the SME encryption bit for DMA use */
+ sme_set_mem_dec(vaddr, size);
+
+ /* Remove the encryption bit from the DMA address */
+ *dma_handle &= ~sme_me_mask;
+
+ return vaddr;
+}
+
+static void sme_free(struct device *dev, size_t size, void *vaddr,
+ dma_addr_t dma_handle, unsigned long attrs)
+{
+ /* Set the SME encryption bit for re-use as encrypted */
+ sme_set_mem_enc(vaddr, size);
+
+ x86_swiotlb_free_coherent(dev, size, vaddr, dma_handle, attrs);
+}
+
+static struct dma_map_ops sme_dma_ops = {
+ .alloc = sme_alloc,
+ .free = sme_free,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .sync_single_for_cpu = swiotlb_sync_single_for_cpu,
+ .sync_single_for_device = swiotlb_sync_single_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device,
+ .mapping_error = swiotlb_dma_mapping_error,
+};
+
/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
@@ -184,6 +228,10 @@ void __init mem_encrypt_init(void)
/* Make SWIOTLB use an unencrypted DMA area */
	swiotlb_clear_encryption();

+ /* Use SEV DMA operations if SEV is active */
+ if (sev_active)
+ dma_ops = &sme_dma_ops;
+
pr_info("memory encryption active\n");
}
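
For context, here is a minimal sketch, not part of the patch, of how a driver-side allocation flows through these operations once sme_dma_ops is installed as the global dma_ops: an ordinary dma_alloc_coherent() call is dispatched to sme_alloc(), so the buffer comes back mapped decrypted and the bus address already has the encryption mask cleared. The helper name example_alloc_ring and the one-page size are made up for illustration; only the standard DMA API is assumed.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/* Hypothetical driver helper: allocate a one-page coherent ring buffer. */
static void *example_alloc_ring(struct device *dev, dma_addr_t *ring_dma)
{
	/* Dispatched to sme_alloc() when sme_dma_ops is the active dma_ops */
	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, ring_dma, GFP_KERNEL);

	if (!ring)
		return NULL;

	/*
	 * *ring_dma can be programmed into the device as-is: sme_alloc()
	 * stripped sme_me_mask from it, and the pages behind 'ring' were
	 * marked decrypted, so unencrypted device DMA and CPU accesses see
	 * the same data.
	 */
	return ring;
}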