Message ID | 20200611222128.28826-5-jcrouse@codeaurora.org (mailing list archive) |
---|---|
State | Superseded |
Series | iommu/arm-smmu: Enable split pagetable support |
Hi Jordan,

Thank you for the patch! Yet something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on v5.7 next-20200611]
[cannot apply to iommu/next robh/for-next arm/for-next keystone/next rockchip/for-next arm64/for-next/core shawnguo/for-next soc/for-next]
[if your patch is applied to the wrong git tree, please drop us a note to help
improve the system. BTW, we also suggest to use '--base' option to specify the
base tree in git format-patch, please see https://stackoverflow.com/a/37406982]

url:      https://github.com/0day-ci/linux/commits/Jordan-Crouse/iommu-arm-smmu-Enable-split-pagetable-support/20200612-094718
base:     https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git b961f8dc8976c091180839f4483d67b7c2ca2578
config:   arm64-randconfig-s031-20200612 (attached as .config)
compiler: aarch64-linux-gcc (GCC) 9.3.0
reproduce:
        # apt-get install sparse
        # sparse version: v0.6.1-250-g42323db3-dirty
        # save the attached .config to linux build tree
        make W=1 C=1 ARCH=arm64 CF='-fdiagnostic-prefix -D__CHECK_ENDIAN__'

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <lkp@intel.com>

All errors (new ones prefixed by >>, old ones prefixed by <<):

   drivers/iommu/arm-smmu.c: In function 'arm_smmu_init_domain_context':
>> drivers/iommu/arm-smmu.c:804:21: error: 'dev' undeclared (first use in this function); did you mean 'cdev'?
     804 |  smmu_domain->dev = dev;
         |                     ^~~
         |                     cdev
   drivers/iommu/arm-smmu.c:804:21: note: each undeclared identifier is reported only once for each function it appears in

vim +804 drivers/iommu/arm-smmu.c

  669
  670  static int arm_smmu_init_domain_context(struct iommu_domain *domain,
  671                                          struct arm_smmu_device *smmu)
  672  {
  673          int irq, start, ret = 0;
  674          unsigned long ias, oas;
  675          struct io_pgtable_ops *pgtbl_ops;
  676          struct io_pgtable_cfg pgtbl_cfg;
  677          enum io_pgtable_fmt fmt;
  678          struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
  679          struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
  680
  681          mutex_lock(&smmu_domain->init_mutex);
  682          if (smmu_domain->smmu)
  683                  goto out_unlock;
  684
  685          if (domain->type == IOMMU_DOMAIN_IDENTITY) {
  686                  smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
  687                  smmu_domain->smmu = smmu;
  688                  goto out_unlock;
  689          }
  690
  691          /*
  692           * Mapping the requested stage onto what we support is surprisingly
  693           * complicated, mainly because the spec allows S1+S2 SMMUs without
  694           * support for nested translation. That means we end up with the
  695           * following table:
  696           *
  697           * Requested        Supported        Actual
  698           *     S1               N              S1
  699           *     S1             S1+S2            S1
  700           *     S1               S2             S2
  701           *     S1               S1             S1
  702           *     N                N              N
  703           *     N              S1+S2            S2
  704           *     N                S2             S2
  705           *     N                S1             S1
  706           *
  707           * Note that you can't actually request stage-2 mappings.
  708           */
  709          if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
  710                  smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
  711          if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
  712                  smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
  713
  714          /*
  715           * Choosing a suitable context format is even more fiddly. Until we
  716           * grow some way for the caller to express a preference, and/or move
  717           * the decision into the io-pgtable code where it arguably belongs,
  718           * just aim for the closest thing to the rest of the system, and hope
  719           * that the hardware isn't esoteric enough that we can't assume AArch64
  720           * support to be a superset of AArch32 support...
  721           */
  722          if (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_L)
  723                  cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_L;
  724          if (IS_ENABLED(CONFIG_IOMMU_IO_PGTABLE_ARMV7S) &&
  725              !IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_ARM_LPAE) &&
  726              (smmu->features & ARM_SMMU_FEAT_FMT_AARCH32_S) &&
  727              (smmu_domain->stage == ARM_SMMU_DOMAIN_S1))
  728                  cfg->fmt = ARM_SMMU_CTX_FMT_AARCH32_S;
  729          if ((IS_ENABLED(CONFIG_64BIT) || cfg->fmt == ARM_SMMU_CTX_FMT_NONE) &&
  730              (smmu->features & (ARM_SMMU_FEAT_FMT_AARCH64_64K |
  731                                 ARM_SMMU_FEAT_FMT_AARCH64_16K |
  732                                 ARM_SMMU_FEAT_FMT_AARCH64_4K)))
  733                  cfg->fmt = ARM_SMMU_CTX_FMT_AARCH64;
  734
  735          if (cfg->fmt == ARM_SMMU_CTX_FMT_NONE) {
  736                  ret = -EINVAL;
  737                  goto out_unlock;
  738          }
  739
  740          switch (smmu_domain->stage) {
  741          case ARM_SMMU_DOMAIN_S1:
  742                  cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
  743                  start = smmu->num_s2_context_banks;
  744                  ias = smmu->va_size;
  745                  oas = smmu->ipa_size;
  746                  if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
  747                          fmt = ARM_64_LPAE_S1;
  748                  } else if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH32_L) {
  749                          fmt = ARM_32_LPAE_S1;
  750                          ias = min(ias, 32UL);
  751                          oas = min(oas, 40UL);
  752                  } else {
  753                          fmt = ARM_V7S;
  754                          ias = min(ias, 32UL);
  755                          oas = min(oas, 32UL);
  756                  }
  757                  smmu_domain->flush_ops = &arm_smmu_s1_tlb_ops;
  758                  break;
  759          case ARM_SMMU_DOMAIN_NESTED:
  760                  /*
  761                   * We will likely want to change this if/when KVM gets
  762                   * involved.
  763                   */
  764          case ARM_SMMU_DOMAIN_S2:
  765                  cfg->cbar = CBAR_TYPE_S2_TRANS;
  766                  start = 0;
  767                  ias = smmu->ipa_size;
  768                  oas = smmu->pa_size;
  769                  if (cfg->fmt == ARM_SMMU_CTX_FMT_AARCH64) {
  770                          fmt = ARM_64_LPAE_S2;
  771                  } else {
  772                          fmt = ARM_32_LPAE_S2;
  773                          ias = min(ias, 40UL);
  774                          oas = min(oas, 40UL);
  775                  }
  776                  if (smmu->version == ARM_SMMU_V2)
  777                          smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v2;
  778                  else
  779                          smmu_domain->flush_ops = &arm_smmu_s2_tlb_ops_v1;
  780                  break;
  781          default:
  782                  ret = -EINVAL;
  783                  goto out_unlock;
  784          }
  785          ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
  786                                        smmu->num_context_banks);
  787          if (ret < 0)
  788                  goto out_unlock;
  789
  790          cfg->cbndx = ret;
  791          if (smmu->version < ARM_SMMU_V2) {
  792                  cfg->irptndx = atomic_inc_return(&smmu->irptndx);
  793                  cfg->irptndx %= smmu->num_context_irqs;
  794          } else {
  795                  cfg->irptndx = cfg->cbndx;
  796          }
  797
  798          if (smmu_domain->stage == ARM_SMMU_DOMAIN_S2)
  799                  cfg->vmid = cfg->cbndx + 1;
  800          else
  801                  cfg->asid = cfg->cbndx;
  802
  803          smmu_domain->smmu = smmu;
> 804          smmu_domain->dev = dev;
  805
  806          pgtbl_cfg = (struct io_pgtable_cfg) {
  807                  .pgsize_bitmap = smmu->pgsize_bitmap,
  808                  .ias = ias,
  809                  .oas = oas,
  810                  .coherent_walk = smmu->features & ARM_SMMU_FEAT_COHERENT_WALK,
  811                  .tlb = smmu_domain->flush_ops,
  812                  .iommu_dev = smmu->dev,
  813          };
  814
  815          if (smmu->impl && smmu->impl->init_context) {
  816                  ret = smmu->impl->init_context(smmu_domain, &pgtbl_cfg);
  817                  if (ret)
  818                          goto out_unlock;
  819          }
  820
  821          if (smmu_domain->non_strict)
  822                  pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_NON_STRICT;
  823
  824          pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
  825          if (!pgtbl_ops) {
  826                  ret = -ENOMEM;
  827                  goto out_clear_smmu;
  828          }
  829
  830          /* Update the domain's page sizes to reflect the page table format */
  831          domain->pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
  832
  833          if (pgtbl_cfg.quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
  834                  domain->geometry.aperture_start = ~0UL << ias;
  835                  domain->geometry.aperture_end = ~0UL;
  836          } else {
  837                  domain->geometry.aperture_end = (1UL << ias) - 1;
  838          }
  839
  840          domain->geometry.force_aperture = true;
  841
  842          /* Initialise the context bank with our page table cfg */
  843          arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg);
  844          arm_smmu_write_context_bank(smmu, cfg->cbndx);
  845
  846          /*
  847           * Request context fault interrupt. Do this last to avoid the
  848           * handler seeing a half-initialised domain state.
  849           */
  850          irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
  851          ret = devm_request_irq(smmu->dev, irq, arm_smmu_context_fault,
  852                                 IRQF_SHARED, "arm-smmu-context-fault", domain);
  853          if (ret < 0) {
  854                  dev_err(smmu->dev, "failed to request context IRQ %d (%u)\n",
  855                          cfg->irptndx, irq);
  856                  cfg->irptndx = ARM_SMMU_INVALID_IRPTNDX;
  857          }
  858
  859          mutex_unlock(&smmu_domain->init_mutex);
  860
  861          /* Publish page table ops for map/unmap */
  862          smmu_domain->pgtbl_ops = pgtbl_ops;
  863          return 0;
  864
  865  out_clear_smmu:
  866          __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx);
  867          smmu_domain->smmu = NULL;
  868  out_unlock:
  869          mutex_unlock(&smmu_domain->init_mutex);
  870          return ret;
  871  }
  872

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
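The robot's error follows directly from the quoted context: arm_smmu_init_domain_context() only receives the iommu_domain and the arm_smmu_device, so no 'dev' is in scope at line 804. A minimal sketch of one way to resolve it, assuming the device is simply forwarded from the existing call site in arm_smmu_attach_dev() (which does receive the attaching struct device), is shown below; this is an illustration only, not the actual follow-up patch:

static int arm_smmu_init_domain_context(struct iommu_domain *domain,
                                        struct arm_smmu_device *smmu,
                                        struct device *dev)
{
        /* ... body as quoted above ... */
        smmu_domain->smmu = smmu;
        smmu_domain->dev = dev;         /* 'dev' now names the new parameter */
        /* ... */
}

static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
        /* ... */
        /* Ensure that the domain is finalised, forwarding the attaching device */
        ret = arm_smmu_init_domain_context(domain, smmu, dev);
        /* ... */
}

Any other callers of arm_smmu_init_domain_context(), if present, would need the same treatment.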
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 048de2681670..743d75b9ff3f 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -801,6 +801,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		cfg->asid = cfg->cbndx;
 
 	smmu_domain->smmu = smmu;
+	smmu_domain->dev = dev;
 
 	pgtbl_cfg = (struct io_pgtable_cfg) {
 		.pgsize_bitmap	= smmu->pgsize_bitmap,
diff --git a/drivers/iommu/arm-smmu.h b/drivers/iommu/arm-smmu.h
index 5f2de20e883b..d33cfe26b2f5 100644
--- a/drivers/iommu/arm-smmu.h
+++ b/drivers/iommu/arm-smmu.h
@@ -345,6 +345,7 @@ struct arm_smmu_domain {
 	struct mutex			init_mutex; /* Protects smmu pointer */
 	spinlock_t			cb_lock; /* Serialises ATS1* ops and TLB syncs */
 	struct iommu_domain		domain;
+	struct device			*dev;	/* Device attached to this domain */
 };
 
 static inline u32 arm_smmu_lpae_tcr(struct io_pgtable_cfg *cfg)
Add a pointer to the struct device that is attached to a domain. This
makes it easy to retrieve the device when it is needed in the
implementation-specific code.

Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
---
 drivers/iommu/arm-smmu.c | 1 +
 drivers/iommu/arm-smmu.h | 1 +
 2 files changed, 2 insertions(+)
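As a purely hypothetical illustration of how the new field might be consumed, an implementation-specific init_context() hook (which, per the code quoted above, already receives the arm_smmu_domain) could key its behaviour off the attached device. The function name and compatible string below are invented, the quirk is only a placeholder, and <linux/of.h> is assumed for of_device_is_compatible():

static int example_impl_init_context(struct arm_smmu_domain *smmu_domain,
                                     struct io_pgtable_cfg *pgtbl_cfg)
{
        /* Pointer added by this patch: the device attached to the domain */
        struct device *dev = smmu_domain->dev;

        /* Hypothetical per-device decision, e.g. split pagetables for a GPU */
        if (dev && dev->of_node &&
            of_device_is_compatible(dev->of_node, "vendor,example-gpu"))
                pgtbl_cfg->quirks |= IO_PGTABLE_QUIRK_ARM_TTBR1;

        return 0;
}

The point is only that smmu_domain->dev gives such a hook something device-specific to test; which quirks or settings it should actually apply is up to the rest of the series.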