/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include #include #include #include #include #include #include #include #include #include #include #include #include #include "msm_iommu_hw-8xxx.h" #include "msm_iommu.h" #define MRC(reg, processor, op1, crn, crm, op2) \ __asm__ __volatile__ ( \ " mrc " #processor "," #op1 ", %0," #crn "," #crm "," #op2 "\n" \ : "=r" (reg)) #define RCP15_PRRR(reg) MRC(reg, p15, 0, c10, c2, 0) #define RCP15_NMRR(reg) MRC(reg, p15, 0, c10, c2, 1) /* bitmap of the page sizes currently supported */ #define MSM_IOMMU_PGSIZES (SZ_4K | SZ_64K | SZ_1M | SZ_16M) static int msm_iommu_tex_class[4]; DEFINE_SPINLOCK(msm_iommu_lock); struct msm_priv { unsigned long *pgtable; struct list_head list_attached; struct iommu_domain domain; }; static struct msm_priv *to_msm_priv(struct iommu_domain *dom) { return container_of(dom, struct msm_priv, domain); } static int __enable_clocks(struct msm_iommu_drvdata *drvdata) { int ret; ret = clk_enable(drvdata->pclk); if (ret) goto fail; if (drvdata->clk) { ret = clk_enable(drvdata->clk); if (ret) clk_disable(drvdata->pclk); } fail: return ret; } static void __disable_clocks(struct msm_iommu_drvdata *drvdata) { clk_disable(drvdata->clk); clk_disable(drvdata->pclk); } static int 
__flush_iotlb(struct iommu_domain *domain) { struct msm_priv *priv = to_msm_priv(domain); struct msm_iommu_drvdata *iommu_drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata; int ret = 0; #ifndef CONFIG_IOMMU_PGTABLES_L2 unsigned long *fl_table = priv->pgtable; int i; if (!list_empty(&priv->list_attached)) { dmac_flush_range(fl_table, fl_table + SZ_16K); for (i = 0; i < NUM_FL_PTE; i++) if ((fl_table[i] & 0x03) == FL_TYPE_TABLE) { void *sl_table = __va(fl_table[i] & FL_BASE_MASK); dmac_flush_range(sl_table, sl_table + SZ_4K); } } #endif list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) { BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent); iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent); BUG_ON(!iommu_drvdata); ret = __enable_clocks(iommu_drvdata); if (ret) goto fail; SET_CTX_TLBIALL(iommu_drvdata->base, ctx_drvdata->num, 0); __disable_clocks(iommu_drvdata); } fail: return ret; } static void __reset_context(void __iomem *base, int ctx) { SET_BPRCOSH(base, ctx, 0); SET_BPRCISH(base, ctx, 0); SET_BPRCNSH(base, ctx, 0); SET_BPSHCFG(base, ctx, 0); SET_BPMTCFG(base, ctx, 0); SET_ACTLR(base, ctx, 0); SET_SCTLR(base, ctx, 0); SET_FSRRESTORE(base, ctx, 0); SET_TTBR0(base, ctx, 0); SET_TTBR1(base, ctx, 0); SET_TTBCR(base, ctx, 0); SET_BFBCR(base, ctx, 0); SET_PAR(base, ctx, 0); SET_FAR(base, ctx, 0); SET_CTX_TLBIALL(base, ctx, 0); SET_TLBFLPTER(base, ctx, 0); SET_TLBSLPTER(base, ctx, 0); SET_TLBLKCR(base, ctx, 0); SET_PRRR(base, ctx, 0); SET_NMRR(base, ctx, 0); } static void __program_context(void __iomem *base, int ctx, phys_addr_t pgtable) { unsigned int prrr, nmrr; __reset_context(base, ctx); /* Set up HTW mode */ /* TLB miss configuration: perform HTW on miss */ SET_TLBMCFG(base, ctx, 0x3); /* V2P configuration: HTW for access */ SET_V2PCFG(base, ctx, 0x3); SET_TTBCR(base, ctx, 0); SET_TTBR0_PA(base, ctx, (pgtable >> 14)); /* Invalidate the TLB for this context */ SET_CTX_TLBIALL(base, ctx, 0); /* Set interrupt number to 
"secure" interrupt */ SET_IRPTNDX(base, ctx, 0); /* Enable context fault interrupt */ SET_CFEIE(base, ctx, 1); /* Stall access on a context fault and let the handler deal with it */ SET_CFCFG(base, ctx, 1); /* Redirect all cacheable requests to L2 slave port. */ SET_RCISH(base, ctx, 1); SET_RCOSH(base, ctx, 1); SET_RCNSH(base, ctx, 1);
outputs:
  PublicIP:
    description: Address for registering endpoints in the cloud.
    value: {get_attr: [undercloud_VLANPort, fixed_ips, 0, ip_address]}
resources:
  # Override the main template which can also supply a static route.
  undercloud_99VLANPort:
    type: OS::Heat::StructuredDeployment
    properties:
      config: {get_resource: undercloudVLANPortConfig}
      server: {get_resource: undercloud}
      signal_transport: NO_SIGNAL
  undercloudVLANPortConfig:
    type: OS::Heat::StructuredConfig
    properties:
      config:
        neutron:
          ovs:
            public_interface_tag_ip:
              Fn::Join:
              - '/'
              - - {get_attr: [undercloud_VLANPort, fixed_ips, 0, ip_address]}
                - '24'
                # The hard-coded '24' prefix length above should be derived
                # from the subnet (may require a Neutron fix). XXX: make it a
                # parameter and feed it in via _undercloud.sh for now.
            # Tell the instance to apply the default route.
            # Reinstate when https://bugs.launchpad.net/heat/+bug/1336656 is
            # sorted
            # public_interface_route:
            #   get_attr: [undercloud_VLANPort, fixed_ips, 0, subnet, gateway_ip]
  undercloud_VLANPort:
    type: OS::Neutron::Port