Discussion:
[PATCH v2 2/8] ARM: NOMMU: Update MPU accessors to use cp15 helpers
Vladimir Murzin
2017-07-21 13:12:30 UTC
Currently, the inline assembly used to access the MPU's cp15 registers
lacks the volatile keyword, which leaves the compiler free to optimise
such accesses away once we start using them more intensively. Rather
than fixing the inline asm, let's move the MPU accessors over to the
cp15 helpers, which do the right thing.
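
For reference, the cp15 helpers in arch/arm/include/asm/cp15.h boil down
to volatile inline asm roughly along these lines (a simplified sketch of
the macros, shown only to illustrate where the missing "volatile" comes
from):

#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
	"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32

#define __write_sysreg(v, r, w, c, t)	asm volatile(w " " c : : "r" ((t)(v)))
#define write_sysreg(v, ...)		__write_sysreg(v, __VA_ARGS__)

#define __read_sysreg(r, w, c, t) ({		\
	t __val;				\
	asm volatile(r " " c : "=r" (__val));	\
	__val;					\
})
#define read_sysreg(...)		__read_sysreg(__VA_ARGS__)

so write_sysreg(v, DRBAR) expands to a volatile "mcr p15, 0, %0, c6, c1, 0"
which the compiler cannot optimise away.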

Tested-by: Szemző András <***@esh.hu>
Signed-off-by: Vladimir Murzin <***@arm.com>
---
arch/arm/mm/pmsa-v7.c | 48 ++++++++++++++++++++++++++----------------------
1 file changed, 26 insertions(+), 22 deletions(-)

diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
index ee3cf51..5b55f8f 100644
--- a/arch/arm/mm/pmsa-v7.c
+++ b/arch/arm/mm/pmsa-v7.c
@@ -12,63 +12,67 @@

#include "mm.h"

+#define DRBAR __ACCESS_CP15(c6, 0, c1, 0)
+#define IRBAR __ACCESS_CP15(c6, 0, c1, 1)
+#define DRSR __ACCESS_CP15(c6, 0, c1, 2)
+#define IRSR __ACCESS_CP15(c6, 0, c1, 3)
+#define DRACR __ACCESS_CP15(c6, 0, c1, 4)
+#define IRACR __ACCESS_CP15(c6, 0, c1, 5)
+#define RNGNR __ACCESS_CP15(c6, 0, c2, 0)
+
/* Region number */
-static void rgnr_write(u32 v)
+static inline void rgnr_write(u32 v)
{
- asm("mcr p15, 0, %0, c6, c2, 0" : : "r" (v));
+ write_sysreg(v, RNGNR);
}

/* Data-side / unified region attributes */

/* Region access control register */
-static void dracr_write(u32 v)
+static inline void dracr_write(u32 v)
{
- asm("mcr p15, 0, %0, c6, c1, 4" : : "r" (v));
+ write_sysreg(v, DRACR);
}

/* Region size register */
-static void drsr_write(u32 v)
+static inline void drsr_write(u32 v)
{
- asm("mcr p15, 0, %0, c6, c1, 2" : : "r" (v));
+ write_sysreg(v, DRSR);
}

/* Region base address register */
-static void drbar_write(u32 v)
+static inline void drbar_write(u32 v)
{
- asm("mcr p15, 0, %0, c6, c1, 0" : : "r" (v));
+ write_sysreg(v, DRBAR);
}

-static u32 drbar_read(void)
+static inline u32 drbar_read(void)
{
- u32 v;
- asm("mrc p15, 0, %0, c6, c1, 0" : "=r" (v));
- return v;
+ return read_sysreg(DRBAR);
}
/* Optional instruction-side region attributes */

/* I-side Region access control register */
-static void iracr_write(u32 v)
+static inline void iracr_write(u32 v)
{
- asm("mcr p15, 0, %0, c6, c1, 5" : : "r" (v));
+ write_sysreg(v, IRACR);
}

/* I-side Region size register */
-static void irsr_write(u32 v)
+static inline void irsr_write(u32 v)
{
- asm("mcr p15, 0, %0, c6, c1, 3" : : "r" (v));
+ write_sysreg(v, IRSR);
}

/* I-side Region base address register */
-static void irbar_write(u32 v)
+static inline void irbar_write(u32 v)
{
- asm("mcr p15, 0, %0, c6, c1, 1" : : "r" (v));
+ write_sysreg(v, IRBAR);
}

-static unsigned long irbar_read(void)
+static inline u32 irbar_read(void)
{
- unsigned long v;
- asm("mrc p15, 0, %0, c6, c1, 1" : "=r" (v));
- return v;
+ return read_sysreg(IRBAR);
}

/* MPU initialisation functions */
--
2.0.0
Vladimir Murzin
2017-07-21 13:12:31 UTC
Currently, there are several issues with how the MPU is set up:

1. We won't boot if the MPU is missing
2. We won't boot if XIP is used
3. Further extension of the MPU setup requires asm skills

The 1st point can be relaxed, so we can continue booting the boot CPU
even if the MPU is missing and fail the boot for secondaries only. To
address the 2nd point we could create a region covering
CONFIG_XIP_PHYS_ADDR - _end, and that might work for the first stage of
enabling the MPU, but due to the MPU's alignment requirements we could
cover too much; IOW we need more flexibility in how we partition memory
regions... and that is hardly possible to achieve because of the 3rd
point.

This patch tries to address the 1st and 3rd issues and paves the way
for the 2nd and further improvements.

The most visible change introduced with this patch is that we start
using the mpu_rgn_info array (as was originally intended?), so changes
to the MPU setup done by the boot CPU are recorded there and fed to the
secondaries. This allows us to keep a minimal region setup for the boot
CPU and do the rest in C. Since we now program MPU regions in C, the
evaluation of the MPU constraints (number of supported regions and
minimum region order) can be done once, which in turn opens the
possibility of freeing up the "probe" region early.
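
For illustration (the real secondary path is assembly in head-nommu.S),
the data flow through mpu_rgn_info amounts to something like the
following hypothetical C helper, which replays on a secondary the
regions the boot CPU recorded via mpu_setup_region():

/*
 * Illustration only: what __secondary_setup_mpu does, written in C.
 * The boot CPU fills mpu_rgn_info.rgns[] and bumps .used for every
 * region it programs; __cpu_up() passes &mpu_rgn_info via
 * secondary_data, and the secondary walks the array backwards.
 */
static void __init secondary_replay_mpu(struct mpu_rgn_info *info)
{
	int i;

	for (i = (int)info->used - 1; i >= 0; i--) {
		rgnr_write(i);				/* select region slot */
		isb();
		drbar_write(info->rgns[i].drbar);	/* base address */
		dracr_write(info->rgns[i].dracr);	/* access/attributes */
		drsr_write(info->rgns[i].drsr);		/* size + enable */
	}
	isb();
}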

Tested-by: Szemző András <***@esh.hu>
Signed-off-by: Vladimir Murzin <***@arm.com>
---
arch/arm/include/asm/mpu.h | 2 +-
arch/arm/include/asm/smp.h | 2 +-
arch/arm/kernel/asm-offsets.c | 11 ++++++
arch/arm/kernel/head-nommu.S | 80 +++++++++++++++++++++++++++++++++----------
arch/arm/kernel/smp.c | 2 +-
arch/arm/mm/pmsa-v7.c | 70 +++++++++++++++++++++++++++----------
6 files changed, 127 insertions(+), 40 deletions(-)

diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
index edec5cf..403462e 100644
--- a/arch/arm/include/asm/mpu.h
+++ b/arch/arm/include/asm/mpu.h
@@ -62,7 +62,7 @@ struct mpu_rgn {
};

struct mpu_rgn_info {
- u32 mpuir;
+ unsigned int used;
struct mpu_rgn rgns[MPU_MAX_REGIONS];
};
extern struct mpu_rgn_info mpu_rgn_info;
diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
index 3d6dc8b..709a559 100644
--- a/arch/arm/include/asm/smp.h
+++ b/arch/arm/include/asm/smp.h
@@ -60,7 +60,7 @@ asmlinkage void secondary_start_kernel(void);
*/
struct secondary_data {
union {
- unsigned long mpu_rgn_szr;
+ struct mpu_rgn_info *mpu_rgn_info;
u64 pgdir;
};
unsigned long swapper_pg_dir;
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 6080082..0098d3c4 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -23,6 +23,7 @@
#include <asm/mach/arch.h>
#include <asm/thread_info.h>
#include <asm/memory.h>
+#include <asm/mpu.h>
#include <asm/procinfo.h>
#include <asm/suspend.h>
#include <asm/vdso_datapage.h>
@@ -183,5 +184,15 @@ int main(void)
#ifdef CONFIG_VDSO
DEFINE(VDSO_DATA_SIZE, sizeof(union vdso_data_store));
#endif
+ BLANK();
+#ifdef CONFIG_ARM_MPU
+ DEFINE(MPU_RNG_INFO_RNGS, offsetof(struct mpu_rgn_info, rgns));
+ DEFINE(MPU_RNG_INFO_USED, offsetof(struct mpu_rgn_info, used));
+
+ DEFINE(MPU_RNG_SIZE, sizeof(struct mpu_rgn));
+ DEFINE(MPU_RGN_DRBAR, offsetof(struct mpu_rgn, drbar));
+ DEFINE(MPU_RGN_DRSR, offsetof(struct mpu_rgn, drsr));
+ DEFINE(MPU_RGN_DRACR, offsetof(struct mpu_rgn, dracr));
+#endif
return 0;
}
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 2e21e08..5f90a5f 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -13,6 +13,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
+#include <linux/errno.h>

#include <asm/assembler.h>
#include <asm/ptrace.h>
@@ -110,8 +111,8 @@ ENTRY(secondary_startup)

#ifdef CONFIG_ARM_MPU
/* Use MPU region info supplied by __cpu_up */
- ldr r6, [r7] @ get secondary_data.mpu_szr
- bl __setup_mpu @ Initialize the MPU
+ ldr r6, [r7] @ get secondary_data.mpu_rgn_info
+ bl __secondary_setup_mpu @ Initialize the MPU
#endif

badr lr, 1f @ return (PIC) address
@@ -204,13 +205,13 @@ ENTRY(__setup_mpu)
mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0
and r0, r0, #(MMFR0_PMSA) @ PMSA field
teq r0, #(MMFR0_PMSAv7) @ PMSA v7
- bne __error_p @ Fail: ARM_MPU on NOT v7 PMSA
+ bxne lr

/* Determine whether the D/I-side memory map is unified. We set the
* flags here and continue to use them for the rest of this function */
mrc p15, 0, r0, c0, c0, 4 @ MPUIR
ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU
- beq __error_p @ Fail: ARM_MPU and no MPU
+ bxeq lr
tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified

/* Setup second region first to free up r6 */
@@ -238,27 +239,70 @@ ENTRY(__setup_mpu)
setup_region r0, r5, r6, MPU_INSTR_SIDE @ 0x0, BG region, enabled
2: isb

- /* Vectors region */
- set_region_nr r0, #MPU_VECTORS_REGION
+ /* Enable the MPU */
+ mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR
+ bic r0, r0, #CR_BR @ Disable the 'default mem-map'
+ orr r0, r0, #CR_M @ Set SCTRL.M (MPU on)
+ mcr p15, 0, r0, c1, c0, 0 @ Enable MPU
+ isb
+
+ ret lr
+ENDPROC(__setup_mpu)
+
+#ifdef CONFIG_SMP
+/*
+ * r6: pointer at mpu_rgn_info
+ */
+
+ENTRY(__secondary_setup_mpu)
+ /* Probe for v7 PMSA compliance */
+ mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0
+ and r0, r0, #(MMFR0_PMSA) @ PMSA field
+ teq r0, #(MMFR0_PMSAv7) @ PMSA v7
+ bne __error_p
+
+ /* Determine whether the D/I-side memory map is unified. We set the
+ * flags here and continue to use them for the rest of this function */
+ mrc p15, 0, r0, c0, c0, 4 @ MPUIR
+ ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU
+ beq __error_p
+
+ ldr r4, [r6, #MPU_RNG_INFO_USED]
+ mov r5, #MPU_RNG_SIZE
+ add r3, r6, #MPU_RNG_INFO_RNGS
+ mla r3, r4, r5, r3
+
+1:
+ tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified
+ sub r3, r3, #MPU_RNG_SIZE
+ sub r4, r4, #1
+
+ set_region_nr r0, r4
isb
- /* Shared, inaccessible to PL0, rw PL1 */
- mov r0, #CONFIG_VECTORS_BASE @ Cover from VECTORS_BASE
- ldr r5,=(MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL)
- /* Writing N to bits 5:1 (RSR_SZ) --> region size 2^N+1 */
- mov r6, #(((2 * PAGE_SHIFT - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN)

- setup_region r0, r5, r6, MPU_DATA_SIDE @ VECTORS_BASE, PL0 NA, enabled
- beq 3f @ Memory-map not unified
- setup_region r0, r5, r6, MPU_INSTR_SIDE @ VECTORS_BASE, PL0 NA, enabled
-3: isb
+ ldr r0, [r3, #MPU_RGN_DRBAR]
+ ldr r6, [r3, #MPU_RGN_DRSR]
+ ldr r5, [r3, #MPU_RGN_DRACR]
+
+ setup_region r0, r5, r6, MPU_DATA_SIDE
+ beq 2f
+ setup_region r0, r5, r6, MPU_INSTR_SIDE
+2: isb
+
+ mrc p15, 0, r0, c0, c0, 4 @ Reevaluate the MPUIR
+ cmp r4, #0
+ bgt 1b

/* Enable the MPU */
mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR
- bic r0, r0, #CR_BR @ Disable the 'default mem-map'
+ bic r0, r0, #CR_BR @ Disable the 'default mem-map'
orr r0, r0, #CR_M @ Set SCTRL.M (MPU on)
mcr p15, 0, r0, c1, c0, 0 @ Enable MPU
isb
+
ret lr
-ENDPROC(__setup_mpu)
-#endif
+ENDPROC(__secondary_setup_mpu)
+
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_ARM_MPU */
#include "head-common.S"
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index c9a0a52..b4fbf00 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -114,7 +114,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
*/
secondary_data.stack = task_stack_page(idle) + THREAD_START_SP;
#ifdef CONFIG_ARM_MPU
- secondary_data.mpu_rgn_szr = mpu_rgn_info.rgns[MPU_RAM_REGION].drsr;
+ secondary_data.mpu_rgn_info = &mpu_rgn_info;
#endif

#ifdef CONFIG_MMU
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
index 5b55f8f..029d204 100644
--- a/arch/arm/mm/pmsa-v7.c
+++ b/arch/arm/mm/pmsa-v7.c
@@ -12,6 +12,9 @@

#include "mm.h"

+static unsigned int __initdata mpu_min_region_order;
+static unsigned int __initdata mpu_max_regions;
+
#define DRBAR __ACCESS_CP15(c6, 0, c1, 0)
#define IRBAR __ACCESS_CP15(c6, 0, c1, 1)
#define DRSR __ACCESS_CP15(c6, 0, c1, 2)
@@ -75,6 +78,11 @@ static inline u32 irbar_read(void)
return read_sysreg(IRBAR);
}

+static int __init mpu_present(void)
+{
+ return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
+}
+
/* MPU initialisation functions */
void __init adjust_lowmem_bounds_mpu(void)
{
@@ -85,6 +93,9 @@ void __init adjust_lowmem_bounds_mpu(void)
phys_addr_t mem_start;
phys_addr_t mem_end;

+ if (!mpu_present())
+ return;
+
for_each_memblock(memory, reg) {
if (first) {
/*
@@ -146,12 +157,7 @@ void __init adjust_lowmem_bounds_mpu(void)

}

-static int mpu_present(void)
-{
- return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
-}
-
-static int mpu_max_regions(void)
+static int __init __mpu_max_regions(void)
{
/*
* We don't support a different number of I/D side regions so if we
@@ -159,6 +165,7 @@ static int mpu_max_regions(void)
* whichever side has a smaller number of supported regions.
*/
u32 dregions, iregions, mpuir;
+
mpuir = read_cpuid(CPUID_MPUIR);

dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;
@@ -171,15 +178,16 @@ static int mpu_max_regions(void)
return min(dregions, iregions);
}

-static int mpu_iside_independent(void)
+static int __init mpu_iside_independent(void)
{
/* MPUIR.nU specifies whether there is *not* a unified memory map */
return read_cpuid(CPUID_MPUIR) & MPUIR_nU;
}

-static int mpu_min_region_order(void)
+static int __init __mpu_min_region_order(void)
{
u32 drbar_result, irbar_result;
+
/* We've kept a region free for this probing */
rgnr_write(MPU_PROBE_REGION);
isb();
@@ -198,22 +206,24 @@ static int mpu_min_region_order(void)
}
isb(); /* Ensure that MPU region operations have completed */
/* Return whichever result is larger */
+
return __ffs(max(drbar_result, irbar_result));
}

-static int mpu_setup_region(unsigned int number, phys_addr_t start,
+static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
unsigned int size_order, unsigned int properties)
{
u32 size_data;

/* We kept a region free for probing resolution of MPU regions*/
- if (number > mpu_max_regions() || number == MPU_PROBE_REGION)
+ if (number > mpu_max_regions
+ || number >= MPU_MAX_REGIONS)
return -ENOENT;

if (size_order > 32)
return -ENOMEM;

- if (size_order < mpu_min_region_order())
+ if (size_order < mpu_min_region_order)
return -ENOMEM;

/* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */
@@ -240,6 +250,9 @@ static int mpu_setup_region(unsigned int number, phys_addr_t start,
mpu_rgn_info.rgns[number].dracr = properties;
mpu_rgn_info.rgns[number].drbar = start;
mpu_rgn_info.rgns[number].drsr = size_data;
+
+ mpu_rgn_info.used++;
+
return 0;
}

@@ -248,19 +261,38 @@ static int mpu_setup_region(unsigned int number, phys_addr_t start,
*/
void __init mpu_setup(void)
{
- int region_err;
+ int region = 0, err = 0;
+
if (!mpu_present())
return;

- region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET,
- ilog2(memblock.memory.regions[0].size),
- MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL);
- if (region_err) {
- panic("MPU region initialization failure! %d", region_err);
+ /* Free-up MPU_PROBE_REGION */
+ mpu_min_region_order = __mpu_min_region_order();
+
+ /* How many regions are supported */
+ mpu_max_regions = __mpu_max_regions();
+
+ /* Now setup MPU (order is important) */
+
+ /* Background */
+ err |= mpu_setup_region(region++, 0, 32,
+ MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA);
+
+ /* RAM */
+ err |= mpu_setup_region(region++, PHYS_OFFSET,
+ ilog2(memblock.memory.regions[0].size),
+ MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL);
+
+ /* Vectors */
+ err |= mpu_setup_region(region++, vectors_base,
+ ilog2(2 * PAGE_SIZE),
+ MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL);
+ if (err) {
+ panic("MPU region initialization failure! %d", err);
} else {
pr_info("Using ARMv7 PMSA Compliant MPU. "
- "Region independence: %s, Max regions: %d\n",
+ "Region independence: %s, Used %d of %d regions\n",
mpu_iside_independent() ? "Yes" : "No",
- mpu_max_regions());
+ mpu_rgn_info.used, mpu_max_regions);
}
}
--
2.0.0
Vladimir Murzin
2017-07-21 13:12:29 UTC
Having the MPU handling code in a dedicated file makes it easier to
enhance and maintain.

Tested-by: Szemző András <***@esh.hu>
Signed-off-by: Vladimir Murzin <***@arm.com>
---
arch/arm/include/asm/mpu.h | 16 ++-
arch/arm/mm/Makefile | 1 +
arch/arm/mm/nommu.c | 254 +------------------------------------------
arch/arm/mm/pmsa-v7.c | 262 +++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 276 insertions(+), 257 deletions(-)
create mode 100644 arch/arm/mm/pmsa-v7.c

diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
index c3247cc..edec5cf 100644
--- a/arch/arm/include/asm/mpu.h
+++ b/arch/arm/include/asm/mpu.h
@@ -1,8 +1,6 @@
#ifndef __ARM_MPU_H
#define __ARM_MPU_H

-#ifdef CONFIG_ARM_MPU
-
/* MPUIR layout */
#define MPUIR_nU 1
#define MPUIR_DREGION 8
@@ -69,8 +67,18 @@ struct mpu_rgn_info {
};
extern struct mpu_rgn_info mpu_rgn_info;

-#endif /* __ASSEMBLY__ */
+#ifdef CONFIG_ARM_MPU
+
+extern void __init adjust_lowmem_bounds_mpu(void);
+extern void __init mpu_setup(void);

-#endif /* CONFIG_ARM_MPU */
+#else
+
+static inline void adjust_lowmem_bounds_mpu(void) {}
+static inline void mpu_setup(void) {}
+
+#endif /* !CONFIG_ARM_MPU */
+
+#endif /* __ASSEMBLY__ */

#endif
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 950d19b..bdb2ec1 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \

ifneq ($(CONFIG_MMU),y)
obj-y += nommu.o
+obj-$(CONFIG_ARM_MPU) += pmsa-v7.o
endif

obj-$(CONFIG_ARM_PTDUMP) += dump.o
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 3b8e728..4c56b56 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -27,259 +27,7 @@ unsigned long vectors_base;

#ifdef CONFIG_ARM_MPU
struct mpu_rgn_info mpu_rgn_info;
-
-/* Region number */
-static void rgnr_write(u32 v)
-{
- asm("mcr p15, 0, %0, c6, c2, 0" : : "r" (v));
-}
-
-/* Data-side / unified region attributes */
-
-/* Region access control register */
-static void dracr_write(u32 v)
-{
- asm("mcr p15, 0, %0, c6, c1, 4" : : "r" (v));
-}
-
-/* Region size register */
-static void drsr_write(u32 v)
-{
- asm("mcr p15, 0, %0, c6, c1, 2" : : "r" (v));
-}
-
-/* Region base address register */
-static void drbar_write(u32 v)
-{
- asm("mcr p15, 0, %0, c6, c1, 0" : : "r" (v));
-}
-
-static u32 drbar_read(void)
-{
- u32 v;
- asm("mrc p15, 0, %0, c6, c1, 0" : "=r" (v));
- return v;
-}
-/* Optional instruction-side region attributes */
-
-/* I-side Region access control register */
-static void iracr_write(u32 v)
-{
- asm("mcr p15, 0, %0, c6, c1, 5" : : "r" (v));
-}
-
-/* I-side Region size register */
-static void irsr_write(u32 v)
-{
- asm("mcr p15, 0, %0, c6, c1, 3" : : "r" (v));
-}
-
-/* I-side Region base address register */
-static void irbar_write(u32 v)
-{
- asm("mcr p15, 0, %0, c6, c1, 1" : : "r" (v));
-}
-
-static unsigned long irbar_read(void)
-{
- unsigned long v;
- asm("mrc p15, 0, %0, c6, c1, 1" : "=r" (v));
- return v;
-}
-
-/* MPU initialisation functions */
-void __init adjust_lowmem_bounds_mpu(void)
-{
- phys_addr_t phys_offset = PHYS_OFFSET;
- phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
- struct memblock_region *reg;
- bool first = true;
- phys_addr_t mem_start;
- phys_addr_t mem_end;
-
- for_each_memblock(memory, reg) {
- if (first) {
- /*
- * Initially only use memory continuous from
- * PHYS_OFFSET */
- if (reg->base != phys_offset)
- panic("First memory bank must be contiguous from PHYS_OFFSET");
-
- mem_start = reg->base;
- mem_end = reg->base + reg->size;
- specified_mem_size = reg->size;
- first = false;
- } else {
- /*
- * memblock auto merges contiguous blocks, remove
- * all blocks afterwards in one go (we can't remove
- * blocks separately while iterating)
- */
- pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
- &mem_end, &reg->base);
- memblock_remove(reg->base, 0 - reg->base);
- break;
- }
- }
-
- /*
- * MPU has curious alignment requirements: Size must be power of 2, and
- * region start must be aligned to the region size
- */
- if (phys_offset != 0)
- pr_info("PHYS_OFFSET != 0 => MPU Region size constrained by alignment requirements\n");
-
- /*
- * Maximum aligned region might overflow phys_addr_t if phys_offset is
- * 0. Hence we keep everything below 4G until we take the smaller of
- * the aligned_region_size and rounded_mem_size, one of which is
- * guaranteed to be smaller than the maximum physical address.
- */
- aligned_region_size = (phys_offset - 1) ^ (phys_offset);
- /* Find the max power-of-two sized region that fits inside our bank */
- rounded_mem_size = (1 << __fls(specified_mem_size)) - 1;
-
- /* The actual region size is the smaller of the two */
- aligned_region_size = aligned_region_size < rounded_mem_size
- ? aligned_region_size + 1
- : rounded_mem_size + 1;
-
- if (aligned_region_size != specified_mem_size) {
- pr_warn("Truncating memory from %pa to %pa (MPU region constraints)",
- &specified_mem_size, &aligned_region_size);
- memblock_remove(mem_start + aligned_region_size,
- specified_mem_size - aligned_region_size);
-
- mem_end = mem_start + aligned_region_size;
- }
-
- pr_debug("MPU Region from %pa size %pa (end %pa))\n",
- &phys_offset, &aligned_region_size, &mem_end);
-
-}
-
-static int mpu_present(void)
-{
- return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
-}
-
-static int mpu_max_regions(void)
-{
- /*
- * We don't support a different number of I/D side regions so if we
- * have separate instruction and data memory maps then return
- * whichever side has a smaller number of supported regions.
- */
- u32 dregions, iregions, mpuir;
- mpuir = read_cpuid(CPUID_MPUIR);
-
- dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;
-
- /* Check for separate d-side and i-side memory maps */
- if (mpuir & MPUIR_nU)
- iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION;
-
- /* Use the smallest of the two maxima */
- return min(dregions, iregions);
-}
-
-static int mpu_iside_independent(void)
-{
- /* MPUIR.nU specifies whether there is *not* a unified memory map */
- return read_cpuid(CPUID_MPUIR) & MPUIR_nU;
-}
-
-static int mpu_min_region_order(void)
-{
- u32 drbar_result, irbar_result;
- /* We've kept a region free for this probing */
- rgnr_write(MPU_PROBE_REGION);
- isb();
- /*
- * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
- * region order
- */
- drbar_write(0xFFFFFFFC);
- drbar_result = irbar_result = drbar_read();
- drbar_write(0x0);
- /* If the MPU is non-unified, we use the larger of the two minima*/
- if (mpu_iside_independent()) {
- irbar_write(0xFFFFFFFC);
- irbar_result = irbar_read();
- irbar_write(0x0);
- }
- isb(); /* Ensure that MPU region operations have completed */
- /* Return whichever result is larger */
- return __ffs(max(drbar_result, irbar_result));
-}
-
-static int mpu_setup_region(unsigned int number, phys_addr_t start,
- unsigned int size_order, unsigned int properties)
-{
- u32 size_data;
-
- /* We kept a region free for probing resolution of MPU regions*/
- if (number > mpu_max_regions() || number == MPU_PROBE_REGION)
- return -ENOENT;
-
- if (size_order > 32)
- return -ENOMEM;
-
- if (size_order < mpu_min_region_order())
- return -ENOMEM;
-
- /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */
- size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN;
-
- dsb(); /* Ensure all previous data accesses occur with old mappings */
- rgnr_write(number);
- isb();
- drbar_write(start);
- dracr_write(properties);
- isb(); /* Propagate properties before enabling region */
- drsr_write(size_data);
-
- /* Check for independent I-side registers */
- if (mpu_iside_independent()) {
- irbar_write(start);
- iracr_write(properties);
- isb();
- irsr_write(size_data);
- }
- isb();
-
- /* Store region info (we treat i/d side the same, so only store d) */
- mpu_rgn_info.rgns[number].dracr = properties;
- mpu_rgn_info.rgns[number].drbar = start;
- mpu_rgn_info.rgns[number].drsr = size_data;
- return 0;
-}
-
-/*
-* Set up default MPU regions, doing nothing if there is no MPU
-*/
-void __init mpu_setup(void)
-{
- int region_err;
- if (!mpu_present())
- return;
-
- region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET,
- ilog2(memblock.memory.regions[0].size),
- MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL);
- if (region_err) {
- panic("MPU region initialization failure! %d", region_err);
- } else {
- pr_info("Using ARMv7 PMSA Compliant MPU. "
- "Region independence: %s, Max regions: %d\n",
- mpu_iside_independent() ? "Yes" : "No",
- mpu_max_regions());
- }
-}
-#else
-static void adjust_lowmem_bounds_mpu(void) {}
-static void __init mpu_setup(void) {}
-#endif /* CONFIG_ARM_MPU */
+#endif

#ifdef CONFIG_CPU_CP15
#ifdef CONFIG_CPU_HIGH_VECTOR
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
new file mode 100644
index 0000000..ee3cf51
--- /dev/null
+++ b/arch/arm/mm/pmsa-v7.c
@@ -0,0 +1,262 @@
+/*
+ * linux/arch/arm/mm/nommu.c
+ *
+ * ARM uCLinux supporting functions.
+ */
+
+#include <linux/memblock.h>
+
+#include <asm/cp15.h>
+#include <asm/cputype.h>
+#include <asm/mpu.h>
+
+#include "mm.h"
+
+/* Region number */
+static void rgnr_write(u32 v)
+{
+ asm("mcr p15, 0, %0, c6, c2, 0" : : "r" (v));
+}
+
+/* Data-side / unified region attributes */
+
+/* Region access control register */
+static void dracr_write(u32 v)
+{
+ asm("mcr p15, 0, %0, c6, c1, 4" : : "r" (v));
+}
+
+/* Region size register */
+static void drsr_write(u32 v)
+{
+ asm("mcr p15, 0, %0, c6, c1, 2" : : "r" (v));
+}
+
+/* Region base address register */
+static void drbar_write(u32 v)
+{
+ asm("mcr p15, 0, %0, c6, c1, 0" : : "r" (v));
+}
+
+static u32 drbar_read(void)
+{
+ u32 v;
+ asm("mrc p15, 0, %0, c6, c1, 0" : "=r" (v));
+ return v;
+}
+/* Optional instruction-side region attributes */
+
+/* I-side Region access control register */
+static void iracr_write(u32 v)
+{
+ asm("mcr p15, 0, %0, c6, c1, 5" : : "r" (v));
+}
+
+/* I-side Region size register */
+static void irsr_write(u32 v)
+{
+ asm("mcr p15, 0, %0, c6, c1, 3" : : "r" (v));
+}
+
+/* I-side Region base address register */
+static void irbar_write(u32 v)
+{
+ asm("mcr p15, 0, %0, c6, c1, 1" : : "r" (v));
+}
+
+static unsigned long irbar_read(void)
+{
+ unsigned long v;
+ asm("mrc p15, 0, %0, c6, c1, 1" : "=r" (v));
+ return v;
+}
+
+/* MPU initialisation functions */
+void __init adjust_lowmem_bounds_mpu(void)
+{
+ phys_addr_t phys_offset = PHYS_OFFSET;
+ phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
+ struct memblock_region *reg;
+ bool first = true;
+ phys_addr_t mem_start;
+ phys_addr_t mem_end;
+
+ for_each_memblock(memory, reg) {
+ if (first) {
+ /*
+ * Initially only use memory continuous from
+ * PHYS_OFFSET */
+ if (reg->base != phys_offset)
+ panic("First memory bank must be contiguous from PHYS_OFFSET");
+
+ mem_start = reg->base;
+ mem_end = reg->base + reg->size;
+ specified_mem_size = reg->size;
+ first = false;
+ } else {
+ /*
+ * memblock auto merges contiguous blocks, remove
+ * all blocks afterwards in one go (we can't remove
+ * blocks separately while iterating)
+ */
+ pr_notice("Ignoring RAM after %pa, memory at %pa ignored\n",
+ &mem_end, &reg->base);
+ memblock_remove(reg->base, 0 - reg->base);
+ break;
+ }
+ }
+
+ /*
+ * MPU has curious alignment requirements: Size must be power of 2, and
+ * region start must be aligned to the region size
+ */
+ if (phys_offset != 0)
+ pr_info("PHYS_OFFSET != 0 => MPU Region size constrained by alignment requirements\n");
+
+ /*
+ * Maximum aligned region might overflow phys_addr_t if phys_offset is
+ * 0. Hence we keep everything below 4G until we take the smaller of
+ * the aligned_region_size and rounded_mem_size, one of which is
+ * guaranteed to be smaller than the maximum physical address.
+ */
+ aligned_region_size = (phys_offset - 1) ^ (phys_offset);
+ /* Find the max power-of-two sized region that fits inside our bank */
+ rounded_mem_size = (1 << __fls(specified_mem_size)) - 1;
+
+ /* The actual region size is the smaller of the two */
+ aligned_region_size = aligned_region_size < rounded_mem_size
+ ? aligned_region_size + 1
+ : rounded_mem_size + 1;
+
+ if (aligned_region_size != specified_mem_size) {
+ pr_warn("Truncating memory from %pa to %pa (MPU region constraints)",
+ &specified_mem_size, &aligned_region_size);
+ memblock_remove(mem_start + aligned_region_size,
+ specified_mem_size - aligned_region_size);
+
+ mem_end = mem_start + aligned_region_size;
+ }
+
+ pr_debug("MPU Region from %pa size %pa (end %pa))\n",
+ &phys_offset, &aligned_region_size, &mem_end);
+
+}
+
+static int mpu_present(void)
+{
+ return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
+}
+
+static int mpu_max_regions(void)
+{
+ /*
+ * We don't support a different number of I/D side regions so if we
+ * have separate instruction and data memory maps then return
+ * whichever side has a smaller number of supported regions.
+ */
+ u32 dregions, iregions, mpuir;
+ mpuir = read_cpuid(CPUID_MPUIR);
+
+ dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;
+
+ /* Check for separate d-side and i-side memory maps */
+ if (mpuir & MPUIR_nU)
+ iregions = (mpuir & MPUIR_IREGION_SZMASK) >> MPUIR_IREGION;
+
+ /* Use the smallest of the two maxima */
+ return min(dregions, iregions);
+}
+
+static int mpu_iside_independent(void)
+{
+ /* MPUIR.nU specifies whether there is *not* a unified memory map */
+ return read_cpuid(CPUID_MPUIR) & MPUIR_nU;
+}
+
+static int mpu_min_region_order(void)
+{
+ u32 drbar_result, irbar_result;
+ /* We've kept a region free for this probing */
+ rgnr_write(MPU_PROBE_REGION);
+ isb();
+ /*
+ * As per ARM ARM, write 0xFFFFFFFC to DRBAR to find the minimum
+ * region order
+ */
+ drbar_write(0xFFFFFFFC);
+ drbar_result = irbar_result = drbar_read();
+ drbar_write(0x0);
+ /* If the MPU is non-unified, we use the larger of the two minima*/
+ if (mpu_iside_independent()) {
+ irbar_write(0xFFFFFFFC);
+ irbar_result = irbar_read();
+ irbar_write(0x0);
+ }
+ isb(); /* Ensure that MPU region operations have completed */
+ /* Return whichever result is larger */
+ return __ffs(max(drbar_result, irbar_result));
+}
+
+static int mpu_setup_region(unsigned int number, phys_addr_t start,
+ unsigned int size_order, unsigned int properties)
+{
+ u32 size_data;
+
+ /* We kept a region free for probing resolution of MPU regions*/
+ if (number > mpu_max_regions() || number == MPU_PROBE_REGION)
+ return -ENOENT;
+
+ if (size_order > 32)
+ return -ENOMEM;
+
+ if (size_order < mpu_min_region_order())
+ return -ENOMEM;
+
+ /* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */
+ size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN;
+
+ dsb(); /* Ensure all previous data accesses occur with old mappings */
+ rgnr_write(number);
+ isb();
+ drbar_write(start);
+ dracr_write(properties);
+ isb(); /* Propagate properties before enabling region */
+ drsr_write(size_data);
+
+ /* Check for independent I-side registers */
+ if (mpu_iside_independent()) {
+ irbar_write(start);
+ iracr_write(properties);
+ isb();
+ irsr_write(size_data);
+ }
+ isb();
+
+ /* Store region info (we treat i/d side the same, so only store d) */
+ mpu_rgn_info.rgns[number].dracr = properties;
+ mpu_rgn_info.rgns[number].drbar = start;
+ mpu_rgn_info.rgns[number].drsr = size_data;
+ return 0;
+}
+
+/*
+* Set up default MPU regions, doing nothing if there is no MPU
+*/
+void __init mpu_setup(void)
+{
+ int region_err;
+ if (!mpu_present())
+ return;
+
+ region_err = mpu_setup_region(MPU_RAM_REGION, PHYS_OFFSET,
+ ilog2(memblock.memory.regions[0].size),
+ MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL);
+ if (region_err) {
+ panic("MPU region initialization failure! %d", region_err);
+ } else {
+ pr_info("Using ARMv7 PMSA Compliant MPU. "
+ "Region independence: %s, Max regions: %d\n",
+ mpu_iside_independent() ? "Yes" : "No",
+ mpu_max_regions());
+ }
+}
--
2.0.0
Vladimir Murzin
2017-07-21 13:12:34 UTC
This patch makes it possible to use the MPU with v7M cores.
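
On v7M the MPU lives in the System Control Space as memory-mapped
registers rather than behind cp15, and the separate v7-A/R DRSR/DRACR
registers are folded into a single MPU_RASR word: attributes in bits
31:16, size/enable (and subregion disable) in bits 15:0. That is why the
v7M accessors in the pmsa-v7.c hunk below read-modify-write MPU_RASR;
conceptually they pack the two halves like this hypothetical helper:

/* Illustration only: how DRACR and DRSR values map onto v7M MPU_RASR */
static inline u32 v7m_rasr_pack(u32 dracr, u32 drsr)
{
	return (dracr << 16) | (drsr & GENMASK(15, 0));
}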

Tested-by: Szemző András <***@esh.hu>
Signed-off-by: Vladimir Murzin <***@arm.com>
---
arch/arm/Kconfig-nommu | 4 +--
arch/arm/include/asm/cputype.h | 10 ++++++++
arch/arm/include/asm/v7m.h | 10 ++++++++
arch/arm/kernel/head-nommu.S | 56 ++++++++++++++++++++++++++++++------------
arch/arm/mm/pmsa-v7.c | 53 +++++++++++++++++++++++++++++++++++++--
5 files changed, 113 insertions(+), 20 deletions(-)

diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu
index 6d18395..930e000 100644
--- a/arch/arm/Kconfig-nommu
+++ b/arch/arm/Kconfig-nommu
@@ -52,8 +52,8 @@ config REMAP_VECTORS_TO_RAM

config ARM_MPU
bool 'Use the ARM v7 PMSA Compliant MPU'
- depends on !XIP_KERNEL && CPU_V7
- default y
+ depends on !XIP_KERNEL && (CPU_V7 || CPU_V7M)
+ default y if CPU_V7
help
Some ARM systems without an MMU have instead a Memory Protection
Unit (MPU) that defines the type and permissions for regions of
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index b62eaeb..abaac5e 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -173,6 +173,11 @@ static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
return read_cpuid(CPUID_CACHETYPE);
}

+static inline unsigned int __attribute_const__ read_cpuid_mputype(void)
+{
+ return read_cpuid(CPUID_MPUIR);
+}
+
#elif defined(CONFIG_CPU_V7M)

static inline unsigned int __attribute_const__ read_cpuid_id(void)
@@ -185,6 +190,11 @@ static inline unsigned int __attribute_const__ read_cpuid_cachetype(void)
return readl(BASEADDR_V7M_SCB + V7M_SCB_CTR);
}

+static inline unsigned int __attribute_const__ read_cpuid_mputype(void)
+{
+ return readl(BASEADDR_V7M_SCB + MPU_TYPE);
+}
+
#else /* ifdef CONFIG_CPU_CP15 / elif defined(CONFIG_CPU_V7M) */

static inline unsigned int __attribute_const__ read_cpuid_id(void)
diff --git a/arch/arm/include/asm/v7m.h b/arch/arm/include/asm/v7m.h
index 1fd775c..5de776c 100644
--- a/arch/arm/include/asm/v7m.h
+++ b/arch/arm/include/asm/v7m.h
@@ -57,6 +57,16 @@
#define V7M_SCB_CCSIDR 0x80 /* Cache size ID register */
#define V7M_SCB_CSSELR 0x84 /* Cache size selection register */

+/* Memory-mapped MPU registers for M-class */
+#define MPU_TYPE 0x90
+#define MPU_CTRL 0x94
+#define MPU_CTRL_ENABLE 1
+#define MPU_CTRL_PRIVDEFENA (1 << 2)
+
+#define MPU_RNR 0x98
+#define MPU_RBAR 0x9c
+#define MPU_RASR 0xa0
+
/* Cache opeartions */
#define V7M_SCB_ICIALLU 0x250 /* I-cache invalidate all to PoU */
#define V7M_SCB_ICIMVAU 0x258 /* I-cache invalidate by MVA to PoU */
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 5f90a5f..0d64b8b 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -176,19 +176,33 @@ ENDPROC(__after_proc_init)
#ifdef CONFIG_ARM_MPU


+#ifndef CONFIG_CPU_V7M
/* Set which MPU region should be programmed */
-.macro set_region_nr tmp, rgnr
+.macro set_region_nr tmp, rgnr, unused
mov \tmp, \rgnr @ Use static region numbers
mcr p15, 0, \tmp, c6, c2, 0 @ Write RGNR
.endm

/* Setup a single MPU region, either D or I side (D-side for unified) */
-.macro setup_region bar, acr, sr, side = MPU_DATA_SIDE
+.macro setup_region bar, acr, sr, side = MPU_DATA_SIDE, unused
mcr p15, 0, \bar, c6, c1, (0 + \side) @ I/DRBAR
mcr p15, 0, \acr, c6, c1, (4 + \side) @ I/DRACR
mcr p15, 0, \sr, c6, c1, (2 + \side) @ I/DRSR
.endm
+#else
+.macro set_region_nr tmp, rgnr, base
+ mov \tmp, \rgnr
+ str \tmp, [\base, #MPU_RNR]
+.endm
+
+.macro setup_region bar, acr, sr, unused, base
+ lsl \acr, \acr, #16
+ orr \acr, \acr, \sr
+ str \bar, [\base, #MPU_RBAR]
+ str \acr, [\base, #MPU_RASR]
+.endm

+#endif
/*
* Setup the MPU and initial MPU Regions. We create the following regions:
* Region 0: Use this for probing the MPU details, so leave disabled.
@@ -202,48 +216,58 @@ ENDPROC(__after_proc_init)
ENTRY(__setup_mpu)

/* Probe for v7 PMSA compliance */
- mrc p15, 0, r0, c0, c1, 4 @ Read ID_MMFR0
+M_CLASS(movw r12, #:lower16:BASEADDR_V7M_SCB)
+M_CLASS(movt r12, #:upper16:BASEADDR_V7M_SCB)
+
+AR_CLASS(mrc p15, 0, r0, c0, c1, 4) @ Read ID_MMFR0
+M_CLASS(ldr r0, [r12, 0x50])
and r0, r0, #(MMFR0_PMSA) @ PMSA field
teq r0, #(MMFR0_PMSAv7) @ PMSA v7
bxne lr

/* Determine whether the D/I-side memory map is unified. We set the
* flags here and continue to use them for the rest of this function */
- mrc p15, 0, r0, c0, c0, 4 @ MPUIR
+AR_CLASS(mrc p15, 0, r0, c0, c0, 4) @ MPUIR
+M_CLASS(ldr r0, [r12, #MPU_TYPE])
ands r5, r0, #MPUIR_DREGION_SZMASK @ 0 size d region => No MPU
bxeq lr
tst r0, #MPUIR_nU @ MPUIR_nU = 0 for unified

/* Setup second region first to free up r6 */
- set_region_nr r0, #MPU_RAM_REGION
+ set_region_nr r0, #MPU_RAM_REGION, r12
isb
/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)

- setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled
- beq 1f @ Memory-map not unified
- setup_region r0, r5, r6, MPU_INSTR_SIDE @ PHYS_OFFSET, shared, enabled
+ setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled
+ beq 1f @ Memory-map not unified
+ setup_region r0, r5, r6, MPU_INSTR_SIDE, r12 @ PHYS_OFFSET, shared, enabled
1: isb

/* First/background region */
- set_region_nr r0, #MPU_BG_REGION
+ set_region_nr r0, #MPU_BG_REGION, r12
isb
/* Execute Never, strongly ordered, inaccessible to PL0, rw PL1 */
mov r0, #0 @ BG region starts at 0x0
ldr r5,=(MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA)
mov r6, #MPU_RSR_ALL_MEM @ 4GB region, enabled

- setup_region r0, r5, r6, MPU_DATA_SIDE @ 0x0, BG region, enabled
- beq 2f @ Memory-map not unified
- setup_region r0, r5, r6, MPU_INSTR_SIDE @ 0x0, BG region, enabled
+ setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ 0x0, BG region, enabled
+ beq 2f @ Memory-map not unified
+ setup_region r0, r5, r6, MPU_INSTR_SIDE, r12 @ 0x0, BG region, enabled
2: isb

/* Enable the MPU */
- mrc p15, 0, r0, c1, c0, 0 @ Read SCTLR
- bic r0, r0, #CR_BR @ Disable the 'default mem-map'
- orr r0, r0, #CR_M @ Set SCTRL.M (MPU on)
- mcr p15, 0, r0, c1, c0, 0 @ Enable MPU
+AR_CLASS(mrc p15, 0, r0, c1, c0, 0) @ Read SCTLR
+AR_CLASS(bic r0, r0, #CR_BR) @ Disable the 'default mem-map'
+AR_CLASS(orr r0, r0, #CR_M) @ Set SCTRL.M (MPU on)
+AR_CLASS(mcr p15, 0, r0, c1, c0, 0) @ Enable MPU
+
+M_CLASS(ldr r0, [r12, #MPU_CTRL])
+M_CLASS(bic r0, #MPU_CTRL_PRIVDEFENA)
+M_CLASS(orr r0, #MPU_CTRL_ENABLE)
+M_CLASS(str r0, [r12, #MPU_CTRL])
isb

ret lr
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
index 029d204..72f1a9f 100644
--- a/arch/arm/mm/pmsa-v7.c
+++ b/arch/arm/mm/pmsa-v7.c
@@ -15,6 +15,8 @@
static unsigned int __initdata mpu_min_region_order;
static unsigned int __initdata mpu_max_regions;

+#ifndef CONFIG_CPU_V7M
+
#define DRBAR __ACCESS_CP15(c6, 0, c1, 0)
#define IRBAR __ACCESS_CP15(c6, 0, c1, 1)
#define DRSR __ACCESS_CP15(c6, 0, c1, 2)
@@ -78,6 +80,51 @@ static inline u32 irbar_read(void)
return read_sysreg(IRBAR);
}

+#else
+
+static inline void rgnr_write(u32 v)
+{
+ writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RNR);
+}
+
+/* Data-side / unified region attributes */
+
+/* Region access control register */
+static inline void dracr_write(u32 v)
+{
+ u32 rsr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(15, 0);
+
+ writel_relaxed((v << 16) | rsr, BASEADDR_V7M_SCB + MPU_RASR);
+}
+
+/* Region size register */
+static inline void drsr_write(u32 v)
+{
+ u32 racr = readl_relaxed(BASEADDR_V7M_SCB + MPU_RASR) & GENMASK(31, 16);
+
+ writel_relaxed(v | racr, BASEADDR_V7M_SCB + MPU_RASR);
+}
+
+/* Region base address register */
+static inline void drbar_write(u32 v)
+{
+ writel_relaxed(v, BASEADDR_V7M_SCB + MPU_RBAR);
+}
+
+static inline u32 drbar_read(void)
+{
+ return readl_relaxed(BASEADDR_V7M_SCB + MPU_RBAR);
+}
+
+/* ARMv7-M only supports a unified MPU, so I-side operations are nop */
+
+static inline void iracr_write(u32 v) {}
+static inline void irsr_write(u32 v) {}
+static inline void irbar_write(u32 v) {}
+static inline unsigned long irbar_read(void) {return 0;}
+
+#endif
+
static int __init mpu_present(void)
{
return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
@@ -166,7 +213,7 @@ static int __init __mpu_max_regions(void)
*/
u32 dregions, iregions, mpuir;

- mpuir = read_cpuid(CPUID_MPUIR);
+ mpuir = read_cpuid_mputype();

dregions = iregions = (mpuir & MPUIR_DREGION_SZMASK) >> MPUIR_DREGION;

@@ -181,7 +228,7 @@ static int __init __mpu_max_regions(void)
static int __init mpu_iside_independent(void)
{
/* MPUIR.nU specifies whether there is *not* a unified memory map */
- return read_cpuid(CPUID_MPUIR) & MPUIR_nU;
+ return read_cpuid_mputype() & MPUIR_nU;
}

static int __init __mpu_min_region_order(void)
@@ -284,9 +331,11 @@ void __init mpu_setup(void)
MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL);

/* Vectors */
+#ifndef CONFIG_CPU_V7M
err |= mpu_setup_region(region++, vectors_base,
ilog2(2 * PAGE_SIZE),
MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL);
+#endif
if (err) {
panic("MPU region initialization failure! %d", err);
} else {
--
2.0.0
Vladimir Murzin
2017-07-21 13:12:33 UTC
The last user of CONFIG_VECTORS_BASE has gone, so kill it.

Reported-by: Afzal Mohammed <***@gmail.com>
Signed-off-by: Vladimir Murzin <***@arm.com>
---
arch/arm/Kconfig | 9 ---------
1 file changed, 9 deletions(-)

diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 139c638..f3b0c0f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -239,15 +239,6 @@ config NEED_RET_TO_USER
config ARCH_MTD_XIP
bool

-config VECTORS_BASE
- hex
- default 0xffff0000 if MMU || CPU_HIGH_VECTOR
- default DRAM_BASE if REMAP_VECTORS_TO_RAM
- default 0x00000000
- help
- The base address of exception vectors. This must be two pages
- in size.
-
config ARM_PATCH_PHYS_VIRT
bool "Patch physical to virtual translations at runtime" if EMBEDDED
default y
--
2.0.0
Vladimir Murzin
2017-07-21 13:12:35 UTC
PMSAv7 imposes curious alignment requirements on regions:
- size must be a power of 2, and
- region start must be aligned to the region size

Because of that, we currently adjust the lowmem bounds and assign only
one MPU region to cover memory; together these can waste a significant
amount of memory. As an example, consider 64MB of memory at
0x70000000 - it fits the alignment requirements nicely; now imagine
that 2MB of memory is reserved for coherent DMA allocation, so Linux is
expected to see 62MB of memory... and here the annoying thing happens -
memory gets truncated to 32MB (we've lost 30MB!), i.e. the MPU layout
looks like:

0: base 0x70000000, size 0x2000000

This patch tries to allocate as many MPU slots as possible to minimise
the amount of truncated memory. Moreover, with this patch MPU
subregions start to get used. MPU subregions allow us to reduce the
number of MPU slots needed. For the example given above, the MPU layout
looks like:

0: base 0x70000000, size 0x2000000
1: base 0x72000000, size 0x1000000
2: base 0x73000000, size 0x1000000, disable subreg 7 (0x73e00000 - 0x73ffffff)

Where without subregions we'd get:

0: base 0x70000000, size 0x2000000
1: base 0x72000000, size 0x1000000
2: base 0x73000000, size 0x800000
3: base 0x73800000, size 0x400000
4: base 0x73c00000, size 0x200000

To achieve a better layout we first try to cover the specified memory
as is (maybe with the help of subregions), and if that fails, we
truncate the memory to fit the alignment requirements (so it occupies
one MPU slot) and make one more attempt with the remainder, and so on
until we either cover all memory or run out of MPU slots.
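
Working through the 62MB example above (illustrative arithmetic only):
the first pass would need a 64MB region whose subregions are 8MB each,
and the missing 2MB is not a multiple of 8MB, so we truncate to the
largest aligned power of two, 32MB (region 0). The 30MB remainder at
0x72000000 would need a 32MB region with 4MB subregions, again no fit,
so it is truncated to 16MB (region 1). The final 14MB at 0x73000000
fits a 16MB region with 2MB subregions once subregion 7 is disabled
(region 2), so the whole 62MB is covered in three slots with nothing
lost.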

Tested-by: Szemző András <***@esh.hu>
Signed-off-by: Vladimir Murzin <***@arm.com>
---
arch/arm/include/asm/mpu.h | 5 ++
arch/arm/mm/pmsa-v7.c | 190 ++++++++++++++++++++++++++++++++++-----------
2 files changed, 149 insertions(+), 46 deletions(-)

diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
index 403462e..5db37a6 100644
--- a/arch/arm/include/asm/mpu.h
+++ b/arch/arm/include/asm/mpu.h
@@ -15,6 +15,11 @@
/* MPU D/I Size Register fields */
#define MPU_RSR_SZ 1
#define MPU_RSR_EN 0
+#define MPU_RSR_SD 8
+
+/* Number of subregions (SD) */
+#define MPU_NR_SUBREGS 8
+#define MPU_MIN_SUBREG_SIZE 256

/* The D/I RSR value for an enabled region spanning the whole of memory */
#define MPU_RSR_ALL_MEM 63
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
index 72f1a9f..c1f1fc7 100644
--- a/arch/arm/mm/pmsa-v7.c
+++ b/arch/arm/mm/pmsa-v7.c
@@ -4,6 +4,7 @@
* ARM uCLinux supporting functions.
*/

+#include <linux/bitops.h>
#include <linux/memblock.h>

#include <asm/cp15.h>
@@ -12,9 +13,20 @@

#include "mm.h"

+struct region {
+ phys_addr_t base;
+ phys_addr_t size;
+ unsigned long subreg;
+};
+
+static struct region __initdata mem[MPU_MAX_REGIONS];
+
static unsigned int __initdata mpu_min_region_order;
static unsigned int __initdata mpu_max_regions;

+static int __init __mpu_min_region_order(void);
+static int __init __mpu_max_regions(void);
+
#ifndef CONFIG_CPU_V7M

#define DRBAR __ACCESS_CP15(c6, 0, c1, 0)
@@ -130,19 +142,120 @@ static int __init mpu_present(void)
return ((read_cpuid_ext(CPUID_EXT_MMFR0) & MMFR0_PMSA) == MMFR0_PMSAv7);
}

+static bool __init try_split_region(phys_addr_t base, phys_addr_t size, struct region *region)
+{
+ unsigned long subreg, bslots, sslots;
+ phys_addr_t abase = base & ~(size - 1);
+ phys_addr_t asize = base + size - abase;
+ phys_addr_t p2size = 1 << __fls(asize);
+ phys_addr_t bdiff, sdiff;
+
+ if (p2size != asize)
+ p2size *= 2;
+
+ bdiff = base - abase;
+ sdiff = p2size - asize;
+ subreg = p2size / MPU_NR_SUBREGS;
+
+ if ((bdiff % subreg) || (sdiff % subreg))
+ return false;
+
+ bslots = bdiff / subreg;
+ sslots = sdiff / subreg;
+
+ if (bslots || sslots) {
+ int i;
+
+ if (subreg < MPU_MIN_SUBREG_SIZE)
+ return false;
+
+ if (bslots + sslots > MPU_NR_SUBREGS)
+ return false;
+
+ for (i = 0; i < bslots; i++)
+ _set_bit(i, &region->subreg);
+
+ for (i = 1; i <= sslots; i++)
+ _set_bit(MPU_NR_SUBREGS - i, &region->subreg);
+ }
+
+ region->base = abase;
+ region->size = p2size;
+
+ return true;
+}
+
+static int __init allocate_region(phys_addr_t base, phys_addr_t size,
+ unsigned int limit, struct region *regions)
+{
+ int count = 0;
+ phys_addr_t diff = size;
+ int attempts = MPU_MAX_REGIONS;
+
+ while (diff) {
+ /* Try cover region as is (maybe with help of subregions) */
+ if (try_split_region(base, size, &regions[count])) {
+ count++;
+ base += size;
+ diff -= size;
+ size = diff;
+ } else {
+ /*
+ * Maximum aligned region might overflow phys_addr_t
+ * if "base" is 0. Hence we keep everything below 4G
+ * until we take the smaller of the aligned region
+ * size ("asize") and rounded region size ("p2size"),
+ * one of which is guaranteed to be smaller than the
+ * maximum physical address.
+ */
+ phys_addr_t asize = (base - 1) ^ base;
+ phys_addr_t p2size = (1 << __fls(diff)) - 1;
+
+ size = asize < p2size ? asize + 1 : p2size + 1;
+ }
+
+ if (count > limit)
+ break;
+
+ if (!attempts)
+ break;
+
+ attempts--;
+ }
+
+ return count;
+}
+
/* MPU initialisation functions */
void __init adjust_lowmem_bounds_mpu(void)
{
phys_addr_t phys_offset = PHYS_OFFSET;
- phys_addr_t aligned_region_size, specified_mem_size, rounded_mem_size;
+ phys_addr_t specified_mem_size, total_mem_size = 0;
struct memblock_region *reg;
bool first = true;
phys_addr_t mem_start;
phys_addr_t mem_end;
+ unsigned int mem_max_regions;
+ int num, i;

if (!mpu_present())
return;

+ /* Free-up MPU_PROBE_REGION */
+ mpu_min_region_order = __mpu_min_region_order();
+
+ /* How many regions are supported */
+ mpu_max_regions = __mpu_max_regions();
+
+ mem_max_regions = min((unsigned int)MPU_MAX_REGIONS, mpu_max_regions);
+
+ /* We need to keep one slot for background region */
+ mem_max_regions--;
+
+#ifndef CONFIG_CPU_V7M
+ /* ... and one for vectors */
+ mem_max_regions--;
+#endif
for_each_memblock(memory, reg) {
if (first) {
/*
@@ -168,40 +281,23 @@ void __init adjust_lowmem_bounds_mpu(void)
}
}

- /*
- * MPU has curious alignment requirements: Size must be power of 2, and
- * region start must be aligned to the region size
- */
- if (phys_offset != 0)
- pr_info("PHYS_OFFSET != 0 => MPU Region size constrained by alignment requirements\n");
-
- /*
- * Maximum aligned region might overflow phys_addr_t if phys_offset is
- * 0. Hence we keep everything below 4G until we take the smaller of
- * the aligned_region_size and rounded_mem_size, one of which is
- * guaranteed to be smaller than the maximum physical address.
- */
- aligned_region_size = (phys_offset - 1) ^ (phys_offset);
- /* Find the max power-of-two sized region that fits inside our bank */
- rounded_mem_size = (1 << __fls(specified_mem_size)) - 1;
+ num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);

- /* The actual region size is the smaller of the two */
- aligned_region_size = aligned_region_size < rounded_mem_size
- ? aligned_region_size + 1
- : rounded_mem_size + 1;
+ for (i = 0; i < num; i++) {
+ unsigned long subreg = mem[i].size / MPU_NR_SUBREGS;

- if (aligned_region_size != specified_mem_size) {
- pr_warn("Truncating memory from %pa to %pa (MPU region constraints)",
- &specified_mem_size, &aligned_region_size);
- memblock_remove(mem_start + aligned_region_size,
- specified_mem_size - aligned_region_size);
+ total_mem_size += mem[i].size - subreg * hweight_long(mem[i].subreg);

- mem_end = mem_start + aligned_region_size;
+ pr_debug("MPU: base %pa size %pa disable subregions: %*pbl\n",
+ &mem[i].base, &mem[i].size, MPU_NR_SUBREGS, &mem[i].subreg);
}

- pr_debug("MPU Region from %pa size %pa (end %pa))\n",
- &phys_offset, &aligned_region_size, &mem_end);
-
+ if (total_mem_size != specified_mem_size) {
+ pr_warn("Truncating memory from %pa to %pa (MPU region constraints)",
+ &specified_mem_size, &total_mem_size);
+ memblock_remove(mem_start + total_mem_size,
+ specified_mem_size - total_mem_size);
+ }
}

static int __init __mpu_max_regions(void)
@@ -258,7 +354,8 @@ static int __init __mpu_min_region_order(void)
}

static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
- unsigned int size_order, unsigned int properties)
+ unsigned int size_order, unsigned int properties,
+ unsigned int subregions)
{
u32 size_data;

@@ -275,6 +372,7 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start,

/* Writing N to bits 5:1 (RSR_SZ) specifies region size 2^N+1 */
size_data = ((size_order - 1) << MPU_RSR_SZ) | 1 << MPU_RSR_EN;
+ size_data |= subregions << MPU_RSR_SD;

dsb(); /* Ensure all previous data accesses occur with old mappings */
rgnr_write(number);
@@ -308,33 +406,33 @@ static int __init mpu_setup_region(unsigned int number, phys_addr_t start,
*/
void __init mpu_setup(void)
{
- int region = 0, err = 0;
+ int i, region = 0, err = 0;

if (!mpu_present())
return;

- /* Free-up MPU_PROBE_REGION */
- mpu_min_region_order = __mpu_min_region_order();
-
- /* How many regions are supported */
- mpu_max_regions = __mpu_max_regions();
-
- /* Now setup MPU (order is important) */
+ /* Setup MPU (order is important) */

/* Background */
err |= mpu_setup_region(region++, 0, 32,
- MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA);
+ MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA,
+ 0);

/* RAM */
- err |= mpu_setup_region(region++, PHYS_OFFSET,
- ilog2(memblock.memory.regions[0].size),
- MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL);
+ for (i = 0; i < ARRAY_SIZE(mem); i++) {
+ if (!mem[i].size)
+ continue;
+
+ err |= mpu_setup_region(region++, mem[i].base, ilog2(mem[i].size),
+ MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL,
+ mem[i].subreg);
+ }

/* Vectors */
#ifndef CONFIG_CPU_V7M
- err |= mpu_setup_region(region++, vectors_base,
- ilog2(2 * PAGE_SIZE),
- MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL);
+ err |= mpu_setup_region(region++, vectors_base, ilog2(2 * PAGE_SIZE),
+ MPU_AP_PL1RW_PL0NA | MPU_RGN_NORMAL,
+ 0);
#endif
if (err) {
panic("MPU region initialization failure! %d", err);
--
2.0.0
Vladimir Murzin
2017-07-21 13:12:32 UTC
It seems that the MPU never worked with XIP, so just disallow that
combination.

Tested-by: Szemző András <***@esh.hu>
Signed-off-by: Vladimir Murzin <***@arm.com>
---
arch/arm/Kconfig-nommu | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu
index b757634..6d18395 100644
--- a/arch/arm/Kconfig-nommu
+++ b/arch/arm/Kconfig-nommu
@@ -52,7 +52,7 @@ config REMAP_VECTORS_TO_RAM

config ARM_MPU
bool 'Use the ARM v7 PMSA Compliant MPU'
- depends on CPU_V7
+ depends on !XIP_KERNEL && CPU_V7
default y
help
Some ARM systems without an MMU have instead a Memory Protection
--
2.0.0
Vladimir Murzin
2017-07-21 13:12:36 UTC
Currently, the early MPU setup code assumes that the kernel image is
located in RAM, which is obviously not true for XIP. To run code from
ROM we need to make sure it is covered by the MPU. However, because we
allocate regions (semi-)dynamically, we can run into the issue of
trimming the region we are running from in case the ROM spans several
MPU regions. To help deal with that, a new config option is introduced
which allows the ROM layout to be constructed in an MPU-friendly
fashion.
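
As a hypothetical example of how the option is meant to be used (the
values below are made up for illustration):

# values for illustration only
CONFIG_XIP_KERNEL=y
CONFIG_XIP_PHYS_ADDR=0x08000000
CONFIG_ARM_MPU=y
CONFIG_ARM_MPU_MINALIGN=0x100000

the ". = ALIGN(CONFIG_ARM_MPU_MINALIGN)" added to vmlinux-xip.lds.S
rounds _exiprom up to the next 1MB boundary, so the
CONFIG_XIP_PHYS_ADDR.._exiprom span both starts and ends on a 1MB
boundary and allocate_region() can cover it with whole regions and
subregions instead of trimming the tail of the ROM we are executing
from.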

Signed-off-by: Vladimir Murzin <***@arm.com>
---
arch/arm/Kconfig-nommu | 15 ++++++++++++++-
arch/arm/include/asm/mpu.h | 1 +
arch/arm/kernel/head-nommu.S | 10 ++++++++++
arch/arm/kernel/vmlinux-xip.lds.S | 3 +++
arch/arm/mm/pmsa-v7.c | 28 +++++++++++++++++++++++++++-
5 files changed, 55 insertions(+), 2 deletions(-)

diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu
index 930e000..79c2af7 100644
--- a/arch/arm/Kconfig-nommu
+++ b/arch/arm/Kconfig-nommu
@@ -52,7 +52,7 @@ config REMAP_VECTORS_TO_RAM

config ARM_MPU
bool 'Use the ARM v7 PMSA Compliant MPU'
- depends on !XIP_KERNEL && (CPU_V7 || CPU_V7M)
+ depends on CPU_V7 || CPU_V7M
default y if CPU_V7
help
Some ARM systems without an MMU have instead a Memory Protection
@@ -61,3 +61,16 @@ config ARM_MPU

If your CPU has an MPU then you should choose 'y' here unless you
know that you do not want to use the MPU.
+
+config ARM_MPU_MINALIGN
+ hex "Minimum alignment of MPU region"
+ depends on ARM_MPU && XIP_KERNEL
+ default "0x100000"
+ help
+ PMSAv7 defines following restrictions to the regions:
+ * size must be power of 2, and
+ * region start must be aligned to the region size
+ With a XIP kernel we need to make sure that (almost) all kernel image
+ is covered by MPU, so we will not trim region we are running from.
+ To help with that this option is given. Along with XIP_PHYS_ADDR it
+ allows to adopt kernel layout to meet aforementioned requirements.
diff --git a/arch/arm/include/asm/mpu.h b/arch/arm/include/asm/mpu.h
index 5db37a6..65699f3 100644
--- a/arch/arm/include/asm/mpu.h
+++ b/arch/arm/include/asm/mpu.h
@@ -41,6 +41,7 @@
#endif

/* Access permission bits of ACR (only define those that we use)*/
+#define MPU_AP_PL1RO_PL0NA (0x5 << 8)
#define MPU_AP_PL1RW_PL0RW (0x3 << 8)
#define MPU_AP_PL1RW_PL0R0 (0x2 << 8)
#define MPU_AP_PL1RW_PL0NA (0x1 << 8)
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 0d64b8b..5674c2e 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -69,8 +69,13 @@ ENTRY(stext)

#ifdef CONFIG_ARM_MPU
/* Calculate the size of a region covering just the kernel */
+#ifndef CONFIG_XIP_KERNEL
ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
ldr r6, =(_end) @ Cover whole kernel
+#else
+ ldr r5, =CONFIG_XIP_PHYS_ADDR @ ROM start
+ ldr r6, =(_exiprom) @ ROM end
+#endif
sub r6, r6, r5 @ Minimum size of region to map
clz r6, r6 @ Region size must be 2^N...
rsb r6, r6, #31 @ ...so round up region size
@@ -237,8 +242,13 @@ M_CLASS(ldr r0, [r12, #MPU_TYPE])
set_region_nr r0, #MPU_RAM_REGION, r12
isb
/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
+#ifndef CONFIG_XIP_KERNEL
ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
+#else
+ ldr r0, =CONFIG_XIP_PHYS_ADDR @ ROM start
+ ldr r5,=(MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL)
+#endif

setup_region r0, r5, r6, MPU_DATA_SIDE, r12 @ PHYS_OFFSET, shared, enabled
beq 1f @ Memory-map not unified
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index 8265b11..b7344d5 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -205,6 +205,9 @@ SECTIONS
PERCPU_SECTION(L1_CACHE_BYTES)
#endif

+#ifdef CONFIG_ARM_MPU
+ . = ALIGN(CONFIG_ARM_MPU_MINALIGN);
+#endif
_exiprom = .; /* End of XIP ROM area */
__data_loc = ALIGN(4); /* location in binary */
. = PAGE_OFFSET + TEXT_OFFSET;
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
index c1f1fc7..808384b 100644
--- a/arch/arm/mm/pmsa-v7.c
+++ b/arch/arm/mm/pmsa-v7.c
@@ -10,6 +10,7 @@
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/mpu.h>
+#include <asm/sections.h>

#include "mm.h"

@@ -20,6 +21,9 @@ struct region {
};

static struct region __initdata mem[MPU_MAX_REGIONS];
+#ifdef CONFIG_XIP_KERNEL
+static struct region __initdata xip[MPU_MAX_REGIONS];
+#endif

static unsigned int __initdata mpu_min_region_order;
static unsigned int __initdata mpu_max_regions;
@@ -229,7 +233,6 @@ static int __init allocate_region(phys_addr_t base, phys_addr_t size,
/* MPU initialisation functions */
void __init adjust_lowmem_bounds_mpu(void)
{
- phys_addr_t phys_offset = PHYS_OFFSET;
phys_addr_t specified_mem_size, total_mem_size = 0;
struct memblock_region *reg;
bool first = true;
@@ -256,8 +259,19 @@ void __init adjust_lowmem_bounds_mpu(void)
/* ... and one for vectors */
mem_max_regions--;
#endif
+
+#ifdef CONFIG_XIP_KERNEL
+ /* plus some regions to cover XIP ROM */
+ num = allocate_region(CONFIG_XIP_PHYS_ADDR, __pa(_exiprom) - CONFIG_XIP_PHYS_ADDR,
+ mem_max_regions, xip);
+
+ mem_max_regions -= num;
+#endif
+
for_each_memblock(memory, reg) {
if (first) {
+ phys_addr_t phys_offset = PHYS_OFFSET;
+
/*
* Initially only use memory continuous from
* PHYS_OFFSET */
@@ -418,6 +432,18 @@ void __init mpu_setup(void)
MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA,
0);

+#ifdef CONFIG_XIP_KERNEL
+ /* ROM */
+ for (i = 0; i < ARRAY_SIZE(xip); i++) {
+ if (!xip[i].size)
+ continue;
+
+ err |= mpu_setup_region(region++, xip[i].base, ilog2(xip[i].size),
+ MPU_AP_PL1RO_PL0NA | MPU_RGN_NORMAL,
+ xip[i].subreg);
+ }
+#endif
+
/* RAM */
for (i = 0; i < ARRAY_SIZE(mem); i++) {
if (!mem[i].size)
--
2.0.0