/*
 * Cache.cpp
 *
 *  Created on: 22 Nov 2019
 *      Author: David
 */

#include <Hardware/Cache.h>

#if USE_CACHE

#if SAME70
# include <core_cm7.h>

extern uint32_t _nocache_ram_start;
extern uint32_t _nocache_ram_end;
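// These symbols come from the linker script. They delimit the RAM section that is mapped non-cacheable, and
// Flush/Invalidate below skip cache maintenance for buffers that lie inside it.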

# if USE_MPU
#  include <mpu_armv7.h>

// Macro ARM_MPU_RASR_EX is incorrectly defined in CMSIS 5.4.0, see https://github.com/ARM-software/CMSIS_5/releases. Redefine it here.

#  undef ARM_MPU_RASR_EX

/**
* MPU Region Attribute and Size Register Value
*
* \param DisableExec Instruction access disable bit, 1= disable instruction fetches.
* \param AccessPermission Data access permissions, allows you to configure read/write access for User and Privileged mode.
* \param AccessAttributes Memory access attribution, see \ref ARM_MPU_ACCESS_.
* \param SubRegionDisable Sub-region disable field.
* \param Size Region size of the region to be configured, for example 4K, 8K.
*/
#  define ARM_MPU_RASR_EX(DisableExec, AccessPermission, AccessAttributes, SubRegionDisable, Size) \
	((((DisableExec) << MPU_RASR_XN_Pos) & MPU_RASR_XN_Msk) | \
	(((AccessPermission) << MPU_RASR_AP_Pos) & MPU_RASR_AP_Msk) | \
	(((AccessAttributes) & (MPU_RASR_TEX_Msk | MPU_RASR_S_Msk | MPU_RASR_C_Msk | MPU_RASR_B_Msk))) | \
	(((SubRegionDisable) << MPU_RASR_SRD_Pos) & MPU_RASR_SRD_Msk) | \
	(((Size) << MPU_RASR_SIZE_Pos) & MPU_RASR_SIZE_Msk) | \
	(((MPU_RASR_ENABLE_Msk))))
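
// As an illustration (the example call is taken from the region table below), an invocation such as
//   ARM_MPU_RASR_EX(1u, ARM_MPU_AP_FULL, ARM_MPU_ACCESS_ORDERED, 0u, ARM_MPU_REGION_SIZE_64KB)
// sets XN = 1 (no instruction fetches), full read/write access permissions, strongly-ordered (hence non-cacheable)
// memory attributes, no sub-regions disabled, the 64KB size encoding, and the region ENABLE bit.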

# endif

#endif

#if SAM4E
# include <cmcc/cmcc.h>
#endif

static bool enabled = false;

void Cache::Init()
{
#if SAME70

# if USE_MPU
	// Set up the MPU so that we can have a non-cacheable RAM region, and so that we can trap accesses to non-existent memory.
	// Where regions overlap, the region with the highest region number takes priority.
	constexpr ARM_MPU_Region_t regionTable[] =
	{
		// Flash memory: read-only, execute allowed, cacheable
		{
			ARM_MPU_RBAR(0, IFLASH_ADDR),
			ARM_MPU_RASR_EX(0u, ARM_MPU_AP_RO, ARM_MPU_ACCESS_NORMAL(ARM_MPU_CACHEP_WB_WRA, ARM_MPU_CACHEP_WB_WRA, 1u), 0u, ARM_MPU_REGION_SIZE_1MB)
		},
		// First 256KB of RAM: read-write, cacheable, execute disabled. Parts of this are overridden later.
		{
			ARM_MPU_RBAR(1, IRAM_ADDR),
			ARM_MPU_RASR_EX(1u, ARM_MPU_AP_FULL, ARM_MPU_ACCESS_NORMAL(ARM_MPU_CACHEP_WB_WRA, ARM_MPU_CACHEP_WB_WRA, 1u), 0u, ARM_MPU_REGION_SIZE_256KB)
		},
		// Final 128KB of RAM: read-write, cacheable, execute disabled
		{
			ARM_MPU_RBAR(2, IRAM_ADDR + 0x00040000),
			ARM_MPU_RASR_EX(1u, ARM_MPU_AP_FULL, ARM_MPU_ACCESS_NORMAL(ARM_MPU_CACHEP_WB_WRA, ARM_MPU_CACHEP_WB_WRA, 1u), 0u, ARM_MPU_REGION_SIZE_128KB)
		},
		// Non-cacheable RAM. This is placed at the start of RAM (ahead of normal RAM) because it includes the CAN buffers, which must be within the first 64KB.
		// Read/write, execute disabled, non-cacheable
		{
			ARM_MPU_RBAR(3, IRAM_ADDR),
			ARM_MPU_RASR_EX(1u, ARM_MPU_AP_FULL, ARM_MPU_ACCESS_ORDERED, 0u, ARM_MPU_REGION_SIZE_64KB)
		},
		// RAMFUNC memory. Read-only (the code has already been written to it), execution allowed. The initialised data memory follows, so it must be RW.
		// 256 bytes is enough at present (check the linker memory map if adding more RAMFUNCs).
		{
			ARM_MPU_RBAR(4, IRAM_ADDR + 0x00010000),
			ARM_MPU_RASR_EX(0u, ARM_MPU_AP_FULL, ARM_MPU_ACCESS_NORMAL(ARM_MPU_CACHEP_WB_WRA, ARM_MPU_CACHEP_WB_WRA, 1u), 0u, ARM_MPU_REGION_SIZE_256B)
		},
		// Peripherals
		{
			ARM_MPU_RBAR(5, 0x40000000),
			ARM_MPU_RASR_EX(1u, ARM_MPU_AP_FULL, ARM_MPU_ACCESS_DEVICE(1u), 0u, ARM_MPU_REGION_SIZE_16MB)
		},
		// USBHS
		{
			ARM_MPU_RBAR(6, 0xA0100000),
			ARM_MPU_RASR_EX(1u, ARM_MPU_AP_FULL, ARM_MPU_ACCESS_DEVICE(1u), 0u, ARM_MPU_REGION_SIZE_1MB)
		},
		// ROM
		{
			ARM_MPU_RBAR(7, IROM_ADDR),
			ARM_MPU_RASR_EX(0u, ARM_MPU_AP_RO, ARM_MPU_ACCESS_NORMAL(ARM_MPU_CACHEP_WB_WRA, ARM_MPU_CACHEP_WB_WRA, 1u), 0u, ARM_MPU_REGION_SIZE_4MB)
		},
		// ARM Private Peripheral Bus
		{
			ARM_MPU_RBAR(8, 0xE0000000),
			ARM_MPU_RASR_EX(1u, ARM_MPU_AP_FULL, ARM_MPU_ACCESS_ORDERED, 0u, ARM_MPU_REGION_SIZE_1MB)
		}
	};
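
	// Note that regions 3 and 4 overlap region 1 and take precedence over it because they have higher region numbers.
	// Region 3 uses strongly-ordered attributes, so accesses to the non-cached RAM area bypass the data cache and
	// DMA peripherals see the same data as the CPU without explicit cache maintenance.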

	// Ensure MPU is disabled
	ARM_MPU_Disable();

	// Clear all regions
	const uint32_t numRegions = (MPU->TYPE & MPU_TYPE_DREGION_Msk) >> MPU_TYPE_DREGION_Pos;
	for (unsigned int region = 0; region < numRegions; ++region)
	{
		ARM_MPU_ClrRegion(region);
	}

	// Load regions from our table
	ARM_MPU_Load(regionTable, ARRAY_SIZE(regionTable));

	// Enable the MPU, disabling the default map but allowing exception handlers to use it
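	// 0x01 sets only the ENABLE bit of MPU->CTRL: PRIVDEFENA stays clear so the default memory map is disabled for
	// normal code, and HFNMIENA stays clear so HardFault and NMI handlers still run with the default map.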
	ARM_MPU_Enable(0x01);
# endif

#elif SAM4E
	cmcc_config g_cmcc_cfg;
	cmcc_get_config_defaults(&g_cmcc_cfg);
	cmcc_init(CMCC, &g_cmcc_cfg);
#endif
}

void Cache::Enable()
{
	if (!enabled)
	{
		enabled = true;
#if SAME70
		SCB_EnableICache();
		SCB_EnableDCache();

#elif SAM4E
		cmcc_invalidate_all(CMCC);
		cmcc_enable(CMCC);
#endif
	}
}

void Cache::Disable()
{
	if (enabled)
	{
#if SAME70
		SCB_DisableICache();
		SCB_DisableDCache();
#elif SAM4E
		cmcc_disable(CMCC);
#endif
		enabled = false;
	}
}

#if SAME70

void Cache::Flush(const volatile void *start, size_t length)
{
	if (enabled)
	{
		// We assume that the DMA buffer is entirely inside or entirely outside the non-cached RAM area
		if (start < (void*)&_nocache_ram_start || start >= (void*)&_nocache_ram_end)
		{
			const uint32_t startAddr = reinterpret_cast<uint32_t>(start);
			SCB_CleanDCache_by_Addr(reinterpret_cast<uint32_t*>(startAddr & ~3), length + (startAddr & 3));
		}
	}
}
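
// Cache::Flush cleans (writes back) any dirty cache lines covering the buffer so that RAM holds the up-to-date data.
// It is needed before a peripheral reads the buffer by DMA, and also before a DMA receive into the buffer so that a
// later invalidate cannot throw away dirty data that still had to be written back.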

#endif

void Cache::Invalidate(const volatile void *start, size_t length)
{
	if (enabled)
	{
#if SAME70
		// We assume that the DMA buffer is entirely inside or entirely outside the non-cached RAM area
		if (start < (void*)&_nocache_ram_start || start >= (void*)&_nocache_ram_end)
		{
			// Caution! If any part of a cache line in this range is dirty, the data written to it will be lost!
			const uint32_t startAddr = reinterpret_cast<uint32_t>(start);
			SCB_InvalidateDCache_by_Addr(reinterpret_cast<uint32_t*>(startAddr & ~3), length + (startAddr & 3));
		}
#elif SAM4E
		// The cache is only 2KB on the SAM4E, so we just invalidate the whole cache
		cmcc_invalidate_all(CMCC);
#endif
	}
}
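
// Cache::Invalidate discards any cached copy of the buffer, so that subsequent CPU reads fetch the data that DMA has
// just written to RAM. Call it after a DMA receive completes and before the received data is read.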

#if SAM4E

uint32_t Cache::GetHitCount()
{
	return cmcc_get_monitor_cnt(CMCC);
}

#endif

#endif	// USE_CACHE

// Entry points that can be called from ASF C code
void CacheFlushBeforeDMAReceive(const volatile void *start, size_t length) { Cache::FlushBeforeDMAReceive(start, length); }
void CacheInvalidateAfterDMAReceive(const volatile void *start, size_t length) { Cache::InvalidateAfterDMAReceive(start, length); }
void CacheFlushBeforeDMASend(const volatile void *start, size_t length) { Cache::FlushBeforeDMASend(start, length); }
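
// Illustrative call sequence for a driver receiving data by DMA (FlushBeforeDMAReceive etc. are assumed to be thin
// wrappers around Flush/Invalidate declared in Cache.h):
//   CacheFlushBeforeDMAReceive(buf, len);      // clean the buffer before starting the DMA receive
//   ... start the DMA transfer and wait for it to complete ...
//   CacheInvalidateAfterDMAReceive(buf, len);  // discard stale cached data before the CPU reads the buffer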

// End