Replace cache clean/invalidate by MPU config.

HiFiPhile
2024-11-02 12:45:32 +01:00
parent 933ac29d77
commit 56f53a6132
5 changed files with 425 additions and 115 deletions

View File

@@ -24,30 +24,43 @@
* This file is part of the TinyUSB stack.
*/
#include "bsp/board_api.h"
#include "board.h"
#include "board/clock_config.h"
#include "board/pin_mux.h"
#include "board.h"
#include "bsp/board_api.h"
// Suppress warning caused by mcu driver
#ifdef __GNUC__
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#endif
#include "fsl_clock.h"
#include "fsl_device_registers.h"
#include "fsl_gpio.h"
#include "fsl_iomuxc.h"
#include "fsl_clock.h"
#include "fsl_lpuart.h"
#include "fsl_ocotp.h"
#ifdef __GNUC__
#pragma GCC diagnostic pop
#pragma GCC diagnostic pop
#endif
/* --- Note about USB buffer RAM ---
For the M7 core it's recommended to put the USB buffer in DTCM for better performance (flexspi_nor linker default).
Otherwise you have to put the buffer in a non-cacheable section, by configuring the MPU manually or using BOARD_ConfigMPU() (see the sketch below):
- Define CFG_TUSB_MEM_SECTION=__attribute__((section("NonCacheable")))
- (IAR only) Change __NCACHE_REGION_SIZE in the linker script to cover the size of the non-cacheable section (a power of two, 2^N)
For the secondary M4 core, the USB controller doesn't support transfers from DTCM, so OCRAM must be used:
- __NCACHE_REGION_SIZE is defined by the linker script by default
- Define CFG_TUSB_MEM_SECTION=__attribute__((section("NonCacheable")))
*/
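/* For example, a minimal sketch (assuming a "NonCacheable" output section exists in the linker script):
 * the macro expands to a variable attribute, so any buffer it decorates is placed in that section.
 *
 *   #define CFG_TUSB_MEM_SECTION  __attribute__((section("NonCacheable")))
 *   CFG_TUSB_MEM_SECTION static uint8_t usb_xfer_buf[64];  // illustrative buffer, kept out of cacheable RAM
 */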
static void BOARD_ConfigMPU(void);
// needed by fsl_flexspi_nor_boot
TU_ATTR_USED const uint8_t dcd_data[] = { 0x00 };
TU_ATTR_USED const uint8_t dcd_data[] = {0x00};
//--------------------------------------------------------------------+
//
@@ -59,20 +72,20 @@ TU_ATTR_USED const uint8_t dcd_data[] = { 0x00 };
#endif
static void init_usb_phy(uint8_t usb_id) {
USBPHY_Type* usb_phy;
USBPHY_Type *usb_phy;
if (usb_id == 0) {
usb_phy = USBPHY1;
CLOCK_EnableUsbhs0PhyPllClock(kCLOCK_Usbphy480M, BOARD_XTAL0_CLK_HZ);
CLOCK_EnableUsbhs0Clock(kCLOCK_Usb480M, BOARD_XTAL0_CLK_HZ);
}
#ifdef USBPHY2
#ifdef USBPHY2
else if (usb_id == 1) {
usb_phy = USBPHY2;
CLOCK_EnableUsbhs1PhyPllClock(kCLOCK_Usbphy480M, BOARD_XTAL0_CLK_HZ);
CLOCK_EnableUsbhs1Clock(kCLOCK_Usb480M, BOARD_XTAL0_CLK_HZ);
}
#endif
#endif
else {
return;
}
@@ -91,13 +104,8 @@ static void init_usb_phy(uint8_t usb_id) {
usb_phy->TX = phytx;
}
void board_init(void)
{
// make sure the dcache is on.
#if defined(__DCACHE_PRESENT) && __DCACHE_PRESENT
if (SCB_CCR_DC_Msk != (SCB_CCR_DC_Msk & SCB->CCR)) SCB_EnableDCache();
#endif
void board_init(void) {
BOARD_ConfigMPU();
BOARD_InitPins();
BOARD_BootClockRUN();
SystemCoreClockUpdate();
@@ -113,9 +121,9 @@ void board_init(void)
#elif CFG_TUSB_OS == OPT_OS_FREERTOS
// If FreeRTOS is used, IRQ priority is limited by max syscall (smaller is higher)
NVIC_SetPriority(USB_OTG1_IRQn, configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY);
#ifdef USBPHY2
#ifdef USBPHY2
NVIC_SetPriority(USB_OTG2_IRQn, configLIBRARY_MAX_SYSCALL_INTERRUPT_PRIORITY);
#endif
#endif
#endif
board_led_write(true);
@@ -127,19 +135,397 @@ void board_init(void)
uart_config.enableTx = true;
uart_config.enableRx = true;
if ( kStatus_Success != LPUART_Init(UART_PORT, &uart_config, UART_CLK_ROOT) ) {
if (kStatus_Success != LPUART_Init(UART_PORT, &uart_config, UART_CLK_ROOT)) {
// failed to init uart, probably baudrate is not supported
// TU_BREAKPOINT();
}
//------------- USB -------------//
// Note: RT105x, RT106x and later have dual USB controllers.
init_usb_phy(0); // USB0
init_usb_phy(0);// USB0
#ifdef USBPHY2
init_usb_phy(1); // USB1
init_usb_phy(1);// USB1
#endif
}
/* MPU configuration. */
#if __CORTEX_M == 7
static void BOARD_ConfigMPU(void) {
#if defined(__CC_ARM) || defined(__ARMCC_VERSION)
extern uint32_t Image$$RW_m_ncache$$Base[];
/* RW_m_ncache_unused is an auxiliary region used to get the whole size of the noncache section */
extern uint32_t Image$$RW_m_ncache_unused$$Base[];
extern uint32_t Image$$RW_m_ncache_unused$$ZI$$Limit[];
uint32_t nonCacheStart = (uint32_t) Image$$RW_m_ncache$$Base;
uint32_t size = ((uint32_t) Image$$RW_m_ncache_unused$$Base == nonCacheStart) ? 0 : ((uint32_t) Image$$RW_m_ncache_unused$$ZI$$Limit - nonCacheStart);
#elif defined(__MCUXPRESSO)
#if defined(__USE_SHMEM)
extern uint32_t __base_rpmsg_sh_mem;
extern uint32_t __top_rpmsg_sh_mem;
uint32_t nonCacheStart = (uint32_t) (&__base_rpmsg_sh_mem);
uint32_t size = (uint32_t) (&__top_rpmsg_sh_mem) - nonCacheStart;
#else
extern uint32_t __base_NCACHE_REGION;
extern uint32_t __top_NCACHE_REGION;
uint32_t nonCacheStart = (uint32_t) (&__base_NCACHE_REGION);
uint32_t size = (uint32_t) (&__top_NCACHE_REGION) - nonCacheStart;
#endif
#elif defined(__ICCARM__) || defined(__GNUC__)
extern uint32_t __NCACHE_REGION_START[];
extern uint32_t __NCACHE_REGION_SIZE[];
uint32_t nonCacheStart = (uint32_t) __NCACHE_REGION_START;
uint32_t size = (uint32_t) __NCACHE_REGION_SIZE;
#endif
volatile uint32_t i = 0;
#if defined(__ICACHE_PRESENT) && __ICACHE_PRESENT
/* Disable I cache and D cache */
if (SCB_CCR_IC_Msk == (SCB_CCR_IC_Msk & SCB->CCR)) {
SCB_DisableICache();
}
#endif
#if defined(__DCACHE_PRESENT) && __DCACHE_PRESENT
if (SCB_CCR_DC_Msk == (SCB_CCR_DC_Msk & SCB->CCR)) {
SCB_DisableDCache();
}
#endif
/* Disable MPU */
ARM_MPU_Disable();
/* MPU configure:
 * Use ARM_MPU_RASR(DisableExec, AccessPermission, TypeExtField, IsShareable, IsCacheable, IsBufferable,
 *                  SubRegionDisable, Size) from mpu_armv7.h.
 * param DisableExec       Instruction access (XN) disable bit: 0 = instruction fetches enabled,
 *                         1 = instruction fetches disabled.
 * param AccessPermission  Data access permissions; configures read/write access for User and Privileged mode.
 *                         Use the macros defined in mpu_armv7.h:
 *                         ARM_MPU_AP_NONE/ARM_MPU_AP_PRIV/ARM_MPU_AP_URO/ARM_MPU_AP_FULL/ARM_MPU_AP_PRO/ARM_MPU_AP_RO
 * Combine TypeExtField/IsShareable/IsCacheable/IsBufferable to configure the MPU memory access attributes:
 *   TypeExtField  IsShareable  IsCacheable  IsBufferable  Memory Attribute  Shareability   Cache policy
 *        0             x            0            0        Strongly Ordered  shareable      -
 *        0             x            0            1        Device            shareable      -
 *        0             0            1            0        Normal            not shareable  outer/inner write-through, no write allocate
 *        0             0            1            1        Normal            not shareable  outer/inner write-back, no write allocate
 *        0             1            1            0        Normal            shareable      outer/inner write-through, no write allocate
 *        0             1            1            1        Normal            shareable      outer/inner write-back, no write allocate
 *        1             0            0            0        Normal            not shareable  outer/inner non-cacheable
 *        1             1            0            0        Normal            shareable      outer/inner non-cacheable
 *        1             0            1            1        Normal            not shareable  outer/inner write-back, write/read allocate
 *        1             1            1            1        Normal            shareable      outer/inner write-back, write/read allocate
 *        2             x            0            0        Device            not shareable  -
 * These are the common settings; for more details or different inner/outer cache policies,
 * refer to Table 4-55/4-56 in the Arm Cortex-M7 Generic User Guide (dui0646b_cortex_m7_dgug.pdf).
 * param SubRegionDisable  Sub-region disable field: 0 = sub-region enabled, 1 = sub-region disabled.
 * param Size              Region size; use the ARM_MPU_REGION_SIZE_xxx macros in mpu_armv7.h.
 */
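/* Worked example (a sketch tying the table above to the calls below): "Normal, not shareable,
 * outer/inner write-back" corresponds to TypeExtField=0, IsShareable=0, IsCacheable=1, IsBufferable=1,
 * so a full-access 512 kB region at 0x20300000 is programmed as
 *   MPU->RBAR = ARM_MPU_RBAR(7, 0x20300000U);
 *   MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 0, 0, 1, 1, 0, ARM_MPU_REGION_SIZE_512KB);
 * which is exactly Region 7 in the write-back branch further down. */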
/*
 * Add a default region to deny access to the whole address space, to work around speculative prefetch.
 * Refer to Arm errata 1013783-B for more details.
 */
/* Region 0 setting: Instruction access disabled, No data access permission. */
MPU->RBAR = ARM_MPU_RBAR(0, 0x00000000U);
MPU->RASR = ARM_MPU_RASR(1, ARM_MPU_AP_NONE, 0, 0, 0, 0, 0, ARM_MPU_REGION_SIZE_4GB);
/* Region 1 setting: Memory with Device type, not shareable, non-cacheable. */
MPU->RBAR = ARM_MPU_RBAR(1, 0x80000000U);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 2, 0, 0, 0, 0, ARM_MPU_REGION_SIZE_512MB);
/* Region 2 setting: Memory with Device type, not shareable, non-cacheable. */
MPU->RBAR = ARM_MPU_RBAR(2, 0x60000000U);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 2, 0, 0, 0, 0, ARM_MPU_REGION_SIZE_512MB);
/* Region 3 setting: Memory with Device type, not shareable, non-cacheable. */
MPU->RBAR = ARM_MPU_RBAR(3, 0x00000000U);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 2, 0, 0, 0, 0, ARM_MPU_REGION_SIZE_1GB);
/* Region 4 setting: Memory with Normal type, not shareable, outer/inner write back */
MPU->RBAR = ARM_MPU_RBAR(4, 0x00000000U);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 0, 0, 1, 1, 0, ARM_MPU_REGION_SIZE_256KB);
/* Region 5 setting: Memory with Normal type, not shareable, outer/inner write back */
MPU->RBAR = ARM_MPU_RBAR(5, 0x20000000U);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 0, 0, 1, 1, 0, ARM_MPU_REGION_SIZE_256KB);
#if defined(CACHE_MODE_WRITE_THROUGH) && CACHE_MODE_WRITE_THROUGH
/* Region 6 setting: Memory with Normal type, not shareable, write through */
MPU->RBAR = ARM_MPU_RBAR(6, 0x20200000U);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 0, 0, 1, 0, 0, ARM_MPU_REGION_SIZE_1MB);
/* Region 7 setting: Memory with Normal type, not shareable, write through */
MPU->RBAR = ARM_MPU_RBAR(7, 0x20300000U);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 0, 0, 1, 0, 0, ARM_MPU_REGION_SIZE_512KB);
#else
/* Region 6 setting: Memory with Normal type, not shareable, outer/inner write back */
MPU->RBAR = ARM_MPU_RBAR(6, 0x20200000U);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 0, 0, 1, 1, 0, ARM_MPU_REGION_SIZE_1MB);
/* Region 7 setting: Memory with Normal type, not shareable, outer/inner write back */
MPU->RBAR = ARM_MPU_RBAR(7, 0x20300000U);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 0, 0, 1, 1, 0, ARM_MPU_REGION_SIZE_512KB);
#endif
#if defined(XIP_EXTERNAL_FLASH) && (XIP_EXTERNAL_FLASH == 1)
/* Region 8 setting: Memory with Normal type, not shareable, outer/inner write back. */
MPU->RBAR = ARM_MPU_RBAR(8, 0x30000000U);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_RO, 0, 0, 1, 1, 0, ARM_MPU_REGION_SIZE_16MB);
#endif
#ifdef USE_SDRAM
#if defined(CACHE_MODE_WRITE_THROUGH) && CACHE_MODE_WRITE_THROUGH
/* Region 9 setting: Memory with Normal type, not shareable, write through */
MPU->RBAR = ARM_MPU_RBAR(9, 0x80000000U);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 0, 0, 1, 0, 0, ARM_MPU_REGION_SIZE_64MB);
#else
/* Region 9 setting: Memory with Normal type, not shareable, outer/inner write back */
MPU->RBAR = ARM_MPU_RBAR(9, 0x80000000U);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 0, 0, 1, 1, 0, ARM_MPU_REGION_SIZE_64MB);
#endif
#endif
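/* The loop below computes log2(size): afterwards i is the index of the highest set bit of the
 * non-cacheable section size. The ARMv7-M MPU encodes a region size of 2^(SIZE+1) bytes, so i - 1 is
 * passed as the Size field. For example, a 16 kB NonCacheable section gives i = 14 and a Size field
 * of 13, i.e. ARM_MPU_REGION_SIZE_16KB. */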
while ((size >> i) > 0x1U) {
i++;
}
if (i != 0) {
/* The MPU region size should be 2^N, 5<=N<=32, region base should be multiples of size. */
assert(!(nonCacheStart % size));
assert(size == (uint32_t) (1 << i));
assert(i >= 5);
/* Region 10 setting: Memory with Normal type, not shareable, non-cacheable */
MPU->RBAR = ARM_MPU_RBAR(10, nonCacheStart);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 1, 0, 0, 0, 0, i - 1);
}
/* Region 11 setting: Memory with Device type, not shareable, non-cacheable */
MPU->RBAR = ARM_MPU_RBAR(11, 0x40000000);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 2, 0, 0, 0, 0, ARM_MPU_REGION_SIZE_16MB);
/* Region 12 setting: Memory with Device type, not shareable, non-cacheable */
MPU->RBAR = ARM_MPU_RBAR(12, 0x41000000);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 2, 0, 0, 0, 0, ARM_MPU_REGION_SIZE_2MB);
/* Region 13 setting: Memory with Device type, not shareable, non-cacheable */
MPU->RBAR = ARM_MPU_RBAR(13, 0x41400000);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 2, 0, 0, 0, 0, ARM_MPU_REGION_SIZE_1MB);
/* Region 14 setting: Memory with Device type, not shareable, non-cacheable */
MPU->RBAR = ARM_MPU_RBAR(14, 0x41800000);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 2, 0, 0, 0, 0, ARM_MPU_REGION_SIZE_2MB);
/* Region 15 setting: Memory with Device type, not shareable, non-cacheable */
MPU->RBAR = ARM_MPU_RBAR(15, 0x42000000);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 2, 0, 0, 0, 0, ARM_MPU_REGION_SIZE_1MB);
/* Enable MPU */
ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_HFNMIENA_Msk);
/* Enable I cache and D cache */
#if defined(__DCACHE_PRESENT) && __DCACHE_PRESENT
SCB_EnableDCache();
#endif
#if defined(__ICACHE_PRESENT) && __ICACHE_PRESENT
SCB_EnableICache();
#endif
}
#elif __CORTEX_M == 4
void BOARD_ConfigMPU(void) {
#if defined(__CC_ARM) || defined(__ARMCC_VERSION)
extern uint32_t Image$$RW_m_ncache$$Base[];
/* RW_m_ncache_unused is an auxiliary region used to get the whole size of the noncache section */
extern uint32_t Image$$RW_m_ncache_unused$$Base[];
extern uint32_t Image$$RW_m_ncache_unused$$ZI$$Limit[];
uint32_t nonCacheStart = (uint32_t) Image$$RW_m_ncache$$Base;
uint32_t nonCacheSize = ((uint32_t) Image$$RW_m_ncache_unused$$Base == nonCacheStart) ? 0 : ((uint32_t) Image$$RW_m_ncache_unused$$ZI$$Limit - nonCacheStart);
#elif defined(__MCUXPRESSO)
extern uint32_t __base_NCACHE_REGION;
extern uint32_t __top_NCACHE_REGION;
uint32_t nonCacheStart = (uint32_t) (&__base_NCACHE_REGION);
uint32_t nonCacheSize = (uint32_t) (&__top_NCACHE_REGION) - nonCacheStart;
#elif defined(__ICCARM__) || defined(__GNUC__)
extern uint32_t __NCACHE_REGION_START[];
extern uint32_t __NCACHE_REGION_SIZE[];
uint32_t nonCacheStart = (uint32_t) __NCACHE_REGION_START;
uint32_t nonCacheSize = (uint32_t) __NCACHE_REGION_SIZE;
#endif
#if defined(__USE_SHMEM)
#if defined(__CC_ARM) || defined(__ARMCC_VERSION)
extern uint32_t Image$$RPMSG_SH_MEM$$Base[];
/* RPMSG_SH_MEM_unused is an auxiliary region used to get the whole size of the RPMSG_SH_MEM section */
extern uint32_t Image$$RPMSG_SH_MEM_unused$$Base[];
extern uint32_t Image$$RPMSG_SH_MEM_unused$$ZI$$Limit[];
uint32_t rpmsgShmemStart = (uint32_t) Image$$RPMSG_SH_MEM$$Base;
uint32_t rpmsgShmemSize = (uint32_t) Image$$RPMSG_SH_MEM_unused$$ZI$$Limit - rpmsgShmemStart;
#elif defined(__MCUXPRESSO)
extern uint32_t __base_rpmsg_sh_mem;
extern uint32_t __top_rpmsg_sh_mem;
uint32_t rpmsgShmemStart = (uint32_t) (&__base_rpmsg_sh_mem);
uint32_t rpmsgShmemSize = (uint32_t) (&__top_rpmsg_sh_mem) - rpmsgShmemStart;
#elif defined(__ICCARM__) || defined(__GNUC__)
extern uint32_t __RPMSG_SH_MEM_START[];
extern uint32_t __RPMSG_SH_MEM_SIZE[];
uint32_t rpmsgShmemStart = (uint32_t) __RPMSG_SH_MEM_START;
uint32_t rpmsgShmemSize = (uint32_t) __RPMSG_SH_MEM_SIZE;
#endif
#endif
uint32_t i = 0;
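/* On the Cortex-M4 the LMEM code-bus cache (PCCCR) covers addresses below 0x20000000 and the
 * system-bus cache (PSCCR) covers 0x20000000 and above (assumed LMEM split on this part), so the
 * MPU-managed non-cacheable region must live on the system bus. */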
/* Only configure a non-cacheable region on the system bus */
assert(nonCacheStart >= 0x20000000);
/* Disable code bus cache */
if (LMEM_PCCCR_ENCACHE_MASK == (LMEM_PCCCR_ENCACHE_MASK & LMEM->PCCCR)) {
/* Enable the processor code bus to push all modified lines. */
LMEM->PCCCR |= LMEM_PCCCR_PUSHW0_MASK | LMEM_PCCCR_PUSHW1_MASK | LMEM_PCCCR_GO_MASK;
/* Wait until the cache command completes. */
while ((LMEM->PCCCR & LMEM_PCCCR_GO_MASK) != 0U) {
}
/* As a precaution clear the bits to avoid inadvertently re-running this command. */
LMEM->PCCCR &= ~(LMEM_PCCCR_PUSHW0_MASK | LMEM_PCCCR_PUSHW1_MASK);
/* Now disable the cache. */
LMEM->PCCCR &= ~LMEM_PCCCR_ENCACHE_MASK;
}
/* Disable system bus cache */
if (LMEM_PSCCR_ENCACHE_MASK == (LMEM_PSCCR_ENCACHE_MASK & LMEM->PSCCR)) {
/* Enable the processor system bus to push all modified lines. */
LMEM->PSCCR |= LMEM_PSCCR_PUSHW0_MASK | LMEM_PSCCR_PUSHW1_MASK | LMEM_PSCCR_GO_MASK;
/* Wait until the cache command completes. */
while ((LMEM->PSCCR & LMEM_PSCCR_GO_MASK) != 0U) {
}
/* As a precaution clear the bits to avoid inadvertently re-running this command. */
LMEM->PSCCR &= ~(LMEM_PSCCR_PUSHW0_MASK | LMEM_PSCCR_PUSHW1_MASK);
/* Now disable the cache. */
LMEM->PSCCR &= ~LMEM_PSCCR_ENCACHE_MASK;
}
/* Disable MPU */
ARM_MPU_Disable();
#if defined(CACHE_MODE_WRITE_THROUGH) && CACHE_MODE_WRITE_THROUGH
/* Region 0 setting: Memory with Normal type, not shareable, write through */
MPU->RBAR = ARM_MPU_RBAR(0, 0x20200000U);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 0, 0, 1, 0, 0, ARM_MPU_REGION_SIZE_1MB);
/* Region 1 setting: Memory with Normal type, not shareable, write through */
MPU->RBAR = ARM_MPU_RBAR(1, 0x20300000U);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 0, 0, 1, 0, 0, ARM_MPU_REGION_SIZE_512KB);
/* Region 2 setting: Memory with Normal type, not shareable, write through */
MPU->RBAR = ARM_MPU_RBAR(2, 0x80000000U);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 0, 0, 1, 0, 0, ARM_MPU_REGION_SIZE_64MB);
while ((nonCacheSize >> i) > 0x1U) {
i++;
}
if (i != 0) {
/* The MPU region size should be 2^N, 5<=N<=32, region base should be multiples of size. */
assert(!(nonCacheStart % nonCacheSize));
assert(nonCacheSize == (uint32_t) (1 << i));
assert(i >= 5);
/* Region 3 setting: Memory with device type, not shareable, non-cacheable */
MPU->RBAR = ARM_MPU_RBAR(3, nonCacheStart);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 2, 0, 0, 0, 0, i - 1);
}
#if defined(__USE_SHMEM)
i = 0;
while ((rpmsgShmemSize >> i) > 0x1U) {
i++;
}
if (i != 0) {
/* The MPU region size should be 2^N, 5<=N<=32, region base should be multiples of size. */
assert(!(rpmsgShmemStart % rpmsgShmemSize));
assert(rpmsgShmemSize == (uint32_t) (1 << i));
assert(i >= 5);
/* Region 4 setting: Memory with device type, not shareable, non-cacheable */
MPU->RBAR = ARM_MPU_RBAR(4, rpmsgShmemStart);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 2, 0, 0, 0, 0, i - 1);
}
#endif
#else
while ((nonCacheSize >> i) > 0x1U) {
i++;
}
if (i != 0) {
/* The MPU region size should be 2^N, 5<=N<=32, region base should be multiples of size. */
assert(!(nonCacheStart % nonCacheSize));
assert(nonCacheSize == (uint32_t) (1 << i));
assert(i >= 5);
/* Region 0 setting: Memory with device type, not shareable, non-cacheable */
MPU->RBAR = ARM_MPU_RBAR(0, nonCacheStart);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 2, 0, 0, 0, 0, i - 1);
}
#if defined(__USE_SHMEM)
i = 0;
while ((rpmsgShmemSize >> i) > 0x1U) {
i++;
}
if (i != 0) {
/* The MPU region size should be 2^N, 5<=N<=32, region base should be multiples of size. */
assert(!(rpmsgShmemStart % rpmsgShmemSize));
assert(rpmsgShmemSize == (uint32_t) (1 << i));
assert(i >= 5);
/* Region 1 setting: Memory with device type, not shareable, non-cacheable */
MPU->RBAR = ARM_MPU_RBAR(1, rpmsgShmemStart);
MPU->RASR = ARM_MPU_RASR(0, ARM_MPU_AP_FULL, 2, 0, 0, 0, 0, i - 1);
}
#endif
#endif
/* Enable MPU */
ARM_MPU_Enable(MPU_CTRL_PRIVDEFENA_Msk | MPU_CTRL_HFNMIENA_Msk);
/* Enable the processor system bus to invalidate all lines in both ways and initiate the processor system bus cache command. */
LMEM->PSCCR |= LMEM_PSCCR_INVW0_MASK | LMEM_PSCCR_INVW1_MASK | LMEM_PSCCR_GO_MASK;
/* Wait until the cache command completes */
while ((LMEM->PSCCR & LMEM_PSCCR_GO_MASK) != 0U) {
}
/* As a precaution clear the bits to avoid inadvertently re-running this command. */
LMEM->PSCCR &= ~(LMEM_PSCCR_INVW0_MASK | LMEM_PSCCR_INVW1_MASK);
/* Now enable the system bus cache. */
LMEM->PSCCR |= LMEM_PSCCR_ENCACHE_MASK;
/* Enable the processor code bus to invalidate all lines in both ways and initiate the processor code bus cache command. */
LMEM->PCCCR |= LMEM_PCCCR_INVW0_MASK | LMEM_PCCCR_INVW1_MASK | LMEM_PCCCR_GO_MASK;
/* Wait until the cache command completes. */
while ((LMEM->PCCCR & LMEM_PCCCR_GO_MASK) != 0U) {
}
/* As a precaution clear the bits to avoid inadvertently re-running this command. */
LMEM->PCCCR &= ~(LMEM_PCCCR_INVW0_MASK | LMEM_PCCCR_INVW1_MASK);
/* Now enable the code bus cache. */
LMEM->PCCCR |= LMEM_PCCCR_ENCACHE_MASK;
}
#endif
//--------------------------------------------------------------------+
// USB Interrupt Handler
//--------------------------------------------------------------------+
@@ -166,18 +552,18 @@ uint32_t board_button_read(void) {
size_t board_get_unique_id(uint8_t id[], size_t max_len) {
(void) max_len;
#if FSL_FEATURE_OCOTP_HAS_TIMING_CTRL
#if FSL_FEATURE_OCOTP_HAS_TIMING_CTRL
OCOTP_Init(OCOTP, CLOCK_GetFreq(kCLOCK_IpgClk));
#else
#else
OCOTP_Init(OCOTP, 0u);
#endif
#endif
// Reads shadow registers 0x01 - 0x04 (Configuration and Manufacturing Info)
// into an 8-bit-wide destination, avoiding type punning.
for (int i = 0; i < 4; ++i) {
uint32_t wr = OCOTP_ReadFuseShadowRegister(OCOTP, i + 1);
for (int j = 0; j < 4; j++) {
id[i*4+j] = wr & 0xff;
id[i * 4 + j] = wr & 0xff;
wr >>= 8;
}
}
@@ -186,7 +572,7 @@ size_t board_get_unique_id(uint8_t id[], size_t max_len) {
return 16;
}
int board_uart_read(uint8_t* buf, int len) {
int board_uart_read(uint8_t *buf, int len) {
int count = 0;
while (count < len) {
@@ -209,8 +595,8 @@ int board_uart_read(uint8_t* buf, int len) {
return count;
}
int board_uart_write(void const * buf, int len) {
LPUART_WriteBlocking(UART_PORT, (uint8_t const*)buf, len);
int board_uart_write(void const *buf, int len) {
LPUART_WriteBlocking(UART_PORT, (uint8_t const *) buf, len);
return len;
}
@@ -236,10 +622,10 @@ TU_ATTR_UNUSED void _start(void) {
while (1) {}
}
#ifdef __clang__
void _exit (int __status) {
#ifdef __clang__
void _exit(int __status) {
while (1) {}
}
#endif
#endif
#endif

View File

@@ -64,6 +64,7 @@ function(add_board_target BOARD_TARGET)
XIP_EXTERNAL_FLASH=1
XIP_BOOT_HEADER_ENABLE=1
__STARTUP_CLEAR_BSS
CFG_TUSB_MEM_SECTION=__attribute__((section("NonCacheable")))
)
target_include_directories(${BOARD_TARGET} PUBLIC
${CMAKE_CURRENT_FUNCTION_LIST_DIR}/boards/${BOARD}

View File

@@ -14,7 +14,8 @@ CFLAGS += \
-D__STARTUP_CLEAR_BSS \
-DXIP_EXTERNAL_FLASH=1 \
-DXIP_BOOT_HEADER_ENABLE=1 \
-DCFG_TUSB_MCU=OPT_MCU_MIMXRT1XXX
-DCFG_TUSB_MCU=OPT_MCU_MIMXRT1XXX \
-DCFG_TUSB_MEM_SECTION=__attribute__((section("NonCacheable"))) \
ifdef BOARD_TUD_RHPORT
CFLAGS += -DBOARD_TUD_RHPORT=$(BOARD_TUD_RHPORT)

View File

@@ -56,46 +56,11 @@ static const ci_hs_controller_t _ci_controller[] =
#define CI_HS_REG(_port) ((ci_hs_regs_t*) _ci_controller[_port].reg_base)
//------------- DCD -------------//
#define CI_DCD_INT_ENABLE(_p) NVIC_EnableIRQ (_ci_controller[_p].irqnum)
#define CI_DCD_INT_DISABLE(_p) NVIC_DisableIRQ(_ci_controller[_p].irqnum)
#define CI_DCD_INT_ENABLE(_p) NVIC_EnableIRQ ((IRQn_Type)_ci_controller[_p].irqnum)
#define CI_DCD_INT_DISABLE(_p) NVIC_DisableIRQ((IRQn_Type)_ci_controller[_p].irqnum)
//------------- HCD -------------//
#define CI_HCD_INT_ENABLE(_p) NVIC_EnableIRQ (_ci_controller[_p].irqnum)
#define CI_HCD_INT_DISABLE(_p) NVIC_DisableIRQ(_ci_controller[_p].irqnum)
//------------- DCache -------------//
TU_ATTR_ALWAYS_INLINE static inline bool imxrt_is_cache_mem(uintptr_t addr) {
return !(0x20000000 <= addr && addr < 0x20100000);
}
TU_ATTR_ALWAYS_INLINE static inline bool imxrt_dcache_clean(void const* addr, uint32_t data_size) {
const uintptr_t addr32 = (uintptr_t) addr;
if (imxrt_is_cache_mem(addr32)) {
TU_ASSERT(tu_is_aligned32(addr32));
SCB_CleanDCache_by_Addr((uint32_t *) addr32, (int32_t) data_size);
}
return true;
}
TU_ATTR_ALWAYS_INLINE static inline bool imxrt_dcache_invalidate(void const* addr, uint32_t data_size) {
const uintptr_t addr32 = (uintptr_t) addr;
if (imxrt_is_cache_mem(addr32)) {
// Invalidating does not push cached changes back to RAM so we need to be
// *very* careful when we do it. If we're not aligned, then we risk resetting
// values back to their RAM state.
TU_ASSERT(tu_is_aligned32(addr32));
SCB_InvalidateDCache_by_Addr((void*) addr32, (int32_t) data_size);
}
return true;
}
TU_ATTR_ALWAYS_INLINE static inline bool imxrt_dcache_clean_invalidate(void const* addr, uint32_t data_size) {
const uintptr_t addr32 = (uintptr_t) addr;
if (imxrt_is_cache_mem(addr32)) {
TU_ASSERT(tu_is_aligned32(addr32));
SCB_CleanInvalidateDCache_by_Addr((uint32_t *) addr32, (int32_t) data_size);
}
return true;
}
#define CI_HCD_INT_ENABLE(_p) NVIC_EnableIRQ ((IRQn_Type)_ci_controller[_p].irqnum)
#define CI_HCD_INT_DISABLE(_p) NVIC_DisableIRQ((IRQn_Type)_ci_controller[_p].irqnum)
#endif

View File

@@ -33,19 +33,6 @@
#if CFG_TUSB_MCU == OPT_MCU_MIMXRT1XXX
#include "ci_hs_imxrt.h"
void dcd_dcache_clean(void const* addr, uint32_t data_size) {
imxrt_dcache_clean(addr, data_size);
}
void dcd_dcache_invalidate(void const* addr, uint32_t data_size) {
imxrt_dcache_invalidate(addr, data_size);
}
void dcd_dcache_clean_invalidate(void const* addr, uint32_t data_size) {
imxrt_dcache_clean_invalidate(addr, data_size);
}
#else
#if TU_CHECK_MCU(OPT_MCU_LPC18XX, OPT_MCU_LPC43XX)
@@ -57,18 +44,6 @@
#else
#error "Unsupported MCUs"
#endif
TU_ATTR_WEAK void dcd_dcache_clean(void const* addr, uint32_t data_size) {
(void) addr; (void) data_size;
}
TU_ATTR_WEAK void dcd_dcache_invalidate(void const* addr, uint32_t data_size) {
(void) addr; (void) data_size;
}
TU_ATTR_WEAK void dcd_dcache_clean_invalidate(void const* addr, uint32_t data_size) {
(void) addr; (void) data_size;
}
#endif
//--------------------------------------------------------------------+
@@ -230,8 +205,6 @@ static void bus_reset(uint8_t rhport)
_dcd_data.qhd[0][0].qtd_overlay.next = _dcd_data.qhd[0][1].qtd_overlay.next = QTD_NEXT_INVALID;
_dcd_data.qhd[0][0].int_on_setup = 1; // OUT only
dcd_dcache_clean_invalidate(&_dcd_data, sizeof(dcd_data_t));
}
bool dcd_init(uint8_t rhport, const tusb_rhport_init_t* rh_init) {
@@ -257,8 +230,6 @@ bool dcd_init(uint8_t rhport, const tusb_rhport_init_t* rh_init) {
dcd_reg->PORTSC1 = PORTSC1_FORCE_FULL_SPEED;
#endif
dcd_dcache_clean_invalidate(&_dcd_data, sizeof(dcd_data_t));
dcd_reg->ENDPTLISTADDR = (uint32_t) _dcd_data.qhd; // Endpoint List Address has to be 2K alignment
dcd_reg->USBSTS = dcd_reg->USBSTS;
dcd_reg->USBINTR = INTR_USB | INTR_ERROR | INTR_PORT_CHANGE | INTR_SUSPEND;
@@ -325,10 +296,6 @@ void dcd_sof_enable(uint8_t rhport, bool en)
static void qtd_init(dcd_qtd_t* p_qtd, void * data_ptr, uint16_t total_bytes)
{
// Force the CPU to flush the buffer. We increase the size by 31 because the call aligns the
// address to 32-byte boundaries. Buffer must be word aligned
dcd_dcache_clean_invalidate((uint32_t*) tu_align((uint32_t) data_ptr, 4), total_bytes + 31);
tu_memclr(p_qtd, sizeof(dcd_qtd_t));
p_qtd->next = QTD_NEXT_INVALID;
@@ -402,8 +369,6 @@ bool dcd_edpt_open(uint8_t rhport, tusb_desc_endpoint_t const * p_endpoint_desc)
p_qhd->qtd_overlay.next = QTD_NEXT_INVALID;
dcd_dcache_clean_invalidate(&_dcd_data, sizeof(dcd_data_t));
// Enable EP Control
uint32_t const epctrl = (p_endpoint_desc->bmAttributes.xfer << ENDPTCTRL_TYPE_POS) | ENDPTCTRL_ENABLE | ENDPTCTRL_TOGGLE_RESET;
@@ -461,9 +426,6 @@ static void qhd_start_xfer(uint8_t rhport, uint8_t epnum, uint8_t dir)
p_qhd->qtd_overlay.halted = false; // clear any previous error
p_qhd->qtd_overlay.next = (uint32_t) p_qtd; // link qtd to qhd
// flush cache
dcd_dcache_clean_invalidate(&_dcd_data, sizeof(dcd_data_t));
if ( epnum == 0 )
{
// follows UM 24.10.8.1.1 Setup packet handling using setup lockout mechanism
@@ -539,8 +501,6 @@ bool dcd_edpt_xfer_fifo (uint8_t rhport, uint8_t ep_addr, tu_fifo_t * ff, uint16
page++;
}
}
dcd_dcache_clean_invalidate((uint32_t*) tu_align((uint32_t) fifo_info.ptr_wrap, 4), total_bytes - fifo_info.len_wrap + 31);
}
else
{
@@ -652,9 +612,6 @@ void dcd_int_handler(uint8_t rhport)
if (int_status & INTR_USB)
{
// Make sure we read the latest version of _dcd_data.
dcd_dcache_clean_invalidate(&_dcd_data, sizeof(dcd_data_t));
uint32_t const edpt_complete = dcd_reg->ENDPTCOMPLETE;
dcd_reg->ENDPTCOMPLETE = edpt_complete; // acknowledge