Mirror of https://github.com/RIOT-OS/RIOT.git
cpu/fe310: Uncrustify code
This commit is contained in: parent bb1a3470d7, commit ba518ede09
@@ -35,18 +35,19 @@ void clock_init(void)
         PRCI_REG(PRCI_HFROSCCFG) = (ROSC_DIV(4) | ROSC_TRIM(16) | ROSC_EN(1));

         /* Wait for HFROSC to be ready */
-        while ((PRCI_REG(PRCI_HFROSCCFG) & ROSC_RDY(1)) == 0);
+        while ((PRCI_REG(PRCI_HFROSCCFG) & ROSC_RDY(1)) == 0) {}

         /* Don't use PLL clock source */
         PRCI_REG(PRCI_PLLCFG) &= ~PLL_SEL(PLL_SEL_PLL);
     }

-    if (IS_ACTIVE(CONFIG_USE_CLOCK_HFXOSC) || IS_ACTIVE(CONFIG_USE_CLOCK_HFXOSC_PLL)) {
+    if (IS_ACTIVE(CONFIG_USE_CLOCK_HFXOSC) ||
+        IS_ACTIVE(CONFIG_USE_CLOCK_HFXOSC_PLL)) {
         /* Ensure HFXOSC is enabled */
         PRCI_REG(PRCI_HFXOSCCFG) = XOSC_EN(1);

         /* Wait for HFXOSC to become ready */
-        while ((PRCI_REG(PRCI_HFXOSCCFG) & XOSC_RDY(1)) == 0);
+        while ((PRCI_REG(PRCI_HFXOSCCFG) & XOSC_RDY(1)) == 0) {}

         /* Select HFXOSC as reference frequency and bypass PLL */
         PRCI_REG(PRCI_PLLCFG) = PLL_REFSEL(PLL_REFSEL_HFXOSC) | PLL_BYPASS(1);
@@ -56,13 +57,14 @@ void clock_init(void)
         PRCI_REG(PRCI_PLLDIV) = (PLL_FINAL_DIV_BY_1(1) | PLL_FINAL_DIV(0));

         /* Configure PLL */
-        PRCI_REG(PRCI_PLLCFG) |= PLL_R(CONFIG_CLOCK_PLL_R) | PLL_F(CONFIG_CLOCK_PLL_F) | PLL_Q(CONFIG_CLOCK_PLL_Q);
+        PRCI_REG(PRCI_PLLCFG) |= PLL_R(CONFIG_CLOCK_PLL_R) | PLL_F(
+            CONFIG_CLOCK_PLL_F) | PLL_Q(CONFIG_CLOCK_PLL_Q);

         /* Disable PLL Bypass */
         PRCI_REG(PRCI_PLLCFG) &= ~PLL_BYPASS(1);

         /* Now it is safe to check for PLL Lock */
-        while ((PRCI_REG(PRCI_PLLCFG) & PLL_LOCK(1)) == 0);
+        while ((PRCI_REG(PRCI_PLLCFG) & PLL_LOCK(1)) == 0) {}
     }

     /* Switch over to PLL Clock source */
@@ -72,17 +74,20 @@ void clock_init(void)
         PRCI_REG(PRCI_HFROSCCFG) &= ~ROSC_EN(1);
     }
     else if (IS_ACTIVE(CONFIG_USE_CLOCK_HFROSC_PLL)) {
-        PRCI_set_hfrosctrim_for_f_cpu(CONFIG_CLOCK_DESIRED_FREQUENCY, PRCI_FREQ_UNDERSHOOT);
+        PRCI_set_hfrosctrim_for_f_cpu(CONFIG_CLOCK_DESIRED_FREQUENCY,
+                                      PRCI_FREQ_UNDERSHOOT);
     }
     else { /* Clock HFROSC */
         /* Disable Bypass */
         PRCI_REG(PRCI_PLLCFG) &= ~PLL_BYPASS(1);

         /* Configure trim and divider values of HFROSC */
-        PRCI_REG(PRCI_HFROSCCFG) = (ROSC_DIV(CONFIG_CLOCK_HFROSC_DIV) | ROSC_TRIM(CONFIG_CLOCK_HFROSC_TRIM) | ROSC_EN(1));
+        PRCI_REG(PRCI_HFROSCCFG) =
+            (ROSC_DIV(CONFIG_CLOCK_HFROSC_DIV) |
+             ROSC_TRIM(CONFIG_CLOCK_HFROSC_TRIM) | ROSC_EN(1));

         /* Wait for HFROSC to be ready */
-        while ((PRCI_REG(PRCI_HFROSCCFG) & ROSC_RDY(1)) == 0);
+        while ((PRCI_REG(PRCI_HFROSCCFG) & ROSC_RDY(1)) == 0) {}

         /* Don't use PLL clock source */
         PRCI_REG(PRCI_PLLCFG) &= ~PLL_SEL(PLL_SEL_PLL);
@@ -25,7 +25,8 @@


 #define CHECK_OFFSET(member) \
-    _Static_assert(offsetof(struct context_switch_frame, member) == member ## _OFFSET, \
+    _Static_assert(offsetof(struct context_switch_frame, \
+                            member) == member ## _OFFSET, \
                    "context_switch_frame offset mismatch for offset member");

 static void check_context_switch_frame_alignment(void) __attribute__ ((unused));
@@ -83,6 +83,7 @@ void flash_init(void)
      */
     uint32_t freq = cpu_freq();
     uint32_t sckdiv = (freq - 1) / (MAX_FLASH_FREQ * 2);
+
     if (sckdiv > SCKDIV_SAFE) {
         SPI0_REG(SPI_REG_SCKDIV) = sckdiv;
     }
@@ -30,7 +30,8 @@ extern "C" {
  * @{
  */
 #ifndef CONFIG_USE_CLOCK_HFXOSC_PLL
-#if IS_ACTIVE(CONFIG_USE_CLOCK_HFXOSC) || IS_ACTIVE(CONFIG_USE_CLOCK_HFROSC_PLL) || \
+#if IS_ACTIVE(CONFIG_USE_CLOCK_HFXOSC) || \
+    IS_ACTIVE(CONFIG_USE_CLOCK_HFROSC_PLL) || \
     IS_ACTIVE(CONFIG_USE_CLOCK_HFROSC)
 #define CONFIG_USE_CLOCK_HFXOSC_PLL 0
 #else
@@ -51,22 +52,26 @@ extern "C" {
 #endif /* CONFIG_USE_CLOCK_HFROSC */

 #if CONFIG_USE_CLOCK_HFXOSC_PLL && \
-    (CONFIG_USE_CLOCK_HFROSC_PLL || CONFIG_USE_CLOCK_HFROSC || CONFIG_USE_CLOCK_HFXOSC)
+    (CONFIG_USE_CLOCK_HFROSC_PLL || CONFIG_USE_CLOCK_HFROSC || \
+     CONFIG_USE_CLOCK_HFXOSC)
 #error "Cannot use HFXOSC_PLL with other clock configurations"
 #endif

 #if CONFIG_USE_CLOCK_HFXOSC && \
-    (CONFIG_USE_CLOCK_HFROSC_PLL || CONFIG_USE_CLOCK_HFROSC || CONFIG_USE_CLOCK_HFXOSC_PLL)
+    (CONFIG_USE_CLOCK_HFROSC_PLL || CONFIG_USE_CLOCK_HFROSC || \
+     CONFIG_USE_CLOCK_HFXOSC_PLL)
 #error "Cannot use HFXOSC with other clock configurations"
 #endif

 #if CONFIG_USE_CLOCK_HFROSC_PLL && \
-    (CONFIG_USE_CLOCK_HFXOSC_PLL || CONFIG_USE_CLOCK_HFXOSC || CONFIG_USE_CLOCK_HFROSC)
+    (CONFIG_USE_CLOCK_HFXOSC_PLL || CONFIG_USE_CLOCK_HFXOSC || \
+     CONFIG_USE_CLOCK_HFROSC)
 #error "Cannot use HFROSC_PLL with other clock configurations"
 #endif

 #if CONFIG_USE_CLOCK_HFROSC && \
-    (CONFIG_USE_CLOCK_HFXOSC_PLL || CONFIG_USE_CLOCK_HFXOSC || CONFIG_USE_CLOCK_HFROSC_PLL)
+    (CONFIG_USE_CLOCK_HFXOSC_PLL || CONFIG_USE_CLOCK_HFXOSC || \
+     CONFIG_USE_CLOCK_HFROSC_PLL)
 #error "Cannot use HFROSC with other clock configurations"
 #endif

@@ -80,9 +85,12 @@ extern "C" {

 #if CONFIG_USE_CLOCK_HFXOSC_PLL
 #define CLOCK_PLL_INPUT_CLOCK MHZ(16)
-#define CLOCK_PLL_REFR (CLOCK_PLL_INPUT_CLOCK / (CONFIG_CLOCK_PLL_R + 1))
-#define CLOCK_PLL_VCO (CLOCK_PLL_REFR * (2 * (CONFIG_CLOCK_PLL_F + 1)))
-#define CLOCK_PLL_OUT (CLOCK_PLL_VCO / (1 << CONFIG_CLOCK_PLL_Q))
+#define CLOCK_PLL_REFR (CLOCK_PLL_INPUT_CLOCK / \
+                        (CONFIG_CLOCK_PLL_R + 1))
+#define CLOCK_PLL_VCO (CLOCK_PLL_REFR * \
+                       (2 * (CONFIG_CLOCK_PLL_F + 1)))
+#define CLOCK_PLL_OUT (CLOCK_PLL_VCO / \
+                       (1 << CONFIG_CLOCK_PLL_Q))
 #define CLOCK_CORECLOCK (CLOCK_PLL_OUT) /* 320000000Hz with the values used above */

 /* Check PLL settings */
@@ -90,10 +98,12 @@ extern "C" {
 #error "Only R=2 can be used when using HFXOSC"
 #endif
 #if (CLOCK_PLL_VCO < MHZ(384)) || (CLOCK_PLL_VCO > MHZ(768))
-#error "VCO frequency must be in the range [384MHz - 768MHz], check the CLOCK_PLL_F value"
+#error \
+    "VCO frequency must be in the range [384MHz - 768MHz], check the CLOCK_PLL_F value"
 #endif
 #if (CLOCK_PLL_OUT < MHZ(48)) || (CLOCK_PLL_OUT > MHZ(384))
-#error "PLL output frequency must be in the range [48MHz - 384MHz], check the CLOCK_PLL_Q value"
+#error \
+    "PLL output frequency must be in the range [48MHz - 384MHz], check the CLOCK_PLL_Q value"
 #endif

 #elif CONFIG_USE_CLOCK_HFXOSC
@@ -101,9 +111,9 @@ extern "C" {
 #endif

 /*
-  When using HFROSC input clock, the core clock cannot be computed from settings,
-  call cpu_freq() to get the configured CPU frequency.
-*/
+   When using HFROSC input clock, the core clock cannot be computed from settings,
+   call cpu_freq() to get the configured CPU frequency.
+ */
 #ifndef CONFIG_CLOCK_DESIRED_FREQUENCY
 #define CONFIG_CLOCK_DESIRED_FREQUENCY MHZ(320)
 #endif
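As a worked example of the PLL macros above (a sketch; the R/F/Q values here are assumptions matching the 320000000Hz comment, not taken from this diff):

/* PLL arithmetic sketch: 16 MHz HFXOSC reference, assuming
 * CONFIG_CLOCK_PLL_R = 1, CONFIG_CLOCK_PLL_F = 39, CONFIG_CLOCK_PLL_Q = 1 */
#include <stdio.h>

int main(void)
{
    unsigned long input = 16000000UL;             /* CLOCK_PLL_INPUT_CLOCK */
    unsigned long refr  = input / (1 + 1);        /* R divider   ->   8 MHz */
    unsigned long vco   = refr * (2 * (39 + 1));  /* F multiplier -> 640 MHz, inside [384, 768] MHz */
    unsigned long out   = vco / (1 << 1);         /* Q divider   -> 320 MHz, inside [48, 384] MHz */

    printf("PLL output: %lu Hz\n", out);          /* prints 320000000 */
    return 0;
}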
@@ -43,12 +43,13 @@ static inline __attribute__((always_inline)) unsigned int irq_enable(void)
 {
     /* Enable all interrupts */
     unsigned state;

     __asm__ volatile (
         "csrrs %[dest], mstatus, %[mask]"
-        : [dest] "=r"(state)
-        : [mask] "i"(MSTATUS_MIE)
+        :[dest] "=r" (state)
+        :[mask] "i" (MSTATUS_MIE)
         : "memory"
-    );
+        );
     return state;
 }

@@ -59,12 +60,13 @@ static inline __attribute__((always_inline)) unsigned int irq_disable(void)
 {

     unsigned int state;

     __asm__ volatile (
         "csrrc %[dest], mstatus, %[mask]"
-        : [dest] "=r"(state)
-        : [mask] "i"(MSTATUS_MIE)
+        :[dest] "=r" (state)
+        :[mask] "i" (MSTATUS_MIE)
         : "memory"
-    );
+        );

     return state;
 }
@@ -72,15 +74,16 @@ static inline __attribute__((always_inline)) unsigned int irq_disable(void)
 /**
  * @brief Restore the state of the IRQ flags
  */
-static inline __attribute__((always_inline)) void irq_restore(unsigned int state)
+static inline __attribute__((always_inline)) void irq_restore(
+    unsigned int state)
 {
     /* Restore all interrupts to given state */
     __asm__ volatile (
         "csrw mstatus, %[state]"
         : /* no outputs */
-        : [state] "r"(state)
+        :[state] "r" (state)
         : "memory"
-    );
+        );
 }

 /**
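The three helpers above form RIOT's usual critical-section pattern; a minimal usage sketch:

#include "irq.h"

static unsigned counter;

static void counter_increment(void)
{
    /* Mask interrupts, perform the non-atomic update, then restore the
     * previous MSTATUS.MIE state returned by irq_disable() */
    unsigned state = irq_disable();

    counter++;
    irq_restore(state);
}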
@@ -144,8 +144,8 @@ typedef struct {
  */
 #define NWDT_TIME_LOWER_LIMIT (1)
 /* Ensure the internal "count" variable stays within the uint32 bounds.
-    This variable corresponds to max_time * RTC_FREQ / MS_PER_SEC. On fe310,
-    RTC_FREQ is 32768Hz. The 15 right shift is equivalent to a division by RTC_FREQ.
+   This variable corresponds to max_time * RTC_FREQ / MS_PER_SEC. On fe310,
+   RTC_FREQ is 32768Hz. The 15 right shift is equivalent to a division by RTC_FREQ.
  */
 #define NWDT_TIME_UPPER_LIMIT ((UINT32_MAX >> 15) * MS_PER_SEC + 1)
 /** @} */
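A quick check of the upper bound above: with RTC_FREQ = 32768 Hz (2^15) and MS_PER_SEC = 1000, the internal tick count is max_time * 32768 / 1000, so the largest safe max_time is about (UINT32_MAX >> 15) * 1000 = 131071 * 1000 = 131071000 ms, and NWDT_TIME_UPPER_LIMIT is that value plus one; 131071000 * 32768 / 1000 = 4294934528 still fits in a uint32_t.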
@@ -82,28 +82,28 @@ void handle_trap(uint32_t mcause)
         /* Cause is an interrupt - determine type */
         switch (mcause & MCAUSE_CAUSE) {
 #ifdef MODULE_PERIPH_TIMER
-        case IRQ_M_TIMER:
-            /* Handle timer interrupt */
-            timer_isr();
-            break;
+            case IRQ_M_TIMER:
+                /* Handle timer interrupt */
+                timer_isr();
+                break;
 #endif
-        case IRQ_M_EXT:
-            /* Handle external interrupt */
-            if (IS_ACTIVE(MODULE_PERIPH_PLIC)) {
-                plic_isr_handler();
-            }
-            break;
+            case IRQ_M_EXT:
+                /* Handle external interrupt */
+                if (IS_ACTIVE(MODULE_PERIPH_PLIC)) {
+                    plic_isr_handler();
+                }
+                break;

-        default:
-            /* Unknown interrupt */
-            core_panic(PANIC_GENERAL_ERROR, "Unhandled interrupt");
-            break;
+            default:
+                /* Unknown interrupt */
+                core_panic(PANIC_GENERAL_ERROR, "Unhandled interrupt");
+                break;
         }
     }
     else {
         switch (mcause) {
-        case CAUSE_USER_ECALL: /* ECALL from user mode */
-        case CAUSE_MACHINE_ECALL: /* ECALL from machine mode */
+            case CAUSE_USER_ECALL: /* ECALL from user mode */
+            case CAUSE_MACHINE_ECALL: /* ECALL from machine mode */
             {
                 /* TODO: get the ecall arguments */
                 sched_context_switch_request = 1;
@@ -116,9 +116,9 @@ void handle_trap(uint32_t mcause)
             default:
 #ifdef DEVELHELP
                 printf("Unhandled trap:\n");
-                printf(" mcause: 0x%"PRIx32"\n", mcause);
-                printf(" mepc: 0x%"PRIx32"\n", read_csr(mepc));
-                printf(" mtval: 0x%"PRIx32"\n", read_csr(mtval));
+                printf(" mcause: 0x%" PRIx32 "\n", mcause);
+                printf(" mepc: 0x%" PRIx32 "\n", read_csr(mepc));
+                printf(" mtval: 0x%" PRIx32 "\n", read_csr(mtval));
 #endif
                 /* Unknown trap */
                 core_panic(PANIC_GENERAL_ERROR, "Unhandled trap");
@@ -130,140 +130,141 @@ void handle_trap(uint32_t mcause)

 /* Marking this as interrupt to ensure an mret at the end, provided by the
  * compiler. Aligned to 4-byte boundary as per RISC-V spec */
-static void __attribute((aligned(4))) __attribute__((interrupt)) trap_entry(void) {
+static void __attribute((aligned(4))) __attribute__((interrupt)) trap_entry(void)
+{
     __asm__ volatile (
-    "addi sp, sp, -"XTSTR(CONTEXT_FRAME_SIZE)" \n"
+    "addi sp, sp, -"XTSTR (CONTEXT_FRAME_SIZE)" \n"

-    /* Save caller-saved registers */
-    "sw ra, "XTSTR(ra_OFFSET)"(sp) \n"
-    "sw t0, "XTSTR(t0_OFFSET)"(sp) \n"
-    "sw t1, "XTSTR(t1_OFFSET)"(sp) \n"
-    "sw t2, "XTSTR(t2_OFFSET)"(sp) \n"
-    "sw t3, "XTSTR(t3_OFFSET)"(sp) \n"
-    "sw t4, "XTSTR(t4_OFFSET)"(sp) \n"
-    "sw t5, "XTSTR(t5_OFFSET)"(sp) \n"
-    "sw t6, "XTSTR(t6_OFFSET)"(sp) \n"
-    "sw a0, "XTSTR(a0_OFFSET)"(sp) \n"
-    "sw a1, "XTSTR(a1_OFFSET)"(sp) \n"
-    "sw a2, "XTSTR(a2_OFFSET)"(sp) \n"
-    "sw a3, "XTSTR(a3_OFFSET)"(sp) \n"
-    "sw a4, "XTSTR(a4_OFFSET)"(sp) \n"
-    "sw a5, "XTSTR(a5_OFFSET)"(sp) \n"
-    "sw a6, "XTSTR(a6_OFFSET)"(sp) \n"
-    "sw a7, "XTSTR(a7_OFFSET)"(sp) \n"
+    /* Save caller-saved registers */
+    "sw ra, "XTSTR (ra_OFFSET)"(sp) \n"
+    "sw t0, "XTSTR (t0_OFFSET)"(sp) \n"
+    "sw t1, "XTSTR (t1_OFFSET)"(sp) \n"
+    "sw t2, "XTSTR (t2_OFFSET)"(sp) \n"
+    "sw t3, "XTSTR (t3_OFFSET)"(sp) \n"
+    "sw t4, "XTSTR (t4_OFFSET)"(sp) \n"
+    "sw t5, "XTSTR (t5_OFFSET)"(sp) \n"
+    "sw t6, "XTSTR (t6_OFFSET)"(sp) \n"
+    "sw a0, "XTSTR (a0_OFFSET)"(sp) \n"
+    "sw a1, "XTSTR (a1_OFFSET)"(sp) \n"
+    "sw a2, "XTSTR (a2_OFFSET)"(sp) \n"
+    "sw a3, "XTSTR (a3_OFFSET)"(sp) \n"
+    "sw a4, "XTSTR (a4_OFFSET)"(sp) \n"
+    "sw a5, "XTSTR (a5_OFFSET)"(sp) \n"
+    "sw a6, "XTSTR (a6_OFFSET)"(sp) \n"
+    "sw a7, "XTSTR (a7_OFFSET)"(sp) \n"

-    /* Save s0 and s1 extra for the active thread and the stack ptr */
-    "sw s0, "XTSTR(s0_OFFSET)"(sp) \n"
-    "sw s1, "XTSTR(s1_OFFSET)"(sp) \n"
+    /* Save s0 and s1 extra for the active thread and the stack ptr */
+    "sw s0, "XTSTR (s0_OFFSET)"(sp) \n"
+    "sw s1, "XTSTR (s1_OFFSET)"(sp) \n"

-    /* Save the user stack ptr */
-    "mv s0, sp \n"
-    /* Load exception stack ptr */
-    "la sp, _sp \n"
+    /* Save the user stack ptr */
+    "mv s0, sp \n"
+    /* Load exception stack ptr */
+    "la sp, _sp \n"

-    /* Get the interrupt cause */
-    "csrr a0, mcause \n"
+    /* Get the interrupt cause */
+    "csrr a0, mcause \n"

-    /* Call trap handler, a0 contains mcause before, and the return value after
-     * the call */
-    "call handle_trap \n"
+    /* Call trap handler, a0 contains mcause before, and the return value after
+     * the call */
+    "call handle_trap \n"

-    /* Load the sched_context_switch_request */
-    "lw a0, sched_context_switch_request \n"
+    /* Load the sched_context_switch_request */
+    "lw a0, sched_context_switch_request \n"

-    /* And skip the context switch if not requested */
-    "beqz a0, no_sched \n"
+    /* And skip the context switch if not requested */
+    "beqz a0, no_sched \n"

-    /* Get the previous active thread (could be NULL) */
-    "lw s1, sched_active_thread \n"
+    /* Get the previous active thread (could be NULL) */
+    "lw s1, sched_active_thread \n"

-    /* Run the scheduler */
-    "call sched_run \n"
+    /* Run the scheduler */
+    "call sched_run \n"

-    "no_sched: \n"
-    /* Restore the thread stack pointer and check if a new thread must be
-     * scheduled */
-    "mv sp, s0 \n"
+    "no_sched: \n"
+    /* Restore the thread stack pointer and check if a new thread must be
+     * scheduled */
+    "mv sp, s0 \n"

-    /* No context switch required, shortcut to restore. a0 contains the return
-     * value of sched_run, or the sched_context_switch_request if the sched_run
-     * was skipped */
-    "beqz a0, no_switch \n"
+    /* No context switch required, shortcut to restore. a0 contains the return
+     * value of sched_run, or the sched_context_switch_request if the sched_run
+     * was skipped */
+    "beqz a0, no_switch \n"

-    /* Skips the rest of the save if no active thread */
-    "beqz s1, null_thread \n"
+    /* Skips the rest of the save if no active thread */
+    "beqz s1, null_thread \n"

-    /* Store s2-s11 */
-    "sw s2, "XTSTR(s2_OFFSET)"(sp) \n"
-    "sw s3, "XTSTR(s3_OFFSET)"(sp) \n"
-    "sw s4, "XTSTR(s4_OFFSET)"(sp) \n"
-    "sw s5, "XTSTR(s5_OFFSET)"(sp) \n"
-    "sw s6, "XTSTR(s6_OFFSET)"(sp) \n"
-    "sw s7, "XTSTR(s7_OFFSET)"(sp) \n"
-    "sw s8, "XTSTR(s8_OFFSET)"(sp) \n"
-    "sw s9, "XTSTR(s9_OFFSET)"(sp) \n"
-    "sw s10, "XTSTR(s10_OFFSET)"(sp) \n"
-    "sw s11, "XTSTR(s11_OFFSET)"(sp) \n"
+    /* Store s2-s11 */
+    "sw s2, "XTSTR (s2_OFFSET)"(sp) \n"
+    "sw s3, "XTSTR (s3_OFFSET)"(sp) \n"
+    "sw s4, "XTSTR (s4_OFFSET)"(sp) \n"
+    "sw s5, "XTSTR (s5_OFFSET)"(sp) \n"
+    "sw s6, "XTSTR (s6_OFFSET)"(sp) \n"
+    "sw s7, "XTSTR (s7_OFFSET)"(sp) \n"
+    "sw s8, "XTSTR (s8_OFFSET)"(sp) \n"
+    "sw s9, "XTSTR (s9_OFFSET)"(sp) \n"
+    "sw s10, "XTSTR (s10_OFFSET)"(sp) \n"
+    "sw s11, "XTSTR (s11_OFFSET)"(sp) \n"

-    /* Grab mepc to save it to the stack */
-    "csrr s2, mepc \n"
+    /* Grab mepc to save it to the stack */
+    "csrr s2, mepc \n"

-    /* Save return PC in stack frame */
-    "sw s2, "XTSTR(pc_OFFSET)"(sp) \n"
+    /* Save return PC in stack frame */
+    "sw s2, "XTSTR (pc_OFFSET)"(sp) \n"

-    /* Save stack pointer of current thread */
-    "sw sp, "XTSTR(SP_OFFSET_IN_THREAD)"(s1) \n"
+    /* Save stack pointer of current thread */
+    "sw sp, "XTSTR (SP_OFFSET_IN_THREAD)"(s1) \n"

-    /* Context saving done, from here on the new thread is scheduled */
-    "null_thread: \n"
+    /* Context saving done, from here on the new thread is scheduled */
+    "null_thread: \n"

-    /* Get the new active thread (guaranteed to be non NULL) */
-    "lw s1, sched_active_thread \n"
+    /* Get the new active thread (guaranteed to be non NULL) */
+    "lw s1, sched_active_thread \n"

-    /* Load the thread SP of scheduled thread */
-    "lw sp, "XTSTR(SP_OFFSET_IN_THREAD)"(s1) \n"
+    /* Load the thread SP of scheduled thread */
+    "lw sp, "XTSTR (SP_OFFSET_IN_THREAD)"(s1) \n"

-    /* Set return PC to mepc */
-    "lw a1, "XTSTR(pc_OFFSET)"(sp) \n"
-    "csrw mepc, a1 \n"
+    /* Set return PC to mepc */
+    "lw a1, "XTSTR (pc_OFFSET)"(sp) \n"
+    "csrw mepc, a1 \n"

-    /* restore s2-s11 */
-    "lw s2, "XTSTR(s2_OFFSET)"(sp) \n"
-    "lw s3, "XTSTR(s3_OFFSET)"(sp) \n"
-    "lw s4, "XTSTR(s4_OFFSET)"(sp) \n"
-    "lw s5, "XTSTR(s5_OFFSET)"(sp) \n"
-    "lw s6, "XTSTR(s6_OFFSET)"(sp) \n"
-    "lw s7, "XTSTR(s7_OFFSET)"(sp) \n"
-    "lw s8, "XTSTR(s8_OFFSET)"(sp) \n"
-    "lw s9, "XTSTR(s9_OFFSET)"(sp) \n"
-    "lw s10, "XTSTR(s10_OFFSET)"(sp) \n"
-    "lw s11, "XTSTR(s11_OFFSET)"(sp) \n"
+    /* restore s2-s11 */
+    "lw s2, "XTSTR (s2_OFFSET)"(sp) \n"
+    "lw s3, "XTSTR (s3_OFFSET)"(sp) \n"
+    "lw s4, "XTSTR (s4_OFFSET)"(sp) \n"
+    "lw s5, "XTSTR (s5_OFFSET)"(sp) \n"
+    "lw s6, "XTSTR (s6_OFFSET)"(sp) \n"
+    "lw s7, "XTSTR (s7_OFFSET)"(sp) \n"
+    "lw s8, "XTSTR (s8_OFFSET)"(sp) \n"
+    "lw s9, "XTSTR (s9_OFFSET)"(sp) \n"
+    "lw s10, "XTSTR (s10_OFFSET)"(sp) \n"
+    "lw s11, "XTSTR (s11_OFFSET)"(sp) \n"

-    "no_switch: \n"
+    "no_switch: \n"

-    /* restore the caller-saved registers */
-    "lw ra, "XTSTR(ra_OFFSET)"(sp) \n"
-    "lw t0, "XTSTR(t0_OFFSET)"(sp) \n"
-    "lw t1, "XTSTR(t1_OFFSET)"(sp) \n"
-    "lw t2, "XTSTR(t2_OFFSET)"(sp) \n"
-    "lw t3, "XTSTR(t3_OFFSET)"(sp) \n"
-    "lw t4, "XTSTR(t4_OFFSET)"(sp) \n"
-    "lw t5, "XTSTR(t5_OFFSET)"(sp) \n"
-    "lw t6, "XTSTR(t6_OFFSET)"(sp) \n"
-    "lw a0, "XTSTR(a0_OFFSET)"(sp) \n"
-    "lw a1, "XTSTR(a1_OFFSET)"(sp) \n"
-    "lw a2, "XTSTR(a2_OFFSET)"(sp) \n"
-    "lw a3, "XTSTR(a3_OFFSET)"(sp) \n"
-    "lw a4, "XTSTR(a4_OFFSET)"(sp) \n"
-    "lw a5, "XTSTR(a5_OFFSET)"(sp) \n"
-    "lw a6, "XTSTR(a6_OFFSET)"(sp) \n"
-    "lw a7, "XTSTR(a7_OFFSET)"(sp) \n"
-    "lw s0, "XTSTR(s0_OFFSET)"(sp) \n"
-    "lw s1, "XTSTR(s1_OFFSET)"(sp) \n"
+    /* restore the caller-saved registers */
+    "lw ra, "XTSTR (ra_OFFSET)"(sp) \n"
+    "lw t0, "XTSTR (t0_OFFSET)"(sp) \n"
+    "lw t1, "XTSTR (t1_OFFSET)"(sp) \n"
+    "lw t2, "XTSTR (t2_OFFSET)"(sp) \n"
+    "lw t3, "XTSTR (t3_OFFSET)"(sp) \n"
+    "lw t4, "XTSTR (t4_OFFSET)"(sp) \n"
+    "lw t5, "XTSTR (t5_OFFSET)"(sp) \n"
+    "lw t6, "XTSTR (t6_OFFSET)"(sp) \n"
+    "lw a0, "XTSTR (a0_OFFSET)"(sp) \n"
+    "lw a1, "XTSTR (a1_OFFSET)"(sp) \n"
+    "lw a2, "XTSTR (a2_OFFSET)"(sp) \n"
+    "lw a3, "XTSTR (a3_OFFSET)"(sp) \n"
+    "lw a4, "XTSTR (a4_OFFSET)"(sp) \n"
+    "lw a5, "XTSTR (a5_OFFSET)"(sp) \n"
+    "lw a6, "XTSTR (a6_OFFSET)"(sp) \n"
+    "lw a7, "XTSTR (a7_OFFSET)"(sp) \n"
+    "lw s0, "XTSTR (s0_OFFSET)"(sp) \n"
+    "lw s1, "XTSTR (s1_OFFSET)"(sp) \n"

-    "addi sp, sp, "XTSTR(CONTEXT_FRAME_SIZE)" \n"
-    :
-    :
-    :
-    );
+    "addi sp, sp, "XTSTR (CONTEXT_FRAME_SIZE)" \n"
+    :
+    :
+    :
+    );
 }
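The XTSTR splicing used throughout this block relies on two-step macro stringification, so the numeric offsets from the context-frame header end up inside the assembly template. A generic sketch of the idiom (the definition and the offset value shown here are illustrative, not copied from the RIOT headers):

#define TSTR(x)  #x          /* stringify */
#define XTSTR(x) TSTR(x)     /* expand the argument first, then stringify */

#define ra_OFFSET 4          /* hypothetical value, for illustration only */

/* "sw ra, "XTSTR(ra_OFFSET)"(sp) \n"  expands to  "sw ra, 4(sp) \n" */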
@@ -30,5 +30,5 @@ uint64_t get_cycle_count(void)
                      "csrr %2, mcycleh\n\t" \
                      "bne %0, %2, 1b\n\t" \
                      : "=r" (hi), "=r" (lo), "=r" (hi2));
-    return lo | ((uint64_t) hi << 32);
+    return lo | ((uint64_t)hi << 32);
 }
@@ -62,26 +62,26 @@ int gpio_init(gpio_t pin, gpio_mode_t mode)
     /* Configure the mode */

     switch (mode) {
-    case GPIO_IN:
-        _set_pin_reg(GPIO_INPUT_EN, pin);
-        _clr_pin_reg(GPIO_OUTPUT_EN, pin);
-        _clr_pin_reg(GPIO_PULLUP_EN, pin);
-        break;
+        case GPIO_IN:
+            _set_pin_reg(GPIO_INPUT_EN, pin);
+            _clr_pin_reg(GPIO_OUTPUT_EN, pin);
+            _clr_pin_reg(GPIO_PULLUP_EN, pin);
+            break;

-    case GPIO_IN_PU:
-        _clr_pin_reg(GPIO_OUTPUT_EN, pin);
-        _set_pin_reg(GPIO_INPUT_EN, pin);
-        _set_pin_reg(GPIO_PULLUP_EN, pin);
-        break;
+        case GPIO_IN_PU:
+            _clr_pin_reg(GPIO_OUTPUT_EN, pin);
+            _set_pin_reg(GPIO_INPUT_EN, pin);
+            _set_pin_reg(GPIO_PULLUP_EN, pin);
+            break;

-    case GPIO_OUT:
-        _set_pin_reg(GPIO_OUTPUT_EN, pin);
-        _clr_pin_reg(GPIO_INPUT_EN, pin);
-        _clr_pin_reg(GPIO_PULLUP_EN, pin);
-        break;
+        case GPIO_OUT:
+            _set_pin_reg(GPIO_OUTPUT_EN, pin);
+            _clr_pin_reg(GPIO_INPUT_EN, pin);
+            _clr_pin_reg(GPIO_PULLUP_EN, pin);
+            break;

-    default:
-        return -1;
+        default:
+            return -1;
     }

     /* Configure the pin muxing for the GPIO */
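For reference, this driver is exercised through RIOT's generic GPIO API; a minimal usage sketch (the pin chosen is arbitrary):

#include "periph/gpio.h"

void pin_setup_example(void)
{
    gpio_t pin = GPIO_PIN(0, 5);    /* arbitrary pin, for illustration */

    /* gpio_init() returns a negative value for unsupported modes, as the
     * switch statement above shows */
    if (gpio_init(pin, GPIO_OUT) == 0) {
        gpio_set(pin);
    }
}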
@@ -108,7 +108,8 @@ void gpio_clear(gpio_t pin)

 void gpio_toggle(gpio_t pin)
 {
-    __atomic_fetch_xor(&GPIO_REG(GPIO_OUTPUT_VAL), (1 << pin), __ATOMIC_RELAXED);
+    __atomic_fetch_xor(&GPIO_REG(GPIO_OUTPUT_VAL), (1 << pin),
+                       __ATOMIC_RELAXED);
 }

 void gpio_write(gpio_t pin, int value)
@@ -133,18 +134,18 @@ void gpio_isr(int num)

     /* Clear interrupt */
     switch (isr_flank[pin]) {
-    case GPIO_FALLING:
-        _set_pin_reg(GPIO_FALL_IP, pin);
-        break;
+        case GPIO_FALLING:
+            _set_pin_reg(GPIO_FALL_IP, pin);
+            break;

-    case GPIO_RISING:
-        _set_pin_reg(GPIO_RISE_IP, pin);
-        break;
+        case GPIO_RISING:
+            _set_pin_reg(GPIO_RISE_IP, pin);
+            break;

-    case GPIO_BOTH:
-        _set_pin_reg(GPIO_FALL_IP, pin);
-        _set_pin_reg(GPIO_RISE_IP, pin);
-        break;
+        case GPIO_BOTH:
+            _set_pin_reg(GPIO_FALL_IP, pin);
+            _set_pin_reg(GPIO_RISE_IP, pin);
+            break;
     }
 }

@@ -187,21 +188,21 @@ void gpio_irq_enable(gpio_t pin)

     /* Enable interrupt for pin */
     switch (isr_flank[pin]) {
-    case GPIO_FALLING:
-        _set_pin_reg(GPIO_FALL_IE, pin);
-        break;
+        case GPIO_FALLING:
+            _set_pin_reg(GPIO_FALL_IE, pin);
+            break;

-    case GPIO_RISING:
-        _set_pin_reg(GPIO_RISE_IE, pin);
-        break;
+        case GPIO_RISING:
+            _set_pin_reg(GPIO_RISE_IE, pin);
+            break;

-    case GPIO_BOTH:
-        _set_pin_reg(GPIO_FALL_IE, pin);
-        _set_pin_reg(GPIO_RISE_IE, pin);
-        break;
+        case GPIO_BOTH:
+            _set_pin_reg(GPIO_FALL_IE, pin);
+            _set_pin_reg(GPIO_RISE_IE, pin);
+            break;

-    default:
-        break;
+        default:
+            break;
     }
 }

@@ -214,21 +215,21 @@ void gpio_irq_disable(gpio_t pin)

     /* Disable interrupt for pin */
     switch (isr_flank[pin]) {
-    case GPIO_FALLING:
-        _clr_pin_reg(GPIO_FALL_IE, pin);
-        break;
+        case GPIO_FALLING:
+            _clr_pin_reg(GPIO_FALL_IE, pin);
+            break;

-    case GPIO_RISING:
-        _clr_pin_reg(GPIO_RISE_IE, pin);
-        break;
+        case GPIO_RISING:
+            _clr_pin_reg(GPIO_RISE_IE, pin);
+            break;

-    case GPIO_BOTH:
-        _clr_pin_reg(GPIO_FALL_IE, pin);
-        _clr_pin_reg(GPIO_RISE_IE, pin);
-        break;
+        case GPIO_BOTH:
+            _clr_pin_reg(GPIO_FALL_IE, pin);
+            _clr_pin_reg(GPIO_RISE_IE, pin);
+            break;

-    default:
-        break;
+        default:
+            break;
     }
 }
 #endif /* MODULE_PERIPH_GPIO_IRQ */
@@ -42,7 +42,8 @@ static const uint16_t _fe310_i2c_speed[2] = { 100U, 400U };
 static inline int _wait_busy(i2c_t dev, uint32_t max_timeout_counter);
 static inline int _start(i2c_t dev, uint16_t address);
 static inline int _read(i2c_t dev, uint8_t *data, int length, uint8_t stop);
-static inline int _write(i2c_t dev, const uint8_t *data, int length, uint8_t stop);
+static inline int _write(i2c_t dev, const uint8_t *data, int length,
+                         uint8_t stop);

 /**
  * @brief Initialized bus locks
@@ -57,16 +58,22 @@ void i2c_init(i2c_t dev)
     mutex_init(&locks[dev]);

     /* Select IOF0 */
-    GPIO_REG(GPIO_IOF_SEL) &= ~((1 << i2c_config[dev].scl) | (1 << i2c_config[dev].sda));
+    GPIO_REG(GPIO_IOF_SEL) &=
+        ~((1 << i2c_config[dev].scl) | (1 << i2c_config[dev].sda));
     /* Enable IOF */
-    GPIO_REG(GPIO_IOF_EN) |= ((1 << i2c_config[dev].scl) | (1 << i2c_config[dev].sda));
+    GPIO_REG(GPIO_IOF_EN) |=
+        ((1 << i2c_config[dev].scl) | (1 << i2c_config[dev].sda));

-    _REG32(i2c_config[dev].addr, I2C_CONTROL) &= ~(I2C_CONTROL_IE | I2C_CONTROL_EN);
+    _REG32(i2c_config[dev].addr,
+           I2C_CONTROL) &= ~(I2C_CONTROL_IE | I2C_CONTROL_EN);

     /* Compute prescale: presc = (CORE_CLOCK / (5 * I2C_SPEED)) - 1 */
-    uint16_t presc = ((uint16_t)(cpu_freq() / 1000) / (5 * _fe310_i2c_speed[i2c_config[dev].speed])) - 1;
+    uint16_t presc =
+        ((uint16_t)(cpu_freq() / 1000) /
+         (5 * _fe310_i2c_speed[i2c_config[dev].speed])) - 1;

-    DEBUG("[i2c] init: computed prescale: %i (0x%02X|0x%02X)\n", presc, (presc >> 8), (presc & 0xFF));
+    DEBUG("[i2c] init: computed prescale: %i (0x%02X|0x%02X)\n", presc,
+          (presc >> 8), (presc & 0xFF));

     _REG32(i2c_config[dev].addr, I2C_PRESCALE_LO) = (presc & 0xFF);
     _REG32(i2c_config[dev].addr, I2C_PRESCALE_HI) = (presc >> 8);
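A sanity check of the prescaler formula in the hunk above (a sketch, assuming a 32 MHz core clock and the 100 kHz entry of _fe310_i2c_speed):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint32_t core_khz  = 32000;  /* assumed 32 MHz core clock, in kHz */
    uint32_t speed_khz = 100;    /* _fe310_i2c_speed[0] */

    /* presc = (CORE_CLOCK / (5 * I2C_SPEED)) - 1, both in kHz */
    unsigned presc = core_khz / (5 * speed_khz) - 1;

    /* prints: presc = 63 (LO = 0x3F, HI = 0x00) */
    printf("presc = %u (LO = 0x%02X, HI = 0x%02X)\n",
           presc, presc & 0xFF, presc >> 8);
    return 0;
}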
@@ -106,7 +113,7 @@ int i2c_read_bytes(i2c_t dev, uint16_t address, void *data, size_t length,
         return -EOPNOTSUPP;
     }

-     /* Check for wrong arguments given */
+    /* Check for wrong arguments given */
     if (data == NULL || length == 0) {
         return -EINVAL;
     }
@@ -114,6 +121,7 @@ int i2c_read_bytes(i2c_t dev, uint16_t address, void *data, size_t length,
     DEBUG("[i2c] read bytes\n");

     int ret = 0;
+
     if (!(flags & I2C_NOSTART)) {
         ret = _start(dev, ((address << 1) | I2C_READ));
         if (ret < 0) {
@@ -134,7 +142,8 @@ int i2c_read_bytes(i2c_t dev, uint16_t address, void *data, size_t length,
     return length;
 }

-int i2c_write_bytes(i2c_t dev, uint16_t address, const void *data, size_t length,
+int i2c_write_bytes(i2c_t dev, uint16_t address, const void *data,
+                    size_t length,
                     uint8_t flags)
 {
     assert(dev < I2C_NUMOF);
@@ -172,13 +181,15 @@ int i2c_write_bytes(i2c_t dev, uint16_t address, const void *data, size_t length
 static inline int _wait_busy(i2c_t dev, uint32_t max_timeout_counter)
 {
     uint32_t timeout_counter = 0;
+
     DEBUG("[i2c] wait for transfer\n");
     while (_REG32(i2c_config[dev].addr, I2C_STATUS) & I2C_STATUS_TIP) {
         if (++timeout_counter >= max_timeout_counter) {
             DEBUG("[i2c] transfer timeout\n");
             return -ETIMEDOUT;
         }
-        else if ((_REG32(i2c_config[dev].addr, I2C_STATUS) & I2C_STATUS_ALOST) == I2C_STATUS_ALOST) {
+        else if ((_REG32(i2c_config[dev].addr,
+                         I2C_STATUS) & I2C_STATUS_ALOST) == I2C_STATUS_ALOST) {
             /* Arbitration lost */
             DEBUG("[i2c] error: Arbitration lost\n");
             return -EAGAIN;
@@ -200,6 +211,7 @@ static inline int _start(i2c_t dev, uint16_t address)

     /* Ensure all bytes has been read */
     int ret = _wait_busy(dev, I2C_BUSY_TIMEOUT);
+
     if (ret < 0) {
         return ret;
     }
@@ -246,7 +258,8 @@ static inline int _read(i2c_t dev, uint8_t *data, int length, uint8_t stop)
     return 0;
 }

-static inline int _write(i2c_t dev, const uint8_t *data, int length, uint8_t stop)
+static inline int _write(i2c_t dev, const uint8_t *data, int length,
+                         uint8_t stop)
 {
     uint8_t count = 0;

@@ -51,7 +51,7 @@ static inline volatile uint32_t *_get_irq_reg(unsigned irq)

     return &PLIC_REG(PLIC_ENABLE_OFFSET +
                      (hart_id << PLIC_ENABLE_SHIFT_PER_TARGET)) +
-           (irq >> 5);              /* Intentionally outside the PLIC_REG macro */
+           (irq >> 5); /* Intentionally outside the PLIC_REG macro */
 }

 void plic_enable_interrupt(unsigned irq)
@@ -32,9 +32,9 @@ void pm_reboot(void)
     AON_REG(AON_WDOGCMP) = 0;
     //wdogconfig: : wdogrsten | enablealways | reset to 0 | max scale
     AON_REG(AON_WDOGKEY) = AON_WDOGKEY_VALUE;
-    AON_REG(AON_WDOGCFG) |= (AON_WDOGCFG_RSTEN | AON_WDOGCFG_ENALWAYS |\
-        AON_WDOGCFG_ZEROCMP | AON_WDOGCFG_SCALE) ;
+    AON_REG(AON_WDOGCFG) |= (AON_WDOGCFG_RSTEN | AON_WDOGCFG_ENALWAYS | \
+                             AON_WDOGCFG_ZEROCMP | AON_WDOGCFG_SCALE);
     AON_REG(AON_WDOGKEY) = AON_WDOGKEY_VALUE;
     AON_REG(AON_WDOGFEED) = AON_WDOGFEED_VALUE;
-    while(1) {}
+    while (1) {}
 }
@@ -57,7 +57,7 @@ static rtt_state_t rtt_callback;

 void rtt_isr(int num)
 {
-    (void) num;
+    (void)num;

     /* Clear intr */
     AON_REG(AON_RTCCMP) = RTT_MAX_VALUE;
@@ -135,11 +135,11 @@ void rtt_set_counter(uint32_t counter)
      */
     /* Use ifdef to avoid out of bound shift when RTT_SCALE == 0 */
 #if RTT_CLOCK_FREQUENCY == RTT_FREQUENCY
-    AON_REG(AON_RTCLO)  = counter;
-    AON_REG(AON_RTCHI)  = 0;
+    AON_REG(AON_RTCLO) = counter;
+    AON_REG(AON_RTCHI) = 0;
 #else
-    AON_REG(AON_RTCLO)  = counter << RTT_SCALE;
-    AON_REG(AON_RTCHI)  = counter >> (32 - RTT_SCALE);
+    AON_REG(AON_RTCLO) = counter << RTT_SCALE;
+    AON_REG(AON_RTCHI) = counter >> (32 - RTT_SCALE);
 #endif
 }
@@ -82,7 +82,7 @@ void spi_init_pins(spi_t dev)
                          (1 << spi_config[dev].sclk);

     /* Enable I/O Function 0 */
-    GPIO_REG(GPIO_IOF_EN)  |= spi1_pins;
+    GPIO_REG(GPIO_IOF_EN) |= spi1_pins;
     GPIO_REG(GPIO_IOF_SEL) &= ~spi1_pins;
 }

@@ -109,7 +109,7 @@ int spi_acquire(spi_t dev, spi_cs_t cs, spi_mode_t mode, spi_clk_t clk)

     mutex_lock(&lock);

-    _REG32(spi_config[dev].addr, SPI_REG_SCKDIV)  = _spi_clks_config[clk];
+    _REG32(spi_config[dev].addr, SPI_REG_SCKDIV) = _spi_clks_config[clk];
     _REG32(spi_config[dev].addr, SPI_REG_SCKMODE) = mode;

     return SPI_OK;
@@ -141,7 +141,7 @@ void spi_transfer_bytes(spi_t dev, spi_cs_t cs, bool cont,
         _REG32(spi_config[dev].addr, SPI_REG_TXFIFO) = out ? out[i] : 0;

         uint32_t rxdata;
-         do {
+        do {
             rxdata = _REG32(spi_config[dev].addr, SPI_REG_RXFIFO);
         } while (rxdata & SPI_RXFIFO_EMPTY);

@@ -55,7 +55,8 @@ int timer_init(tim_t dev, uint32_t freq, timer_cb_t cb, void *arg)


     /* reset timer counter */
-    volatile uint64_t *mtime = (uint64_t *) (CLINT_CTRL_ADDR + CLINT_MTIME);
+    volatile uint64_t *mtime = (uint64_t *)(CLINT_CTRL_ADDR + CLINT_MTIME);
+
     *mtime = 0;

     return 0;
@@ -63,13 +64,13 @@ int timer_init(tim_t dev, uint32_t freq, timer_cb_t cb, void *arg)

 int timer_set(tim_t dev, int channel, unsigned int timeout)
 {
-    volatile uint64_t *mtime = (uint64_t *) (CLINT_CTRL_ADDR + CLINT_MTIME);
+    volatile uint64_t *mtime = (uint64_t *)(CLINT_CTRL_ADDR + CLINT_MTIME);
     volatile uint64_t *mtimecmp =
-        (uint64_t *) (CLINT_CTRL_ADDR + CLINT_MTIMECMP);
+        (uint64_t *)(CLINT_CTRL_ADDR + CLINT_MTIMECMP);

     /* Compute delta for timer */
     uint64_t now = *mtime;
-    uint64_t then = now + (uint64_t) timeout;
+    uint64_t then = now + (uint64_t)timeout;

     if (dev != 0 || channel != 0) {
         return -1;
@@ -89,13 +90,13 @@ int timer_set(tim_t dev, int channel, unsigned int timeout)
 int timer_set_absolute(tim_t dev, int channel, unsigned int value)
 {

-    volatile uint64_t *mtime = (uint64_t *) (CLINT_CTRL_ADDR + CLINT_MTIME);
+    volatile uint64_t *mtime = (uint64_t *)(CLINT_CTRL_ADDR + CLINT_MTIME);
     volatile uint64_t *mtimecmp =
-        (uint64_t *) (CLINT_CTRL_ADDR + CLINT_MTIMECMP);
+        (uint64_t *)(CLINT_CTRL_ADDR + CLINT_MTIMECMP);

     /* Compute absolute for timer */
     uint64_t now = *mtime;
-    uint64_t then = (now & 0xFFFFFFFF00000000) + (uint64_t) value;
+    uint64_t then = (now & 0xFFFFFFFF00000000) + (uint64_t)value;

     if (dev != 0 || channel != 0) {
         return -1;
@@ -123,14 +124,14 @@ int timer_clear(tim_t dev, int channel)

 unsigned int timer_read(tim_t dev)
 {
-    uint32_t lo = *(volatile uint32_t *) (CLINT_CTRL_ADDR + CLINT_MTIME);
+    uint32_t lo = *(volatile uint32_t *)(CLINT_CTRL_ADDR + CLINT_MTIME);

     if (dev != 0) {
         return 0;
     }

     /* Read current timer value */
-    return (unsigned int) lo;
+    return (unsigned int)lo;
 }

 void timer_start(tim_t dev)
@@ -157,7 +158,7 @@ void timer_stop(tim_t dev)
 void timer_isr(void)
 {
     volatile uint64_t *mtimecmp =
-        (uint64_t *) (CLINT_CTRL_ADDR + CLINT_MTIMECMP);
+        (uint64_t *)(CLINT_CTRL_ADDR + CLINT_MTIMECMP);

     /* Clear intr */
     clear_csr(mie, MIP_MTIP);
@@ -52,14 +52,14 @@ static inline void _uart_isr(uart_t dev)
 void uart_isr(int num)
 {
     switch (num) {
-    case INT_UART0_BASE:
-        _uart_isr(0);
-        break;
-    case INT_UART1_BASE:
-        _uart_isr(1);
-        break;
-    default:
-        break;
+        case INT_UART0_BASE:
+            _uart_isr(0);
+            break;
+        case INT_UART1_BASE:
+            _uart_isr(1);
+            break;
+        default:
+            break;
     }
 }

@@ -136,8 +136,9 @@ void uart_write(uart_t dev, const uint8_t *data, size_t len)
 {
     for (size_t i = 0; i < len; i++) {
         /* Wait for FIFO to empty */
-        while ((_REG32(uart_config[dev].addr, UART_REG_TXFIFO) & UART_TXFIFO_FULL)
-               == (uint32_t)UART_TXFIFO_FULL) {};
+        while ((_REG32(uart_config[dev].addr,
+                       UART_REG_TXFIFO) & UART_TXFIFO_FULL)
+               == (uint32_t)UART_TXFIFO_FULL) {}

         /* Write a byte */
         _REG32(uart_config[dev].addr, UART_REG_TXFIFO) = data[i];
@@ -146,10 +147,10 @@ void uart_write(uart_t dev, const uint8_t *data, size_t len)

 void uart_poweron(uart_t dev)
 {
-    (void) dev;
+    (void)dev;
 }

 void uart_poweroff(uart_t dev)
 {
-    (void) dev;
+    (void)dev;
 }

@@ -59,6 +59,7 @@ void wdt_kick(void)
 static inline uint8_t _scale(uint32_t count)
 {
     uint8_t scale = 0;
+
     while (count > (UINT16_MAX - 1)) {
         count >>= 1;
         scale++;
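To illustrate the loop above with arbitrary numbers: a tick count of 200000 does not fit the 16-bit comparator, so it is halved twice (200000 -> 100000 -> 50000), leaving count = 50000 with scale = 2; the scale value then selects the watchdog's 2^scale prescaler so the lost factor is re-applied in hardware.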
@@ -73,9 +73,9 @@
  *
  */
 char *thread_stack_init(thread_task_func_t task_func,
-                         void *arg,
-                         void *stack_start,
-                         int stack_size)
+                        void *arg,
+                        void *stack_start,
+                        int stack_size)
 {
     struct context_switch_frame *sf;
     uint32_t *stk_top;
@@ -97,25 +97,26 @@ char *thread_stack_init(thread_task_func_t task_func,
     stk_top = (uint32_t *)((uintptr_t)stk_top - sizeof(*sf));

     /* populate the stack frame with default values for starting the thread. */
-    sf = (struct context_switch_frame *) stk_top;
+    sf = (struct context_switch_frame *)stk_top;

     /* Clear stack frame */
     memset(sf, 0, sizeof(*sf));

     /* set initial reg values */
-    sf->pc = (uint32_t) task_func;
-    sf->a0 = (uint32_t) arg;
+    sf->pc = (uint32_t)task_func;
+    sf->a0 = (uint32_t)arg;

     /* if the thread exits go to sched_task_exit() */
-    sf->ra = (uint32_t) sched_task_exit;
+    sf->ra = (uint32_t)sched_task_exit;

-    return (char *) stk_top;
+    return (char *)stk_top;
 }

 void thread_print_stack(void)
 {
     int count = 0;
     thread_t *active_thread = thread_get_active();
+
     if (!active_thread) {
         return;
     }
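The frame built above is what a newly created thread starts from: pc points at the thread function, a0 carries its argument, and ra points at sched_task_exit() so a returning thread gets cleaned up. From application code this is driven through thread_create(); a minimal sketch:

#include "thread.h"

static char worker_stack[THREAD_STACKSIZE_DEFAULT];

static void *worker(void *arg)
{
    (void)arg;    /* delivered in a0 via the stack frame built above */
    return NULL;
}

void worker_start_example(void)
{
    thread_create(worker_stack, sizeof(worker_stack),
                  THREAD_PRIORITY_MAIN - 1, 0,
                  worker, NULL, "worker");
}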
@@ -182,13 +183,13 @@ static inline void _ecall_dispatch(uint32_t num, void *ctx)
 {
     /* function arguments are in a0 and a1 as per ABI */
     __asm__ volatile (
-        "mv a0, %[num] \n"
-        "mv a1, %[ctx] \n"
-        "ECALL\n"
-        : /* No outputs */
-        : [num] "r" (num), [ctx] "r" (ctx)
-        : "memory"
-    );
+        "mv a0, %[num] \n"
+        "mv a1, %[ctx] \n"
+        "ECALL\n"
+        : /* No outputs */
+        : [num] "r" (num), [ctx] "r" (ctx)
+        : "memory"
+        );
 }

 void thread_yield_higher(void)
@@ -206,6 +207,7 @@ void heap_stats(void)

     long int heap_size = &_eheap - &_sheap;
     struct mallinfo minfo = mallinfo();
+
     printf("heap: %ld (used %u, free %ld) [bytes]\n",
            heap_size, minfo.uordblks, heap_size - minfo.uordblks);
 }