1
0
mirror of https://github.com/RIOT-OS/RIOT.git synced 2024-12-29 04:50:03 +01:00
RIOT/cpu/msp430/include/cpu.h
Marian Buschsieweke 43f07fa261
cpu/msp430: implement power management
This implements `pm_set_lowest()` for the MSP430. Unlike most other
platforms, it intentionally does not use pm_layered. It is pretty
similar to `pm_layered` in that it does use reference counters, but it
uses them for two independent clock sources.

The main difference is that the low frequency clock domain can be
disabled even when the high frequency clock is still active. With the
layers, disabling layer n-1 while layer n is still blocked would not
work.
2024-04-26 15:52:41 +02:00

145 lines
3.0 KiB
C

/*
* Copyright (C) 2014, Freie Universitaet Berlin (FUB) & INRIA.
* All rights reserved.
*
* This file is subject to the terms and conditions of the GNU Lesser
* General Public License v2.1. See the file LICENSE in the top level
* directory for more details.
*/
/**
* @ingroup cpu_msp430
* @brief Texas Instruments MSP430 specific code
*
* @{
* @file
* @brief Texas Instruments MSP430 specific code
*
*/
#ifndef CPU_H
#define CPU_H
#include <stdint.h>
#include <msp430.h>
#include "sched.h"
#include "thread.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* @brief Wordsize in bit for MSP430 platforms
*/
#define WORDSIZE 16
/**
* @brief MSP430 has power management support
*/
#define PROVIDES_PM_SET_LOWEST
/**
* @brief Macro for defining interrupt service routines
*/
#define ISR(a,b) void __attribute__((naked, interrupt (a))) b(void)
/**
* @brief The current ISR state (inside or not)
*/
extern volatile int __irq_is_in;
/**
 * @brief Save the current thread context from inside an ISR
 *
 * Pushes the general purpose registers r4-r15 onto the interrupted
 * thread's stack (PC and the status register were already pushed by the
 * interrupt entry itself) and then records the resulting stack pointer
 * (r1 on MSP430) in the active thread's TCB, so that the scheduler can
 * later resume this thread via __restore_context().
 */
static inline void __attribute__((always_inline)) __save_context(void)
{
__asm__("push r15");
__asm__("push r14");
__asm__("push r13");
__asm__("push r12");
__asm__("push r11");
__asm__("push r10");
__asm__("push r9");
__asm__("push r8");
__asm__("push r7");
__asm__("push r6");
__asm__("push r5");
__asm__("push r4");
/* r1 is the stack pointer; store it into the active thread's TCB */
__asm__("mov.w r1,%0" : "=r"(thread_get_active()->sp));
}
/**
 * @brief Restore the thread context from inside an ISR
 *
 * Loads the stack pointer saved in the (possibly newly scheduled)
 * active thread's TCB, pops r4-r15 in the exact reverse order of
 * __save_context(), and issues `reti`, which restores the status
 * register and PC from the stack — i.e. this function does not return
 * to its caller but resumes the selected thread.
 */
static inline void __attribute__((always_inline)) __restore_context(void)
{
/* switch to the stack of the thread to be resumed */
__asm__("mov.w %0,r1" : : "m"(thread_get_active()->sp));
__asm__("pop r4");
__asm__("pop r5");
__asm__("pop r6");
__asm__("pop r7");
__asm__("pop r8");
__asm__("pop r9");
__asm__("pop r10");
__asm__("pop r11");
__asm__("pop r12");
__asm__("pop r13");
__asm__("pop r14");
__asm__("pop r15");
/* pops SR and PC pushed on interrupt entry and returns to the thread */
__asm__("reti");
}
/**
 * @brief Run this code on entering interrupt routines
 *
 * Clears the low power mode bits (CPUOFF, SCG0, SCG1, OSCOFF) in the
 * status register that was pushed onto the interrupted thread's stack
 * on interrupt entry, so that the eventual `reti` does not drop the CPU
 * right back into a power saving mode. It then saves the thread context
 * and switches r1 (the stack pointer) to the dedicated ISR stack.
 */
static inline void __attribute__((always_inline)) __enter_isr(void)
{
/* modify state register pushed to stack to not go to power saving
 * mode right again */
__asm__ volatile(
"bic %[mask], 0(SP)" "\n\t"
: /* no outputs */
: [mask] "i"(CPUOFF | SCG0 | SCG1 | OSCOFF)
: "memory"
);
extern char __stack; /* defined by linker script to end of RAM */
__save_context();
/* run the rest of the ISR on the dedicated ISR stack at end of RAM */
__asm__("mov.w %0,r1" : : "i"(&__stack));
/* flag that we are executing in interrupt context */
__irq_is_in = 1;
}
/**
 * @brief Run this code on exiting interrupt routines
 *
 * Clears the in-ISR flag, runs the scheduler if a context switch was
 * requested while the ISR executed, and restores the context of the
 * (then) active thread. Note that this does not return to the caller:
 * __restore_context() ends with `reti`.
 */
static inline void __attribute__((always_inline)) __exit_isr(void)
{
__irq_is_in = 0;
/* pick the next thread now, so __restore_context() resumes it */
if (sched_context_switch_request) {
sched_run();
}
__restore_context();
}
/**
 * @brief Get the address this function's caller will return to
 *
 * @return the return address of the current function, as an integer
 */
__attribute__((always_inline))
static inline uintptr_t cpu_get_caller_pc(void)
{
    const void *ret_addr = __builtin_return_address(0);
    return (uintptr_t)ret_addr;
}
#ifdef __cplusplus
}
#endif
#endif /* CPU_H */
/** @} */