/*
 * Copyright (C) 2014, Freie Universitaet Berlin (FUB) & INRIA.
 * All rights reserved.
 *
 * This file is subject to the terms and conditions of the GNU Lesser
 * General Public License v2.1. See the file LICENSE in the top level
 * directory for more details.
 */
/**
 * @ingroup     cpu_msp430
 * @brief       Texas Instruments MSP430 specific code
 *
 * @{
 * @file
 * @brief       Texas Instruments MSP430 specific code
 *
 */
|
|
|
|
|
2017-08-24 21:03:16 +02:00
|
|
|
#ifndef CPU_H
|
|
|
|
#define CPU_H
|
|
|
|
|
2024-04-19 21:21:32 +02:00
|
|
|
#include <stdint.h>
|
2013-12-09 11:12:39 +01:00
|
|
|
|
|
|
|
#include <msp430.h>
|
|
|
|
|
|
|
|
#include "sched.h"
|
2016-03-01 16:34:37 +01:00
|
|
|
#include "thread.h"
|
2010-09-22 15:10:42 +02:00
|
|
|
|
2014-10-13 10:53:20 +02:00
|
|
|
#ifdef __cplusplus
|
|
|
|
extern "C" {
|
|
|
|
#endif
|
|
|
|
|
2015-05-26 16:45:34 +02:00
|
|
|
/**
|
|
|
|
* @brief Wordsize in bit for MSP430 platforms
|
|
|
|
*/
|
2010-09-22 15:10:42 +02:00
|
|
|
#define WORDSIZE 16
|
|
|
|
|
2024-04-22 15:01:46 +02:00
|
|
|
/**
|
|
|
|
* @brief MSP430 has power management support
|
|
|
|
*/
|
|
|
|
#define PROVIDES_PM_SET_LOWEST
|
|
|
|
|
2015-09-02 16:29:07 +02:00
|
|
|
/**
|
|
|
|
* @brief Macro for defining interrupt service routines
|
|
|
|
*/
|
|
|
|
#define ISR(a,b) void __attribute__((naked, interrupt (a))) b(void)
|
|
|
|
|
2015-05-26 16:45:34 +02:00
|
|
|
/**
|
|
|
|
* @brief The current ISR state (inside or not)
|
|
|
|
*/
|
2016-03-19 09:25:47 +01:00
|
|
|
extern volatile int __irq_is_in;
|
2010-09-22 15:10:42 +02:00
|
|
|
|
2015-05-26 16:45:34 +02:00
|
|
|
/**
|
|
|
|
* @brief Save the current thread context from inside an ISR
|
|
|
|
*/
|
2015-09-14 15:36:36 +02:00
|
|
|
/**
 * @brief   Save the current thread's register context from inside an ISR
 *
 * Pushes the general-purpose registers r15..r4 onto the interrupted
 * thread's own stack (PC and SR were already pushed by the hardware on
 * interrupt entry), then records the resulting stack pointer in the
 * active thread's control block so the scheduler can resume it later.
 *
 * Must be inlined: a real function call would disturb the stack layout
 * this code is carefully constructing.
 */
static inline void __attribute__((always_inline)) __save_context(void)
{
    /* push r15 down to r4 in descending order, so that
     * __restore_context() can pop them back in ascending order */
    __asm__("push r15");
    __asm__("push r14");
    __asm__("push r13");
    __asm__("push r12");
    __asm__("push r11");
    __asm__("push r10");
    __asm__("push r9");
    __asm__("push r8");
    __asm__("push r7");
    __asm__("push r6");
    __asm__("push r5");
    __asm__("push r4");

    /* r1 is the MSP430 stack pointer; store it in the active thread's
     * saved stack pointer field */
    __asm__("mov.w r1,%0" : "=r"(thread_get_active()->sp));
}
|
|
|
|
|
2015-05-26 16:45:34 +02:00
|
|
|
/**
|
|
|
|
* @brief Restore the thread context from inside an ISR
|
|
|
|
*/
|
2015-09-14 15:36:36 +02:00
|
|
|
/**
 * @brief   Restore the active thread's register context from inside an ISR
 *
 * Loads the stack pointer saved by __save_context() (possibly for a
 * different thread, if the scheduler ran in between), pops registers
 * r4..r15 in the reverse order they were pushed, and executes `reti`,
 * which restores SR and PC from the stack and returns to the thread.
 *
 * NOTE: this function never returns to its caller — `reti` transfers
 * control directly to the resumed thread.
 */
static inline void __attribute__((always_inline)) __restore_context(void)
{
    /* reload r1 (the stack pointer) from the active thread's TCB */
    __asm__("mov.w %0,r1" : : "m"(thread_get_active()->sp));

    /* pop r4 up to r15 — mirror image of the pushes in __save_context() */
    __asm__("pop r4");
    __asm__("pop r5");
    __asm__("pop r6");
    __asm__("pop r7");
    __asm__("pop r8");
    __asm__("pop r9");
    __asm__("pop r10");
    __asm__("pop r11");
    __asm__("pop r12");
    __asm__("pop r13");
    __asm__("pop r14");
    __asm__("pop r15");
    /* return from interrupt: restores SR and PC pushed on interrupt entry */
    __asm__("reti");
}
|
|
|
|
|
2015-05-26 16:45:34 +02:00
|
|
|
/**
|
|
|
|
* @brief Run this code on entering interrupt routines
|
|
|
|
*/
|
2015-09-14 15:36:36 +02:00
|
|
|
/**
 * @brief   Common prologue to run on entering an interrupt service routine
 *
 * 1. Clears the low-power-mode bits in the status register that the
 *    hardware pushed onto the stack, so that `reti` resumes the CPU in
 *    active mode instead of dropping straight back into a sleep mode.
 * 2. Saves the interrupted thread's full register context.
 * 3. Switches r1 (the stack pointer) to the dedicated ISR stack.
 * 4. Flags that we are now executing in interrupt context.
 */
static inline void __attribute__((always_inline)) __enter_isr(void)
{
    /* modify the status register pushed to the stack so we do not go
     * right back into power-saving mode on return from interrupt */
    __asm__ volatile(
        "bic %[mask], 0(SP)" "\n\t"
        : /* no outputs */
        : [mask] "i"(CPUOFF | SCG0 | SCG1 | OSCOFF)
        : "memory"
    );
    extern char __stack; /* defined by linker script to end of RAM */
    __save_context();
    /* switch to the ISR stack at the end of RAM; must happen only after
     * the thread's context (and SP) have been saved above */
    __asm__("mov.w %0,r1" : : "i"(&__stack));
    __irq_is_in = 1;
}
|
|
|
|
|
2015-05-26 16:45:34 +02:00
|
|
|
/**
|
|
|
|
* @brief Run this code on exiting interrupt routines
|
|
|
|
*/
|
2015-09-14 15:36:36 +02:00
|
|
|
/**
 * @brief   Common epilogue to run on exiting an interrupt service routine
 *
 * Clears the in-ISR flag, runs the scheduler if a context switch was
 * requested while the ISR executed (which may change the active thread),
 * and finally restores the — possibly new — active thread's context.
 * __restore_context() ends with `reti`, so this function never returns
 * to its caller.
 */
static inline void __attribute__((always_inline)) __exit_isr(void)
{
    /* must be cleared before a potential context switch below */
    __irq_is_in = 0;

    if (sched_context_switch_request) {
        /* may select a different thread as the active one */
        sched_run();
    }

    __restore_context();
}
|
|
|
|
|
2015-09-04 16:51:08 +02:00
|
|
|
/**
 * @brief   Returns the last instruction's address (the caller's return address)
 */
|
2024-04-19 21:21:32 +02:00
|
|
|
/* Forced inline so that __builtin_return_address(0) observes the frame of
 * the function that invoked us, i.e. the program counter at the call site. */
__attribute__((always_inline))
static inline uintptr_t cpu_get_caller_pc(void)
{
    void *call_site = __builtin_return_address(0);
    return (uintptr_t)call_site;
}
|
|
|
|
|
2014-10-13 10:53:20 +02:00
|
|
|
#ifdef __cplusplus
|
|
|
|
}
|
|
|
|
#endif
|
|
|
|
|
2017-01-18 13:00:05 +01:00
|
|
|
#endif /* CPU_H */
|
2017-08-24 21:03:16 +02:00
|
|
|
/** @} */
|