Zephyr API Documentation  2.7.0-rc2
arch_interface.h
/*
 * Copyright (c) 2019 Intel Corporation.
 *
 * SPDX-License-Identifier: Apache-2.0
 */

#ifndef ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_
#define ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_

#ifndef _ASMLANGUAGE
#include <toolchain.h>
#include <stddef.h>
#include <zephyr/types.h>
#include <arch/cpu.h>
#include <irq_offload.h>

#ifdef __cplusplus
extern "C" {
#endif

/* NOTE: We cannot pull in kernel.h here, need some forward declarations */
struct k_thread;
struct k_mem_domain;

typedef struct z_thread_stack_element k_thread_stack_t;

typedef void (*k_thread_entry_t)(void *p1, void *p2, void *p3);

static inline uint32_t arch_k_cycle_get_32(void);

void arch_cpu_idle(void);

void arch_cpu_atomic_idle(unsigned int key);

typedef FUNC_NORETURN void (*arch_cpustart_t)(void *data);

void arch_start_cpu(int cpu_num, k_thread_stack_t *stack, int sz,
		    arch_cpustart_t fn, void *arg);

bool arch_cpu_active(int cpu_num);

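/*
 * Illustrative sketch only, not part of this header: how SMP bring-up code
 * might hand a secondary CPU to arch_start_cpu(). The stack object, stack
 * macros and entry function below are hypothetical and would normally live
 * in kernel code that includes <kernel.h>.
 *
 * @code
 * K_KERNEL_STACK_DEFINE(cpu1_stack, 1024);
 *
 * static FUNC_NORETURN void cpu1_entry(void *arg)
 * {
 *         ARG_UNUSED(arg);
 *
 *         while (true) {
 *                 arch_cpu_idle();
 *         }
 * }
 *
 * void bring_up_cpu1(void)
 * {
 *         arch_start_cpu(1, cpu1_stack, K_KERNEL_STACK_SIZEOF(cpu1_stack),
 *                        cpu1_entry, NULL);
 *
 *         while (!arch_cpu_active(1)) {
 *                 // spin until the secondary CPU reports itself active
 *         }
 * }
 * @endcode
 */
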
static inline unsigned int arch_irq_lock(void);

static inline void arch_irq_unlock(unsigned int key);

static inline bool arch_irq_unlocked(unsigned int key);

void arch_irq_disable(unsigned int irq);

void arch_irq_enable(unsigned int irq);

int arch_irq_is_enabled(unsigned int irq);

int arch_irq_connect_dynamic(unsigned int irq, unsigned int priority,
			     void (*routine)(const void *parameter),
			     const void *parameter, uint32_t flags);

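/*
 * Illustrative sketch with a made-up IRQ line, priority and flags value:
 * installing an ISR at run time with arch_irq_connect_dynamic() and then
 * unmasking the line.
 *
 * @code
 * static void my_isr(const void *param)
 * {
 *         // param is the pointer registered below
 * }
 *
 * void install_my_isr(void)
 * {
 *         (void)arch_irq_connect_dynamic(25, 2, my_isr, NULL, 0);
 *         arch_irq_enable(25);
 * }
 * @endcode
 */
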
#ifdef CONFIG_IRQ_OFFLOAD
void arch_irq_offload(irq_offload_routine_t routine, const void *parameter);
#endif /* CONFIG_IRQ_OFFLOAD */

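/*
 * Illustrative sketch: running a routine in interrupt context through the
 * IRQ offload hook (used mostly by tests). The routine signature matches
 * irq_offload_routine_t from <irq_offload.h>.
 *
 * @code
 * static void offloaded_work(const void *param)
 * {
 *         // runs in interrupt context
 * }
 *
 * arch_irq_offload(offloaded_work, NULL);
 * @endcode
 */
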
#ifdef CONFIG_SMP
static inline struct _cpu *arch_curr_cpu(void);

void arch_sched_ipi(void);
#endif /* CONFIG_SMP */

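/*
 * Illustrative sketch; assumes the kernel's internal struct _cpu definition
 * (which provides an 'id' member) and is only meaningful from kernel code:
 *
 * @code
 * int my_id = arch_curr_cpu()->id;
 *
 * arch_sched_ipi();   // nudge the other CPUs to re-examine their run queues
 * @endcode
 */
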
#ifdef CONFIG_USERSPACE
static inline uintptr_t arch_syscall_invoke0(uintptr_t call_id);

static inline uintptr_t arch_syscall_invoke1(uintptr_t arg1,
					     uintptr_t call_id);

static inline uintptr_t arch_syscall_invoke2(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t call_id);

static inline uintptr_t arch_syscall_invoke3(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3,
					     uintptr_t call_id);

static inline uintptr_t arch_syscall_invoke4(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t call_id);

static inline uintptr_t arch_syscall_invoke5(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5,
					     uintptr_t call_id);

static inline uintptr_t arch_syscall_invoke6(uintptr_t arg1, uintptr_t arg2,
					     uintptr_t arg3, uintptr_t arg4,
					     uintptr_t arg5, uintptr_t arg6,
					     uintptr_t call_id);

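/*
 * Illustrative sketch with a hypothetical call ID: a user-mode stub in the
 * spirit of what the syscall generator emits, marshalling two arguments
 * through arch_syscall_invoke2().
 *
 * @code
 * // K_SYSCALL_MY_CALL is an invented ID used here for illustration only
 * static inline int my_call(void *buf, size_t len)
 * {
 *         return (int)arch_syscall_invoke2((uintptr_t)buf, (uintptr_t)len,
 *                                          K_SYSCALL_MY_CALL);
 * }
 * @endcode
 */
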
static inline bool arch_is_user_context(void);

int arch_mem_domain_max_partitions_get(void);

#ifdef CONFIG_ARCH_MEM_DOMAIN_DATA
int arch_mem_domain_init(struct k_mem_domain *domain);
#endif /* CONFIG_ARCH_MEM_DOMAIN_DATA */

#ifdef CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API
void arch_mem_domain_thread_add(struct k_thread *thread);

void arch_mem_domain_thread_remove(struct k_thread *thread);

void arch_mem_domain_partition_remove(struct k_mem_domain *domain,
				      uint32_t partition_id);

void arch_mem_domain_partition_add(struct k_mem_domain *domain,
				   uint32_t partition_id);
#endif /* CONFIG_ARCH_MEM_DOMAIN_SYNCHRONOUS_API */

int arch_buffer_validate(void *addr, size_t size, int write);

FUNC_NORETURN void arch_user_mode_enter(k_thread_entry_t user_entry,
					void *p1, void *p2, void *p3);

FUNC_NORETURN void arch_syscall_oops(void *ssf);

size_t arch_user_string_nlen(const char *s, size_t maxsize, int *err);
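
/*
 * Illustrative sketch: how a syscall handler might vet a user buffer with
 * arch_buffer_validate() before touching it. A zero return meaning
 * "accessible" and write == 0 meaning "check for read access" are
 * assumptions about the contract, not spelled out in this listing.
 *
 * @code
 * int copy_from_user(void *dst, const void *user_buf, size_t len)
 * {
 *         if (arch_buffer_validate((void *)user_buf, len, 0) != 0) {
 *                 return -1;      // caller may not read this region
 *         }
 *
 *         memcpy(dst, user_buf, len);
 *         return 0;
 * }
 * @endcode
 */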
#endif /* CONFIG_USERSPACE */

#ifndef CONFIG_ARCH_HAS_COHERENCE
static inline bool arch_mem_coherent(void *ptr)
{
	ARG_UNUSED(ptr);
	return true;
}
#endif
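
/*
 * Illustrative sketch; 'shared_msg' is a hypothetical object being handed
 * to another CPU. On architectures without CONFIG_ARCH_HAS_COHERENCE the
 * stub above makes this check trivially true.
 *
 * @code
 * if (!arch_mem_coherent(shared_msg)) {
 *         // shared_msg sits in cached, incoherent memory and must be
 *         // copied (or otherwise made visible) before another CPU reads it
 * }
 * @endcode
 */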

#ifndef CONFIG_KERNEL_COHERENCE
static inline void arch_cohere_stacks(struct k_thread *old_thread,
				      void *old_switch_handle,
				      struct k_thread *new_thread)
{
	ARG_UNUSED(old_thread);
	ARG_UNUSED(old_switch_handle);
	ARG_UNUSED(new_thread);
}
#endif

#ifdef CONFIG_GDBSTUB
void arch_gdb_init(void);

void arch_gdb_continue(void);

void arch_gdb_step(void);

#endif
#if defined(CONFIG_CACHE_MANAGEMENT) && defined(CONFIG_HAS_ARCH_CACHE)
void arch_dcache_enable(void);

void arch_dcache_disable(void);

void arch_icache_enable(void);

void arch_icache_disable(void);

int arch_dcache_all(int op);

int arch_dcache_range(void *addr, size_t size, int op);

int arch_icache_all(int op);

int arch_icache_range(void *addr, size_t size, int op);

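/*
 * Illustrative sketch; the K_CACHE_* operation constants are assumed to
 * come from <cache.h> and are not defined in this file. Write back and
 * invalidate a buffer before handing it to a DMA engine:
 *
 * @code
 * static uint8_t dma_buf[256];
 *
 * void dma_prepare(void)
 * {
 *         (void)arch_dcache_range(dma_buf, sizeof(dma_buf), K_CACHE_WB_INVD);
 * }
 * @endcode
 */
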
#ifdef CONFIG_DCACHE_LINE_SIZE_DETECT
size_t arch_dcache_line_size_get(void);
#endif /* CONFIG_DCACHE_LINE_SIZE_DETECT */

#ifdef CONFIG_ICACHE_LINE_SIZE_DETECT
size_t arch_icache_line_size_get(void);
#endif /* CONFIG_ICACHE_LINE_SIZE_DETECT */

#endif /* CONFIG_CACHE_MANAGEMENT && CONFIG_HAS_ARCH_CACHE */

#ifdef CONFIG_TIMING_FUNCTIONS
#include <timing/types.h>

void arch_timing_init(void);

void arch_timing_start(void);

void arch_timing_stop(void);

timing_t arch_timing_counter_get(void);

uint64_t arch_timing_cycles_get(volatile timing_t *const start,
				volatile timing_t *const end);

uint64_t arch_timing_freq_get(void);

uint64_t arch_timing_cycles_to_ns(uint64_t cycles);

uint64_t arch_timing_cycles_to_ns_avg(uint64_t cycles, uint32_t count);

uint32_t arch_timing_freq_get_mhz(void);

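/*
 * Illustrative sketch: measuring a stretch of code with the timing hooks
 * declared above and converting the result to nanoseconds.
 *
 * @code
 * timing_t start, end;
 * uint64_t cycles, ns;
 *
 * arch_timing_init();
 * arch_timing_start();
 *
 * start = arch_timing_counter_get();
 * // ... code under measurement ...
 * end = arch_timing_counter_get();
 *
 * cycles = arch_timing_cycles_get(&start, &end);
 * ns = arch_timing_cycles_to_ns(cycles);
 *
 * arch_timing_stop();
 * @endcode
 */
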
#endif /* CONFIG_TIMING_FUNCTIONS */

#ifdef CONFIG_PCIE_MSI_MULTI_VECTOR

struct msi_vector;
typedef struct msi_vector msi_vector_t;

uint8_t arch_pcie_msi_vectors_allocate(unsigned int priority,
				       msi_vector_t *vectors,
				       uint8_t n_vector);

bool arch_pcie_msi_vector_connect(msi_vector_t *vector,
				  void (*routine)(const void *parameter),
				  const void *parameter,
				  uint32_t flags);

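/*
 * Illustrative sketch with a made-up priority, vector count and ISR:
 * allocate two MSI vectors and attach a handler to the first.
 *
 * @code
 * static void msi_isr(const void *dev)
 * {
 *         // service the MSI interrupt for 'dev'
 * }
 *
 * void setup_msi(void)
 * {
 *         msi_vector_t vectors[2];
 *         uint8_t n = arch_pcie_msi_vectors_allocate(2, vectors, 2);
 *
 *         if (n > 0) {
 *                 (void)arch_pcie_msi_vector_connect(&vectors[0], msi_isr,
 *                                                    NULL, 0);
 *         }
 * }
 * @endcode
 */
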
#endif /* CONFIG_PCIE_MSI_MULTI_VECTOR */

#ifdef __cplusplus
}
#endif /* __cplusplus */

#include <arch/arch_inlines.h>

#endif /* _ASMLANGUAGE */

#endif /* ZEPHYR_INCLUDE_SYS_ARCH_INTERFACE_H_ */