Zephyr API Documentation  2.7.0-rc2
A Scalable Open Source RTOS
mem_manage.h
Go to the documentation of this file.
1/*
2 * Copyright (c) 2020 Intel Corporation
3 *
4 * SPDX-License-Identifier: Apache-2.0
5 */
6
7#ifndef ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H
8#define ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H
9
10#include <sys/util.h>
11#include <toolchain.h>
12
/*
 * Caching mode definitions. These are mutually exclusive.
 */

/** No caching */
#define K_MEM_CACHE_NONE	2

/** Write-through caching */
#define K_MEM_CACHE_WT		1

/** Full write-back caching */
#define K_MEM_CACHE_WB		0

/*
 * NOTE(review): mask reserves the low 3 bits for the cache mode; the
 * K_MEM_PERM_* region attributes below start at BIT(3).
 */
#define K_MEM_CACHE_MASK	(BIT(3) - 1)
28
/*
 * Region permission attributes. Default is read-only, no user, no exec
 */

/** Region will have read/write access (not the default read-only) */
#define K_MEM_PERM_RW		BIT(3)

/** Region will be executable (off by default) */
#define K_MEM_PERM_EXEC		BIT(4)

/** Region will be accessible to user mode (supervisor-only by default) */
#define K_MEM_PERM_USER		BIT(5)
41
/*
 * This is the offset to subtract from a virtual address mapped in the
 * kernel's permanent mapping of RAM, to obtain its physical address.
 *
 *     virt_addr = phys_addr + Z_MEM_VM_OFFSET
 *
 * This only works for virtual addresses within the interval
 * [CONFIG_KERNEL_VM_BASE, CONFIG_KERNEL_VM_BASE + (CONFIG_SRAM_SIZE * 1024)).
 *
 * These macros are intended for assembly, linker code, and static initializers.
 * Use with care.
 *
 * Note that when demand paging is active, these will only work with page
 * frames that are pinned to their virtual mapping at boot.
 *
 * TODO: This will likely need to move to an arch API or need additional
 * constraints defined.
 */
#ifdef CONFIG_MMU
#define Z_MEM_VM_OFFSET	((CONFIG_KERNEL_VM_BASE + CONFIG_KERNEL_VM_OFFSET) - \
			 (CONFIG_SRAM_BASE_ADDRESS + CONFIG_SRAM_OFFSET))
#else
/* No MMU: RAM is identity-mapped, so virtual == physical */
#define Z_MEM_VM_OFFSET	0
#endif

/* Convert between physical addresses and the kernel's permanent mapping */
#define Z_MEM_PHYS_ADDR(virt)	((virt) - Z_MEM_VM_OFFSET)
#define Z_MEM_VIRT_ADDR(phys)	((phys) + Z_MEM_VM_OFFSET)

#if Z_MEM_VM_OFFSET != 0
/* Kernel is running out of a virtual memory mapping distinct from physical */
#define Z_VM_KERNEL 1
#ifdef CONFIG_XIP
#error "XIP and a virtual memory kernel are not allowed"
#endif
#endif
76
77#ifndef _ASMLANGUAGE
78#include <stdint.h>
79#include <stddef.h>
80#include <inttypes.h>
81#include <sys/__assert.h>
82
/*
 * Paging statistics, retrieved via k_mem_paging_stats_get().
 *
 * NOTE(review): the struct tag line was lost in extraction; reconstructed
 * as k_mem_paging_stats_t from its use in k_mem_paging_stats_get() —
 * verify against the upstream header.
 */
struct k_mem_paging_stats_t {
#ifdef CONFIG_DEMAND_PAGING_STATS
	struct {
		/* Total number of page faults */
		unsigned long cnt;

		/* Page faults taken with interrupts locked */
		unsigned long irq_locked;

		/* Page faults taken with interrupts unlocked */
		unsigned long irq_unlocked;

#ifndef CONFIG_DEMAND_PAGING_ALLOW_IRQ
		/* Page faults that occurred in ISR context */
		unsigned long in_isr;
#endif
	} pagefaults;

	struct {
		/* Evicted page frames that were clean */
		unsigned long clean;

		/* Evicted page frames that were dirty */
		unsigned long dirty;
	} eviction;
#endif /* CONFIG_DEMAND_PAGING_STATS */
};
110
/*
 * Timing histogram for demand paging operations.
 *
 * NOTE(review): the struct tag line was lost in extraction; reconstructed
 * as k_mem_paging_histogram_t from its use in the histogram getter
 * prototypes — verify against the upstream header.
 */
struct k_mem_paging_histogram_t {
#ifdef CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM
	/* Counts for each bin in timing histogram */
	unsigned long counts[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];

	/* Bounds for the bins in timing histogram,
	 * excluding the first and last (hence, NUM_SLOTS - 1).
	 */
	unsigned long bounds[CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM_NUM_BINS];
#endif /* CONFIG_DEMAND_PAGING_TIMING_HISTOGRAM */
};
122
123/* Just like Z_MEM_PHYS_ADDR() but with type safety and assertions */
124static inline uintptr_t z_mem_phys_addr(void *virt)
125{
126 uintptr_t addr = (uintptr_t)virt;
127
128#ifdef CONFIG_MMU
129 __ASSERT((addr >= CONFIG_KERNEL_VM_BASE) &&
130 (addr < (CONFIG_KERNEL_VM_BASE +
131 (CONFIG_KERNEL_VM_SIZE))),
132 "address %p not in permanent mappings", virt);
133#else
134 /* Should be identity-mapped */
135 __ASSERT((addr >= CONFIG_SRAM_BASE_ADDRESS) &&
136 (addr < (CONFIG_SRAM_BASE_ADDRESS +
137 (CONFIG_SRAM_SIZE * 1024UL))),
138 "physical address 0x%lx not in RAM",
139 (unsigned long)addr);
140#endif /* CONFIG_MMU */
141
142 /* TODO add assertion that this page is pinned to boot mapping,
143 * the above checks won't be sufficient with demand paging
144 */
145
146 return Z_MEM_PHYS_ADDR(addr);
147}
148
149/* Just like Z_MEM_VIRT_ADDR() but with type safety and assertions */
150static inline void *z_mem_virt_addr(uintptr_t phys)
151{
152 __ASSERT((phys >= CONFIG_SRAM_BASE_ADDRESS) &&
153 (phys < (CONFIG_SRAM_BASE_ADDRESS +
154 (CONFIG_SRAM_SIZE * 1024UL))),
155 "physical address 0x%lx not in RAM", (unsigned long)phys);
156
157 /* TODO add assertion that this page frame is pinned to boot mapping,
158 * the above check won't be sufficient with demand paging
159 */
160
161 return (void *)Z_MEM_VIRT_ADDR(phys);
162}
163
164#ifdef __cplusplus
165extern "C" {
166#endif
167
/*
 * Map a physical memory region into the kernel's virtual address space.
 *
 * NOTE(review): the line carrying the final parameter was lost in
 * extraction; reconstructed as 'uint32_t flags' (K_MEM_CACHE_* /
 * K_MEM_PERM_* bits, matching k_mem_map()) — verify against the
 * upstream header.
 */
void z_phys_map(uint8_t **virt_ptr, uintptr_t phys, size_t size,
		uint32_t flags);

/* Unmap a region previously mapped with z_phys_map() */
void z_phys_unmap(uint8_t *virt, size_t size);

/*
 * k_mem_map() control flags
 */

/* Mapped region contents are left uninitialized — presumably skips the
 * zero fill; confirm against upstream docs.
 */
#define K_MEM_MAP_UNINIT	BIT(16)

/* Lock the mapped region — presumably pins it against demand paging;
 * confirm against upstream docs.
 */
#define K_MEM_MAP_LOCK		BIT(17)

/* Deprecated flag (per __DEPRECATED_MACRO); retained for compatibility */
#define K_MEM_MAP_GUARD		__DEPRECATED_MACRO BIT(18)

/* Amount of unmapped virtual address space remaining */
size_t k_mem_free_get(void);

/* Map anonymous memory; flags take K_MEM_* bits defined above */
void *k_mem_map(size_t size, uint32_t flags);

/* Unmap a region previously returned by k_mem_map() */
void k_mem_unmap(void *addr, size_t size);

/* Align a region's address and size; outputs via aligned_addr/aligned_size */
size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size,
			  uintptr_t addr, size_t size, size_t align);
372
401int k_mem_page_out(void *addr, size_t size);
402
416void k_mem_page_in(void *addr, size_t size);
417
431void k_mem_pin(void *addr, size_t size);
432
443void k_mem_unpin(void *addr, size_t size);
444
453__syscall void k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats);
454
455struct k_thread;
465__syscall
467 struct k_mem_paging_stats_t *stats);
468
478 struct k_mem_paging_histogram_t *hist);
479
489 struct k_mem_paging_histogram_t *hist);
490
500 struct k_mem_paging_histogram_t *hist);
501
502#include <syscalls/mem_manage.h>
503
527struct z_page_frame *k_mem_paging_eviction_select(bool *dirty);
528
537
582int k_mem_paging_backing_store_location_get(struct z_page_frame *pf,
583 uintptr_t *location,
584 bool page_fault);
585
597
610
623
644void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf,
645 uintptr_t location);
646
661
664#ifdef __cplusplus
665}
666#endif
667
668#endif /* !_ASMLANGUAGE */
669#endif /* ZEPHYR_INCLUDE_SYS_MEM_MANAGE_H */
static struct k_thread thread[2]
Definition: atomic.c:22
void k_mem_paging_backing_store_page_finalize(struct z_page_frame *pf, uintptr_t location)
void k_mem_paging_backing_store_page_out(uintptr_t location)
void k_mem_paging_backing_store_location_free(uintptr_t location)
void k_mem_paging_backing_store_init(void)
void k_mem_paging_backing_store_page_in(uintptr_t location)
int k_mem_paging_backing_store_location_get(struct z_page_frame *pf, uintptr_t *location, bool page_fault)
struct z_page_frame * k_mem_paging_eviction_select(bool *dirty)
void k_mem_paging_eviction_init(void)
int k_mem_page_out(void *addr, size_t size)
void k_mem_paging_histogram_backing_store_page_in_get(struct k_mem_paging_histogram_t *hist)
void k_mem_unpin(void *addr, size_t size)
void k_mem_paging_stats_get(struct k_mem_paging_stats_t *stats)
void k_mem_pin(void *addr, size_t size)
void k_mem_page_in(void *addr, size_t size)
void k_mem_paging_histogram_backing_store_page_out_get(struct k_mem_paging_histogram_t *hist)
void k_mem_paging_histogram_eviction_get(struct k_mem_paging_histogram_t *hist)
void k_mem_paging_thread_stats_get(struct k_thread *thread, struct k_mem_paging_stats_t *stats)
flags
Definition: http_parser.h:131
size_t k_mem_free_get(void)
size_t k_mem_region_align(uintptr_t *aligned_addr, size_t *aligned_size, uintptr_t addr, size_t size, size_t align)
void k_mem_unmap(void *addr, size_t size)
void * k_mem_map(size_t size, uint32_t flags)
__UINT32_TYPE__ uint32_t
Definition: stdint.h:60
__UINT8_TYPE__ uint8_t
Definition: stdint.h:58
__UINTPTR_TYPE__ uintptr_t
Definition: stdint.h:75
Definition: mem_manage.h:111
Definition: mem_manage.h:83
Definition: thread.h:201
Macros to abstract toolchain specific capabilities.
Misc utilities.