From 14d59de4d87d7e16cc5dfbefb6c96a5073abad5e Mon Sep 17 00:00:00 2001
From: HeatCrab
Date: Tue, 28 Oct 2025 10:18:52 +0800
Subject: [PATCH 01/15] Add PMP infrastructure and management structures

Introduces RISC-V Physical Memory Protection (PMP) support for
hardware-enforced memory isolation. TOR mode is adopted as the
addressing scheme because it supports arbitrary address ranges without
alignment requirements, simplifying region management for task stacks
of varying sizes.

Adds CSR definitions for PMP registers, permission encodings, and
hardware constants. Provides structures for region configuration and
state tracking, with priority-based management to handle the 16-region
hardware limit. Includes error codes and functions for region
configuration and access verification.
---
 arch/riscv/csr.h        | 79 ++++++++++++++++++++++++++++++++++
 arch/riscv/hal.h        |  7 ++++
 arch/riscv/pmp.h        | 93 +++++++++++++++++++++++++++++++++++++++++
 include/private/error.h |  8 ++++
 4 files changed, 187 insertions(+)
 create mode 100644 arch/riscv/pmp.h

diff --git a/arch/riscv/csr.h b/arch/riscv/csr.h
index 2f27ed81..081c2c7c 100644
--- a/arch/riscv/csr.h
+++ b/arch/riscv/csr.h
@@ -179,3 +179,82 @@
 
 /* Machine Scratch Register - For temporary storage during traps */
 #define CSR_MSCRATCH 0x340
+
+/* PMP Address Registers (pmpaddr0-pmpaddr15) - 16 regions maximum
+ * In TOR (Top-of-Range) mode, these define the upper boundary of each region.
+ * The lower boundary is defined by the previous region's upper boundary.
+ */
+#define CSR_PMPADDR0 0x3b0
+#define CSR_PMPADDR1 0x3b1
+#define CSR_PMPADDR2 0x3b2
+#define CSR_PMPADDR3 0x3b3
+#define CSR_PMPADDR4 0x3b4
+#define CSR_PMPADDR5 0x3b5
+#define CSR_PMPADDR6 0x3b6
+#define CSR_PMPADDR7 0x3b7
+#define CSR_PMPADDR8 0x3b8
+#define CSR_PMPADDR9 0x3b9
+#define CSR_PMPADDR10 0x3ba
+#define CSR_PMPADDR11 0x3bb
+#define CSR_PMPADDR12 0x3bc
+#define CSR_PMPADDR13 0x3bd
+#define CSR_PMPADDR14 0x3be
+#define CSR_PMPADDR15 0x3bf
+
+/* PMP Configuration Registers (pmpcfg0-pmpcfg3)
+ * Each configuration register controls 4 PMP regions (on RV32).
+ * pmpcfg0 controls pmpaddr0-3, pmpcfg1 controls pmpaddr4-7, etc.
+ */
+#define CSR_PMPCFG0 0x3a0
+#define CSR_PMPCFG1 0x3a1
+#define CSR_PMPCFG2 0x3a2
+#define CSR_PMPCFG3 0x3a3
+
+/* PMP Configuration Field Bits (8 bits per region within pmpcfg)
+ * Layout in each byte of pmpcfg:
+ * Bit 7: L (Lock) - Locks this region until hardware reset
+ * Bits 6-5: Reserved
+ * Bits 4-3: A (Address Matching Mode)
+ * Bit 2: X (Execute permission)
+ * Bit 1: W (Write permission)
+ * Bit 0: R (Read permission)
+ */
+
+/* Lock bit: Prevents further modification of this region */
+#define PMPCFG_L (1U << 7)
+
+/* Address Matching Mode (bits 4-3)
+ * TOR mode is used because it imposes no alignment requirements on region
+ * sizes and supports arbitrary address ranges.
+ */ +#define PMPCFG_A_SHIFT 3 +#define PMPCFG_A_MASK (0x3U << PMPCFG_A_SHIFT) +#define PMPCFG_A_OFF (0x0U << PMPCFG_A_SHIFT) /* Null region (disabled) */ +#define PMPCFG_A_TOR (0x1U << PMPCFG_A_SHIFT) /* Top-of-Range mode */ + +/* Permission bits */ +#define PMPCFG_X (1U << 2) /* Execute permission */ +#define PMPCFG_W (1U << 1) /* Write permission */ +#define PMPCFG_R (1U << 0) /* Read permission */ + +/* Common permission combinations */ +#define PMPCFG_PERM_NONE (0x0U) /* No access */ +#define PMPCFG_PERM_R (PMPCFG_R) /* Read-only */ +#define PMPCFG_PERM_RW (PMPCFG_R | PMPCFG_W) /* Read-Write */ +#define PMPCFG_PERM_X (PMPCFG_X) /* Execute-only */ +#define PMPCFG_PERM_RX (PMPCFG_R | PMPCFG_X) /* Read-Execute */ +#define PMPCFG_PERM_RWX (PMPCFG_R | PMPCFG_W | PMPCFG_X) /* All access */ + +/* Utility macros for PMP configuration manipulation */ + +/* Extract PMP address matching mode */ +#define PMPCFG_GET_A(cfg) (((cfg) & PMPCFG_A_MASK) >> PMPCFG_A_SHIFT) + +/* Extract permission bits from configuration byte */ +#define PMPCFG_GET_PERM(cfg) ((cfg) & (PMPCFG_R | PMPCFG_W | PMPCFG_X)) + +/* Check if region is locked */ +#define PMPCFG_IS_LOCKED(cfg) (((cfg) & PMPCFG_L) != 0) + +/* Check if region is enabled (address mode is not OFF) */ +#define PMPCFG_IS_ENABLED(cfg) (PMPCFG_GET_A(cfg) != PMPCFG_A_OFF) diff --git a/arch/riscv/hal.h b/arch/riscv/hal.h index 7946a0fe..c6712097 100644 --- a/arch/riscv/hal.h +++ b/arch/riscv/hal.h @@ -135,3 +135,10 @@ void hal_cpu_idle(void); /* Default stack size for new tasks if not otherwise specified */ #define DEFAULT_STACK_SIZE 8192 + +/* Physical Memory Protection (PMP) region limit constants */ +#define PMP_MAX_REGIONS 16 /* RISC-V supports 16 PMP regions */ +#define PMP_TOR_PAIRS \ + 8 /* In TOR mode, 16 regions = 8 pairs (uses 2 addrs each) */ +#define MIN_PMP_REGION_SIZE \ + 4 /* Minimum addressable size in TOR mode (4 bytes) */ diff --git a/arch/riscv/pmp.h b/arch/riscv/pmp.h new file mode 100644 index 00000000..d4adabff --- /dev/null +++ b/arch/riscv/pmp.h @@ -0,0 +1,93 @@ +/* RISC-V Physical Memory Protection (PMP) Hardware Layer + * + * Low-level interface to RISC-V PMP using TOR (Top-of-Range) mode for + * flexible region management without alignment constraints. + */ + +#pragma once + +#include +#include + +/* PMP Region Priority Levels (lower value = higher priority) + * + * Used for eviction decisions when hardware PMP regions are exhausted. + */ +typedef enum { + PMP_PRIORITY_KERNEL = 0, + PMP_PRIORITY_STACK = 1, + PMP_PRIORITY_SHARED = 2, + PMP_PRIORITY_TEMPORARY = 3, + PMP_PRIORITY_COUNT = 4 +} pmp_priority_t; + +/* PMP Region Configuration */ +typedef struct { + uint32_t addr_start; /* Start address (inclusive) */ + uint32_t addr_end; /* End address (exclusive, written to pmpaddr) */ + uint8_t permissions; /* R/W/X bits (PMPCFG_R | PMPCFG_W | PMPCFG_X) */ + pmp_priority_t priority; /* Eviction priority */ + uint8_t region_id; /* Hardware region index (0-15) */ + uint8_t locked; /* Lock bit (cannot modify until reset) */ +} pmp_region_t; + +/* PMP Global State */ +typedef struct { + pmp_region_t regions[PMP_MAX_REGIONS]; /* Shadow of hardware config */ + uint8_t region_count; /* Active region count */ + uint8_t next_region_idx; /* Next free region index */ + uint32_t initialized; /* Initialization flag */ +} pmp_config_t; + +/* PMP Management Functions */ + +/* Initializes the PMP hardware and configuration state. + * @config : Pointer to pmp_config_t structure to be initialized. 
+ * Returns 0 on success, or negative error code on failure. + */ +int32_t pmp_init(pmp_config_t *config); + +/* Configures a single PMP region in TOR mode. + * @config : Pointer to PMP configuration state + * @region : Pointer to pmp_region_t structure with desired configuration + * Returns 0 on success, or negative error code on failure. + */ +int32_t pmp_set_region(pmp_config_t *config, const pmp_region_t *region); + +/* Reads the current configuration of a PMP region. + * @config : Pointer to PMP configuration state + * @region_idx : Index of the region to read (0-15) + * @region : Pointer to pmp_region_t to store the result + * Returns 0 on success, or negative error code on failure. + */ +int32_t pmp_get_region(const pmp_config_t *config, + uint8_t region_idx, + pmp_region_t *region); + +/* Disables a PMP region. + * @config : Pointer to PMP configuration state + * @region_idx : Index of the region to disable (0-15) + * Returns 0 on success, or negative error code on failure. + */ +int32_t pmp_disable_region(pmp_config_t *config, uint8_t region_idx); + +/* Locks a PMP region to prevent further modification. + * @config : Pointer to PMP configuration state + * @region_idx : Index of the region to lock (0-15) + * Returns 0 on success, or negative error code on failure. + */ +int32_t pmp_lock_region(pmp_config_t *config, uint8_t region_idx); + +/* Verifies that a memory access is allowed by the current PMP configuration. + * @config : Pointer to PMP configuration state + * @addr : Address to check + * @size : Size of the access in bytes + * @is_write : 1 for write access, 0 for read access + * @is_execute : 1 for execute access, 0 for data access + * Returns 1 if access is allowed, 0 if denied, or negative error code. + */ +int32_t pmp_check_access(const pmp_config_t *config, + uint32_t addr, + uint32_t size, + uint8_t is_write, + uint8_t is_execute); diff --git a/include/private/error.h b/include/private/error.h index 5589087b..33f5d113 100644 --- a/include/private/error.h +++ b/include/private/error.h @@ -29,6 +29,14 @@ enum { ERR_STACK_CHECK, /* Stack overflow or corruption detected */ ERR_HEAP_CORRUPT, /* Heap corruption or invalid free detected */ + /* PMP Configuration Errors */ + ERR_PMP_INVALID_REGION, /* Invalid PMP region parameters */ + ERR_PMP_NO_REGIONS, /* No free PMP regions available */ + ERR_PMP_LOCKED, /* Region is locked by higher priority */ + ERR_PMP_SIZE_MISMATCH, /* Size doesn't meet alignment requirements */ + ERR_PMP_ADDR_RANGE, /* Address range is invalid */ + ERR_PMP_NOT_INIT, /* PMP not initialized */ + /* IPC and Synchronization Errors */ ERR_PIPE_ALLOC, /* Pipe allocation failed */ ERR_PIPE_DEALLOC, /* Pipe deallocation failed */ From cf7da323d5d3bf565ed005541bd459a38c3d2359 Mon Sep 17 00:00:00 2001 From: HeatCrab Date: Tue, 28 Oct 2025 11:06:11 +0800 Subject: [PATCH 02/15] Add memory abstraction structures Introduces three abstractions that build upon the PMP infrastructure for managing memory protection at different granularities. Flexpages represent contiguous physical memory regions with protection attributes, providing arbitrary base addresses and sizes without alignment constraints. Memory spaces implement the address space concept but use distinct terminology to avoid confusion with virtual address spaces, as this structure represents a task's memory protection domain in a physical-address-only system. They organize flexpages into task memory views and support sharing across multiple tasks without requiring an MMU. 
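
As a sketch of the intended relationship (memspace_attach is a
hypothetical helper shown only for illustration; later commits in this
series do the equivalent list manipulation inline):

    /* Illustrative only: add a flexpage to a memory space's view */
    static void memspace_attach(memspace_t *ms, fpage_t *fp)
    {
        fp->as_next = ms->first; /* link into the space's flexpage list */
        ms->first = fp;          /* new head of the task's memory view */
    }
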
Memory pools define static regions for boot-time initialization of kernel memory protection. Field naming retains 'as_' prefix (e.g., as_id, as_next) to reflect the underlying address space concept, while documentation uses "memory space" terminology for clarity in physical-memory-only contexts. Structures are used to enable runtime iteration, simplify debugging, and maintain consistency with other subsystems. Macro helpers reduce initialization boilerplate while maintaining type safety. --- include/sys/memprot.h | 74 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 include/sys/memprot.h diff --git a/include/sys/memprot.h b/include/sys/memprot.h new file mode 100644 index 00000000..612014f4 --- /dev/null +++ b/include/sys/memprot.h @@ -0,0 +1,74 @@ +/* Memory Protection Abstractions + * + * Software abstractions for managing memory protection at different + * granularities. These structures build upon hardware protection + * mechanisms (such as RISC-V PMP) to provide flexible, architecture- + * independent memory isolation. + */ + +#pragma once + +#include + +/* Forward declarations */ +struct fpage; +struct as; + +/* Flexpage + * + * Contiguous physical memory region with hardware-enforced protection. + * Supports arbitrary base addresses and sizes without alignment constraints. + */ +typedef struct fpage { + struct fpage *as_next; /* Next in address space list */ + struct fpage *map_next; /* Next in mapping chain */ + struct fpage *pmp_next; /* Next in PMP queue */ + uint32_t base; /* Physical base address */ + uint32_t size; /* Region size */ + uint32_t rwx; /* R/W/X permission bits */ + uint32_t pmp_id; /* PMP region index */ + uint32_t flags; /* Status flags */ + uint32_t priority; /* Eviction priority */ + int used; /* Usage counter */ +} fpage_t; + +/* Memory Space + * + * Collection of flexpages forming a task's memory view. Can be shared + * across multiple tasks. + */ +typedef struct memspace { + uint32_t as_id; /* Memory space identifier */ + struct fpage *first; /* Head of flexpage list */ + struct fpage *pmp_first; /* Head of PMP-loaded list */ + struct fpage *pmp_stack; /* Stack regions */ + uint32_t shared; /* Shared flag */ +} memspace_t; + +/* Memory Pool + * + * Static memory region descriptor for boot-time PMP initialization. + */ +typedef struct { + const char *name; /* Pool name */ + uintptr_t start; /* Start address */ + uintptr_t end; /* End address */ + uint32_t flags; /* Access permissions */ + uint32_t tag; /* Pool type/priority */ +} mempool_t; + +/* Memory Pool Declaration Helpers + * + * Simplifies memory pool initialization with designated initializers. + * DECLARE_MEMPOOL_FROM_SYMBOLS uses token concatenation to construct + * linker symbol names automatically. + */ +#define DECLARE_MEMPOOL(name_, start_, end_, flags_, tag_) \ + { \ + .name = (name_), .start = (uintptr_t) (start_), \ + .end = (uintptr_t) (end_), .flags = (flags_), .tag = (tag_), \ + } + +#define DECLARE_MEMPOOL_FROM_SYMBOLS(name_, sym_base_, flags_, tag_) \ + DECLARE_MEMPOOL((name_), &(sym_base_##_start), &(sym_base_##_end), \ + (flags_), (tag_)) From 14b00d004fea655bb471659cd1fd023553073d7e Mon Sep 17 00:00:00 2001 From: HeatCrab Date: Tue, 28 Oct 2025 11:11:33 +0800 Subject: [PATCH 03/15] Declare memory pools from linker symbols Defines static memory pools for boot-time PMP initialization using linker symbols to identify kernel memory regions. 
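
For example, the pool covering the kernel text segment is built
directly from the linker-provided boundary symbols (a sketch of the
pattern; the full pool table follows in arch/riscv/pmp.c):

    extern uint32_t _stext, _etext; /* from the linker script */

    static const mempool_t text_pool = DECLARE_MEMPOOL(
        "kernel_text", &_stext, &_etext, PMPCFG_PERM_RX, PMP_PRIORITY_KERNEL);
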
Linker symbol declarations are updated to include text segment boundaries and match actual linker script definitions for stack regions. Five kernel memory pools protect text as read-execute, data and bss as read-write, heap and stack as read-write without execute to prevent code injection. Macro helpers reduce initialization boilerplate while maintaining debuggability through struct arrays. Priority-based management handles the 16-region hardware constraint. --- arch/riscv/hal.h | 7 ++-- arch/riscv/pmp.c | 90 ++++++++++++++++++++++++++++++++++++++++++++++++ arch/riscv/pmp.h | 19 ++++++++++ 3 files changed, 113 insertions(+), 3 deletions(-) create mode 100644 arch/riscv/pmp.c diff --git a/arch/riscv/hal.h b/arch/riscv/hal.h index c6712097..f42fedd1 100644 --- a/arch/riscv/hal.h +++ b/arch/riscv/hal.h @@ -3,13 +3,14 @@ #include /* Symbols from the linker script, defining memory boundaries */ -extern uint32_t _stack_start, _stack_end; /* Start/end of the STACK memory */ -extern uint32_t _heap_start, _heap_end; /* Start/end of the HEAP memory */ -extern uint32_t _heap_size; /* Size of HEAP memory */ +extern uint32_t _stext, _etext; /* Start/end of the .text section */ extern uint32_t _sidata; /* Start address for .data initialization */ extern uint32_t _sdata, _edata; /* Start/end address for .data section */ extern uint32_t _sbss, _ebss; /* Start/end address for .bss section */ extern uint32_t _end; /* End of kernel image */ +extern uint32_t _heap_start, _heap_end; /* Start/end of the HEAP memory */ +extern uint32_t _heap_size; /* Size of HEAP memory */ +extern uint32_t _stack_bottom, _stack_top; /* Bottom/top of the STACK memory */ /* Read a RISC-V Control and Status Register (CSR). * @reg : The symbolic name of the CSR (e.g., mstatus). diff --git a/arch/riscv/pmp.c b/arch/riscv/pmp.c new file mode 100644 index 00000000..fc588dff --- /dev/null +++ b/arch/riscv/pmp.c @@ -0,0 +1,90 @@ +/* RISC-V Physical Memory Protection (PMP) Implementation + * + * Provides hardware-enforced memory isolation using PMP in TOR mode. + */ + +#include + +#include "csr.h" +#include "pmp.h" +#include "private/error.h" + +/* Static Memory Pools for Boot-time PMP Initialization + * + * Defines kernel memory regions protected at boot. Each pool specifies + * a memory range and access permissions. 
+ */ +static const mempool_t kernel_mempools[] = { + DECLARE_MEMPOOL("kernel_text", + &_stext, + &_etext, + PMPCFG_PERM_RX, + PMP_PRIORITY_KERNEL), + DECLARE_MEMPOOL("kernel_data", + &_sdata, + &_edata, + PMPCFG_PERM_RW, + PMP_PRIORITY_KERNEL), + DECLARE_MEMPOOL("kernel_bss", + &_sbss, + &_ebss, + PMPCFG_PERM_RW, + PMP_PRIORITY_KERNEL), + DECLARE_MEMPOOL("kernel_heap", + &_heap_start, + &_heap_end, + PMPCFG_PERM_RW, + PMP_PRIORITY_KERNEL), + DECLARE_MEMPOOL("kernel_stack", + &_stack_bottom, + &_stack_top, + PMPCFG_PERM_RW, + PMP_PRIORITY_KERNEL), +}; + +#define KERNEL_MEMPOOL_COUNT \ + (sizeof(kernel_mempools) / sizeof(kernel_mempools[0])) + +int32_t pmp_init_pools(pmp_config_t *config, + const mempool_t *pools, + size_t count) +{ + if (!config || !pools || count == 0) + return ERR_PMP_INVALID_REGION; + + /* Initialize PMP hardware and state */ + int32_t ret = pmp_init(config); + if (ret < 0) + return ret; + + /* Configure each memory pool as a PMP region */ + for (size_t i = 0; i < count; i++) { + const mempool_t *pool = &pools[i]; + + /* Validate pool boundaries */ + if (pool->start >= pool->end) + return ERR_PMP_ADDR_RANGE; + + /* Prepare PMP region configuration */ + pmp_region_t region = { + .addr_start = pool->start, + .addr_end = pool->end, + .permissions = pool->flags & (PMPCFG_R | PMPCFG_W | PMPCFG_X), + .priority = pool->tag, + .region_id = i, + .locked = 0, + }; + + /* Configure the PMP region */ + ret = pmp_set_region(config, ®ion); + if (ret < 0) + return ret; + } + + return ERR_OK; +} + +int32_t pmp_init_kernel(pmp_config_t *config) +{ + return pmp_init_pools(config, kernel_mempools, KERNEL_MEMPOOL_COUNT); +} diff --git a/arch/riscv/pmp.h b/arch/riscv/pmp.h index d4adabff..17e0452e 100644 --- a/arch/riscv/pmp.h +++ b/arch/riscv/pmp.h @@ -7,6 +7,7 @@ #pragma once #include +#include #include /* PMP Region Priority Levels (lower value = higher priority) @@ -91,3 +92,21 @@ int32_t pmp_check_access(const pmp_config_t *config, uint32_t size, uint8_t is_write, uint8_t is_execute); + +/* Memory Pool Management Functions */ + +/* Initializes PMP regions from an array of memory pool descriptors. + * @config : Pointer to PMP configuration state + * @pools : Array of memory pool descriptors + * @count : Number of pools in the array + * Returns 0 on success, or negative error code on failure. + */ +int32_t pmp_init_pools(pmp_config_t *config, + const mempool_t *pools, + size_t count); + +/* Initializes PMP with default kernel memory pools. + * @config : Pointer to PMP configuration state + * Returns 0 on success, or negative error code on failure. + */ +int32_t pmp_init_kernel(pmp_config_t *config); From 9242c572c0e5b69608647062b05508fab7e8f836 Mon Sep 17 00:00:00 2001 From: HeatCrab Date: Wed, 29 Oct 2025 14:31:41 +0800 Subject: [PATCH 04/15] Link tasks to memory spaces Extends TCB with a memory space pointer to enable per-task memory isolation. Each task can now reference its own memory protection domain through the flexpage mechanism. 
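
A later commit performs the wiring at task-creation time; conceptually
(a sketch using the memory space allocator that a subsequent commit in
this series introduces):

    /* During task spawn: give the task a private protection domain */
    tcb->mspace = mo_memspace_create(kcb->next_tid, 0 /* not shared */);
    if (!tcb->mspace)
        panic(ERR_TCB_ALLOC);
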
--- include/sys/task.h | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/include/sys/task.h b/include/sys/task.h index ccf5f4fa..729149bc 100644 --- a/include/sys/task.h +++ b/include/sys/task.h @@ -59,6 +59,9 @@ enum task_states { #define TASK_TIMESLICE_LOW 10 /* Low priority: longer slice */ #define TASK_TIMESLICE_IDLE 15 /* Idle tasks: longest slice */ +/* Forward declaration */ +struct memspace; + /* Task Control Block (TCB) * * Contains all essential information about a single task, including saved @@ -72,6 +75,8 @@ typedef struct tcb { size_t stack_sz; /* Total size of the stack in bytes */ void (*entry)(void); /* Task's entry point function */ + /* Memory Protection */ + struct memspace *mspace; /* Memory space for task isolation */ /* Scheduling Parameters */ uint16_t prio; /* Encoded priority (base and time slice counter) */ uint8_t prio_level; /* Priority level (0-7, 0 = highest) */ From 3ada7f2ca1a16d0e6c0184b7999d9c312dc642f3 Mon Sep 17 00:00:00 2001 From: HeatCrab Date: Fri, 31 Oct 2025 14:46:57 +0800 Subject: [PATCH 05/15] Implement flexpage lifecycle management Adds creation and destruction functions for flexpages, which are software abstractions representing contiguous physical memory regions with hardware-enforced protection attributes. These primitives will be used by higher-level memory space management to construct per-task memory views for PMP-based isolation. Function naming follows kernel conventions to reflect that these operations manage abstract memory protection objects rather than just memory allocation. --- Makefile | 2 +- include/sys/memprot.h | 19 +++++++++++++++++++ kernel/memprot.c | 44 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 1 deletion(-) create mode 100644 kernel/memprot.c diff --git a/Makefile b/Makefile index 68175601..c780c30b 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ include arch/$(ARCH)/build.mk INC_DIRS += -I $(SRC_DIR)/include \ -I $(SRC_DIR)/include/lib -KERNEL_OBJS := timer.o mqueue.o pipe.o semaphore.o mutex.o logger.o error.o syscall.o task.o main.o +KERNEL_OBJS := timer.o mqueue.o pipe.o semaphore.o mutex.o logger.o error.o syscall.o task.o memprot.o main.o KERNEL_OBJS := $(addprefix $(BUILD_KERNEL_DIR)/,$(KERNEL_OBJS)) deps += $(KERNEL_OBJS:%.o=%.o.d) diff --git a/include/sys/memprot.h b/include/sys/memprot.h index 612014f4..b820776e 100644 --- a/include/sys/memprot.h +++ b/include/sys/memprot.h @@ -72,3 +72,22 @@ typedef struct { #define DECLARE_MEMPOOL_FROM_SYMBOLS(name_, sym_base_, flags_, tag_) \ DECLARE_MEMPOOL((name_), &(sym_base_##_start), &(sym_base_##_end), \ (flags_), (tag_)) + +/* Flexpage Management Functions */ + +/* Creates and initializes a new flexpage. + * @base : Physical base address + * @size : Size in bytes + * @rwx : Permission bits + * @priority : Eviction priority + * Returns pointer to created flexpage, or NULL on failure. + */ +fpage_t *mo_fpage_create(uint32_t base, + uint32_t size, + uint32_t rwx, + uint32_t priority); + +/* Destroys a flexpage. + * @fpage : Pointer to flexpage to destroy + */ +void mo_fpage_destroy(fpage_t *fpage); diff --git a/kernel/memprot.c b/kernel/memprot.c new file mode 100644 index 00000000..e1dc71a7 --- /dev/null +++ b/kernel/memprot.c @@ -0,0 +1,44 @@ +/* Memory Protection Management + * + * Provides allocation and management functions for flexpages, which are + * software abstractions representing contiguous physical memory regions with + * hardware-enforced protection attributes. 
+ */ + +#include +#include +#include + +/* Creates and initializes a flexpage */ +fpage_t *mo_fpage_create(uint32_t base, + uint32_t size, + uint32_t rwx, + uint32_t priority) +{ + fpage_t *fpage = malloc(sizeof(fpage_t)); + if (!fpage) + return NULL; + + /* Initialize all fields */ + fpage->as_next = NULL; + fpage->map_next = NULL; + fpage->pmp_next = NULL; + fpage->base = base; + fpage->size = size; + fpage->rwx = rwx; + fpage->pmp_id = 0; /* Not loaded into PMP initially */ + fpage->flags = 0; /* No flags set initially */ + fpage->priority = priority; + fpage->used = 0; /* Not in use initially */ + + return fpage; +} + +/* Destroys a flexpage */ +void mo_fpage_destroy(fpage_t *fpage) +{ + if (!fpage) + return; + + free(fpage); +} From 808144a38993798e70ae56f79043c94d9dc10c59 Mon Sep 17 00:00:00 2001 From: HeatCrab Date: Fri, 31 Oct 2025 21:30:20 +0800 Subject: [PATCH 06/15] Implement PMP driver and region management Implements the core Physical Memory Protection (PMP) driver to bridge software flexpages with the underlying hardware, enabling dynamic management of limited PMP entries. This driver introduces a centralized global configuration state to track and coordinate PMP register usage across the kernel. To manage the finite hardware slots, it provides operations to dynamically load and evict flexpages at runtime. When hardware regions are exhausted, a priority-based victim selection algorithm identifies candidates for eviction. This policy prioritizes preserving critical system regions (priority 0) while selecting less important pages for replacement, ensuring safeguards against kernel instability during PMP context switches. --- arch/riscv/pmp.c | 8 +++++ arch/riscv/pmp.h | 3 ++ kernel/memprot.c | 85 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 96 insertions(+) diff --git a/arch/riscv/pmp.c b/arch/riscv/pmp.c index fc588dff..8cc51350 100644 --- a/arch/riscv/pmp.c +++ b/arch/riscv/pmp.c @@ -45,6 +45,14 @@ static const mempool_t kernel_mempools[] = { #define KERNEL_MEMPOOL_COUNT \ (sizeof(kernel_mempools) / sizeof(kernel_mempools[0])) +/* Global PMP configuration (shadow of hardware state) */ +static pmp_config_t pmp_global_config; + +pmp_config_t *pmp_get_config(void) +{ + return &pmp_global_config; +} + int32_t pmp_init_pools(pmp_config_t *config, const mempool_t *pools, size_t count) diff --git a/arch/riscv/pmp.h b/arch/riscv/pmp.h index 17e0452e..1bcc8051 100644 --- a/arch/riscv/pmp.h +++ b/arch/riscv/pmp.h @@ -42,6 +42,9 @@ typedef struct { /* PMP Management Functions */ +/* Returns pointer to global PMP configuration */ +pmp_config_t *pmp_get_config(void); + /* Initializes the PMP hardware and configuration state. * @config : Pointer to pmp_config_t structure to be initialized. * Returns 0 on success, or negative error code on failure. diff --git a/kernel/memprot.c b/kernel/memprot.c index e1dc71a7..0a8e11c2 100644 --- a/kernel/memprot.c +++ b/kernel/memprot.c @@ -7,6 +7,7 @@ #include #include +#include #include /* Creates and initializes a flexpage */ @@ -42,3 +43,87 @@ void mo_fpage_destroy(fpage_t *fpage) free(fpage); } + +/* Selects victim flexpage for eviction using priority-based algorithm. + * + * @mspace : Pointer to memory space + * Returns pointer to victim flexpage, or NULL if no evictable page found. + */ +fpage_t *select_victim_fpage(memspace_t *mspace) +{ + if (!mspace) + return NULL; + + fpage_t *victim = NULL; + uint32_t lowest_prio = 0; + + /* Select page with highest priority value (lowest priority). 
+ * Kernel regions (priority 0) are never selected. */ + for (fpage_t *fp = mspace->pmp_first; fp; fp = fp->pmp_next) { + if (fp->priority > lowest_prio) { + victim = fp; + lowest_prio = fp->priority; + } + } + + return victim; +} + +/* Loads a flexpage into a PMP hardware region. + * + * @fpage : Pointer to flexpage to load + * @region_idx : Hardware PMP region index (0-15) + * Returns 0 on success, or negative error code on failure. + */ +int32_t pmp_load_fpage(fpage_t *fpage, uint8_t region_idx) +{ + if (!fpage) + return -1; + + pmp_config_t *config = pmp_get_config(); + if (!config) + return -1; + + /* Configure PMP region from flexpage attributes */ + pmp_region_t region = { + .addr_start = fpage->base, + .addr_end = fpage->base + fpage->size, + .permissions = fpage->rwx, + .priority = fpage->priority, + .region_id = region_idx, + .locked = 0, + }; + + int32_t ret = pmp_set_region(config, ®ion); + if (ret == 0) { + fpage->pmp_id = region_idx; + } + + return ret; +} + +/* Evicts a flexpage from its PMP hardware region. + * + * @fpage : Pointer to flexpage to evict + * Returns 0 on success, or negative error code on failure. + */ +int32_t pmp_evict_fpage(fpage_t *fpage) +{ + if (!fpage) + return -1; + + /* Only evict if actually loaded into PMP */ + if (fpage->pmp_id == 0) + return 0; + + pmp_config_t *config = pmp_get_config(); + if (!config) + return -1; + + int32_t ret = pmp_disable_region(config, fpage->pmp_id); + if (ret == 0) { + fpage->pmp_id = 0; + } + + return ret; +} From 334d6a482798f56df3baf84b2ce5b8a3f052aef5 Mon Sep 17 00:00:00 2001 From: HeatCrab Date: Sun, 2 Nov 2025 20:58:52 +0800 Subject: [PATCH 07/15] Implement memory space lifecycle management Add functions to create and destroy memory spaces, which serve as containers for flexpages. A memory space can be dedicated to a single task or shared across multiple tasks, supporting both isolated and shared memory models. --- include/sys/memprot.h | 14 ++++++++++++++ kernel/memprot.c | 41 +++++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+) diff --git a/include/sys/memprot.h b/include/sys/memprot.h index b820776e..b1cb41c5 100644 --- a/include/sys/memprot.h +++ b/include/sys/memprot.h @@ -91,3 +91,17 @@ fpage_t *mo_fpage_create(uint32_t base, * @fpage : Pointer to flexpage to destroy */ void mo_fpage_destroy(fpage_t *fpage); + +/* Memory Space Management Functions */ + +/* Creates and initializes a memory space. + * @as_id : Memory space identifier + * @shared : Whether this space can be shared across tasks + * Returns pointer to created memory space, or NULL on failure. + */ +memspace_t *mo_memspace_create(uint32_t as_id, uint32_t shared); + +/* Destroys a memory space and all its flexpages. + * @mspace : Pointer to memory space to destroy + */ +void mo_memspace_destroy(memspace_t *mspace); diff --git a/kernel/memprot.c b/kernel/memprot.c index 0a8e11c2..3f2bfbbf 100644 --- a/kernel/memprot.c +++ b/kernel/memprot.c @@ -127,3 +127,44 @@ int32_t pmp_evict_fpage(fpage_t *fpage) return ret; } + +/* Creates and initializes a memory space. + * + * @as_id : Memory space identifier + * @shared : Whether this space can be shared across tasks + * Returns pointer to created memory space, or NULL on failure. 
+ */ +memspace_t *mo_memspace_create(uint32_t as_id, uint32_t shared) +{ + memspace_t *mspace = malloc(sizeof(memspace_t)); + if (!mspace) + return NULL; + + mspace->as_id = as_id; + mspace->first = NULL; + mspace->pmp_first = NULL; + mspace->pmp_stack = NULL; + mspace->shared = shared; + + return mspace; +} + +/* Destroys a memory space and all its flexpages. + * + * @mspace : Pointer to memory space to destroy + */ +void mo_memspace_destroy(memspace_t *mspace) +{ + if (!mspace) + return; + + /* Free all flexpages in the list */ + fpage_t *fp = mspace->first; + while (fp) { + fpage_t *next = fp->as_next; + mo_fpage_destroy(fp); + fp = next; + } + + free(mspace); +} From 311ee64283f881bb7e6674ba5882210247d38364 Mon Sep 17 00:00:00 2001 From: HeatCrab Date: Sun, 2 Nov 2025 21:47:53 +0800 Subject: [PATCH 08/15] Add PMP CSR access infrastructure Provide helper functions for runtime-indexed access to PMP control and status registers alongside existing compile-time CSR macros. RISC-V CSR instructions encode register addresses as immediate values in the instruction itself, making dynamic selection impossible through simple arithmetic. These helpers use switch-case dispatch to map runtime indices to specific CSR instructions while preserving type safety. This enables PMP register management code to iterate over regions without knowing exact register numbers at compile-time, supporting features with multiple registers of the same type. PMP implementation is now included in the build system to make these helpers and future PMP functionality available at link time. --- arch/riscv/build.mk | 2 +- arch/riscv/hal.h | 19 ++++++ arch/riscv/pmp.c | 142 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 162 insertions(+), 1 deletion(-) diff --git a/arch/riscv/build.mk b/arch/riscv/build.mk index 243a6ea2..19d224dc 100644 --- a/arch/riscv/build.mk +++ b/arch/riscv/build.mk @@ -70,7 +70,7 @@ LDFLAGS += --gc-sections ARFLAGS = r LDSCRIPT = $(ARCH_DIR)/riscv32-qemu.ld -HAL_OBJS := boot.o hal.o muldiv.o +HAL_OBJS := boot.o hal.o muldiv.o pmp.o HAL_OBJS := $(addprefix $(BUILD_KERNEL_DIR)/,$(HAL_OBJS)) deps += $(HAL_OBJS:%.o=%.o.d) diff --git a/arch/riscv/hal.h b/arch/riscv/hal.h index f42fedd1..2f202e9d 100644 --- a/arch/riscv/hal.h +++ b/arch/riscv/hal.h @@ -28,6 +28,25 @@ extern uint32_t _stack_bottom, _stack_top; /* Bottom/top of the STACK memory */ */ #define write_csr(reg, val) ({ asm volatile("csrw " #reg ", %0" ::"rK"(val)); }) +/* Read CSR by numeric address (for dynamic register selection). + * Used when CSR number is not known at compile-time (e.g., PMP registers). + * @csr_num : CSR address as a compile-time constant. + */ +#define read_csr_num(csr_num) \ + ({ \ + uint32_t __tmp; \ + asm volatile("csrr %0, %1" : "=r"(__tmp) : "i"(csr_num)); \ + __tmp; \ + }) + +/* Write CSR by numeric address (for dynamic register selection). + * Used when CSR number is not known at compile-time (e.g., PMP registers). + * @csr_num : CSR address as a compile-time constant. + * @val : The 32-bit value to write. + */ +#define write_csr_num(csr_num, val) \ + ({ asm volatile("csrw %0, %1" ::"i"(csr_num), "rK"(val)); }) + /* Globally enable or disable machine-level interrupts by setting mstatus.MIE. * @enable : Non-zero to enable, zero to disable. 
* Returns the previous state of the interrupt enable bit (1 if enabled, 0 if diff --git a/arch/riscv/pmp.c b/arch/riscv/pmp.c index 8cc51350..79b5615e 100644 --- a/arch/riscv/pmp.c +++ b/arch/riscv/pmp.c @@ -9,6 +9,148 @@ #include "pmp.h" #include "private/error.h" +/* PMP CSR Access Helpers + * + * RISC-V CSR instructions require compile-time constant addresses encoded in + * the instruction itself. These helpers use switch-case dispatch to provide + * runtime indexed access to PMP configuration and address registers. + * + * - pmpcfg0-3: Four 32-bit configuration registers (16 regions, 8 bits each) + * - pmpaddr0-15: Sixteen address registers for TOR (Top-of-Range) mode + */ + +/* Read PMP configuration register by index (0-3) */ +static uint32_t read_pmpcfg(uint8_t idx) +{ + switch (idx) { + case 0: + return read_csr_num(CSR_PMPCFG0); + case 1: + return read_csr_num(CSR_PMPCFG1); + case 2: + return read_csr_num(CSR_PMPCFG2); + case 3: + return read_csr_num(CSR_PMPCFG3); + default: + return 0; + } +} + +/* Write PMP configuration register by index (0-3) */ +static void write_pmpcfg(uint8_t idx, uint32_t val) +{ + switch (idx) { + case 0: + write_csr_num(CSR_PMPCFG0, val); + break; + case 1: + write_csr_num(CSR_PMPCFG1, val); + break; + case 2: + write_csr_num(CSR_PMPCFG2, val); + break; + case 3: + write_csr_num(CSR_PMPCFG3, val); + break; + } +} + +/* Read PMP address register by index (0-15) */ +static uint32_t read_pmpaddr(uint8_t idx) +{ + switch (idx) { + case 0: + return read_csr_num(CSR_PMPADDR0); + case 1: + return read_csr_num(CSR_PMPADDR1); + case 2: + return read_csr_num(CSR_PMPADDR2); + case 3: + return read_csr_num(CSR_PMPADDR3); + case 4: + return read_csr_num(CSR_PMPADDR4); + case 5: + return read_csr_num(CSR_PMPADDR5); + case 6: + return read_csr_num(CSR_PMPADDR6); + case 7: + return read_csr_num(CSR_PMPADDR7); + case 8: + return read_csr_num(CSR_PMPADDR8); + case 9: + return read_csr_num(CSR_PMPADDR9); + case 10: + return read_csr_num(CSR_PMPADDR10); + case 11: + return read_csr_num(CSR_PMPADDR11); + case 12: + return read_csr_num(CSR_PMPADDR12); + case 13: + return read_csr_num(CSR_PMPADDR13); + case 14: + return read_csr_num(CSR_PMPADDR14); + case 15: + return read_csr_num(CSR_PMPADDR15); + default: + return 0; + } +} + +/* Write PMP address register by index (0-15) */ +static void write_pmpaddr(uint8_t idx, uint32_t val) +{ + switch (idx) { + case 0: + write_csr_num(CSR_PMPADDR0, val); + break; + case 1: + write_csr_num(CSR_PMPADDR1, val); + break; + case 2: + write_csr_num(CSR_PMPADDR2, val); + break; + case 3: + write_csr_num(CSR_PMPADDR3, val); + break; + case 4: + write_csr_num(CSR_PMPADDR4, val); + break; + case 5: + write_csr_num(CSR_PMPADDR5, val); + break; + case 6: + write_csr_num(CSR_PMPADDR6, val); + break; + case 7: + write_csr_num(CSR_PMPADDR7, val); + break; + case 8: + write_csr_num(CSR_PMPADDR8, val); + break; + case 9: + write_csr_num(CSR_PMPADDR9, val); + break; + case 10: + write_csr_num(CSR_PMPADDR10, val); + break; + case 11: + write_csr_num(CSR_PMPADDR11, val); + break; + case 12: + write_csr_num(CSR_PMPADDR12, val); + break; + case 13: + write_csr_num(CSR_PMPADDR13, val); + break; + case 14: + write_csr_num(CSR_PMPADDR14, val); + break; + case 15: + write_csr_num(CSR_PMPADDR15, val); + break; + } +} + /* Static Memory Pools for Boot-time PMP Initialization * * Defines kernel memory regions protected at boot. 
Each pool specifies From 890d80197cba8af606d3541390dac44932244543 Mon Sep 17 00:00:00 2001 From: HeatCrab Date: Sun, 2 Nov 2025 21:59:05 +0800 Subject: [PATCH 09/15] Implement PMP region management API Provides a complete set of functions for managing Physical Memory Protection regions in TOR mode, maintaining shadow configuration state synchronized with hardware CSRs. Hardware initialization clears all PMP regions by zeroing address and configuration registers, then initializes shadow state with default values for each region slot. This establishes clean hardware and software state for subsequent region configuration. Region configuration validates that the address range is valid and the region is not locked, then constructs configuration bytes with TOR addressing mode and permission bits. Both hardware CSRs and shadow state are updated atomically, with optional locking to prevent further modification. A helper function computes configuration register index and bit offset from region index, eliminating code duplication across multiple operations. Region disabling clears the configuration byte to remove protection while preserving other regions in the same configuration register. Region locking sets the lock bit to prevent modification until hardware reset. Region retrieval reads address range, permissions, priority, and lock status from shadow configuration. Access verification checks whether a memory operation falls within configured region boundaries by comparing address and size, then validates that region permissions match the requested operation type. Address register read helpers are marked unused as the shadow state design eliminates the need to read hardware registers during normal operation. They remain available for potential future use cases requiring hardware state verification. --- arch/riscv/pmp.c | 237 ++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 235 insertions(+), 2 deletions(-) diff --git a/arch/riscv/pmp.c b/arch/riscv/pmp.c index 79b5615e..b56dd7b8 100644 --- a/arch/riscv/pmp.c +++ b/arch/riscv/pmp.c @@ -55,8 +55,13 @@ static void write_pmpcfg(uint8_t idx, uint32_t val) } } -/* Read PMP address register by index (0-15) */ -static uint32_t read_pmpaddr(uint8_t idx) +/* Read PMP address register by index (0-15) + * + * Currently unused as the implementation maintains shadow state in memory + * rather than reading hardware registers. Provided for API completeness + * and potential future use cases requiring hardware state verification. 
+ */
+static uint32_t __attribute__((unused)) read_pmpaddr(uint8_t idx)
 {
     switch (idx) {
     case 0:
@@ -190,11 +195,46 @@ static const mempool_t kernel_mempools[] = {
 /* Global PMP configuration (shadow of hardware state) */
 static pmp_config_t pmp_global_config;
 
+/* Helper to compute pmpcfg register index and bit offset for a given region */
+static inline void pmp_get_cfg_indices(uint8_t region_idx,
+                                       uint8_t *cfg_idx,
+                                       uint8_t *cfg_offset)
+{
+    *cfg_idx = region_idx / 4;
+    *cfg_offset = (region_idx % 4) * 8;
+}
+
 pmp_config_t *pmp_get_config(void)
 {
     return &pmp_global_config;
 }
 
+int32_t pmp_init(pmp_config_t *config)
+{
+    if (!config)
+        return ERR_PMP_INVALID_REGION;
+
+    /* Clear all PMP regions in hardware and shadow configuration */
+    for (uint8_t i = 0; i < PMP_MAX_REGIONS; i++) {
+        write_pmpaddr(i, 0);
+        if (i % 4 == 0)
+            write_pmpcfg(i / 4, 0);
+
+        config->regions[i].addr_start = 0;
+        config->regions[i].addr_end = 0;
+        config->regions[i].permissions = 0;
+        config->regions[i].priority = PMP_PRIORITY_TEMPORARY;
+        config->regions[i].region_id = i;
+        config->regions[i].locked = 0;
+    }
+
+    config->region_count = 0;
+    config->next_region_idx = 0;
+    config->initialized = 1;
+
+    return ERR_OK;
+}
+
 int32_t pmp_init_pools(pmp_config_t *config,
                        const mempool_t *pools,
                        size_t count)
@@ -238,3 +278,196 @@ int32_t pmp_init_kernel(pmp_config_t *config)
 {
     return pmp_init_pools(config, kernel_mempools, KERNEL_MEMPOOL_COUNT);
 }
+
+int32_t pmp_set_region(pmp_config_t *config, const pmp_region_t *region)
+{
+    if (!config || !region)
+        return ERR_PMP_INVALID_REGION;
+
+    /* Validate region index is within bounds */
+    if (region->region_id >= PMP_MAX_REGIONS)
+        return ERR_PMP_INVALID_REGION;
+
+    /* Validate address range */
+    if (region->addr_start >= region->addr_end)
+        return ERR_PMP_ADDR_RANGE;
+
+    /* Check if region is already locked */
+    if (config->regions[region->region_id].locked)
+        return ERR_PMP_LOCKED;
+
+    uint8_t region_idx = region->region_id;
+    uint8_t pmpcfg_idx, pmpcfg_offset;
+    pmp_get_cfg_indices(region_idx, &pmpcfg_idx, &pmpcfg_offset);
+
+    /* Build configuration byte with TOR mode and permissions */
+    uint8_t pmpcfg_perm =
+        region->permissions & (PMPCFG_R | PMPCFG_W | PMPCFG_X);
+    uint8_t pmpcfg_byte = PMPCFG_A_TOR | pmpcfg_perm;
+    if (region->locked)
+        pmpcfg_byte |= PMPCFG_L;
+
+    /* Read current pmpcfg register to preserve other regions */
+    uint32_t pmpcfg_val = read_pmpcfg(pmpcfg_idx);
+
+    /* Clear the configuration byte for this region */
+    pmpcfg_val &= ~(0xFFU << pmpcfg_offset);
+
+    /* Write new configuration byte */
+    pmpcfg_val |= (pmpcfg_byte << pmpcfg_offset);
+
+    /* Write pmpaddr with the upper boundary (pmpaddr encodes addr >> 2) */
+    write_pmpaddr(region_idx, region->addr_end >> 2);
+
+    /* Write pmpcfg register with updated configuration */
+    write_pmpcfg(pmpcfg_idx, pmpcfg_val);
+
+    /* Update shadow configuration */
+    config->regions[region_idx].addr_start = region->addr_start;
+    config->regions[region_idx].addr_end = region->addr_end;
+    config->regions[region_idx].permissions = region->permissions;
+    config->regions[region_idx].priority = region->priority;
+    config->regions[region_idx].region_id = region_idx;
+    config->regions[region_idx].locked = region->locked;
+
+    /* Update region count if this is a newly used region */
+    if (region_idx >= config->region_count)
+        config->region_count = region_idx + 1;
+
+    return ERR_OK;
+}
+
+int32_t pmp_disable_region(pmp_config_t *config, uint8_t region_idx)
+{
+    if (!config)
+        return ERR_PMP_INVALID_REGION;
+
+    /* Validate region index is within bounds
*/ + if (region_idx >= PMP_MAX_REGIONS) + return ERR_PMP_INVALID_REGION; + + /* Check if region is already locked */ + if (config->regions[region_idx].locked) + return ERR_PMP_LOCKED; + + uint8_t pmpcfg_idx, pmpcfg_offset; + pmp_get_cfg_indices(region_idx, &pmpcfg_idx, &pmpcfg_offset); + + /* Read current pmpcfg register to preserve other regions */ + uint32_t pmpcfg_val = read_pmpcfg(pmpcfg_idx); + + /* Clear the configuration byte for this region (disables it) */ + pmpcfg_val &= ~(0xFFU << pmpcfg_offset); + + /* Write pmpcfg register with updated configuration */ + write_pmpcfg(pmpcfg_idx, pmpcfg_val); + + /* Update shadow configuration */ + config->regions[region_idx].addr_start = 0; + config->regions[region_idx].addr_end = 0; + config->regions[region_idx].permissions = 0; + + return ERR_OK; +} + +int32_t pmp_lock_region(pmp_config_t *config, uint8_t region_idx) +{ + if (!config) + return ERR_PMP_INVALID_REGION; + + /* Validate region index is within bounds */ + if (region_idx >= PMP_MAX_REGIONS) + return ERR_PMP_INVALID_REGION; + + uint8_t pmpcfg_idx, pmpcfg_offset; + pmp_get_cfg_indices(region_idx, &pmpcfg_idx, &pmpcfg_offset); + + /* Read current pmpcfg register to preserve other regions */ + uint32_t pmpcfg_val = read_pmpcfg(pmpcfg_idx); + + /* Get current configuration byte for this region */ + uint8_t pmpcfg_byte = (pmpcfg_val >> pmpcfg_offset) & 0xFFU; + + /* Set lock bit */ + pmpcfg_byte |= PMPCFG_L; + + /* Clear the configuration byte for this region */ + pmpcfg_val &= ~(0xFFU << pmpcfg_offset); + + /* Write new configuration byte with lock bit set */ + pmpcfg_val |= (pmpcfg_byte << pmpcfg_offset); + + /* Write pmpcfg register with updated configuration */ + write_pmpcfg(pmpcfg_idx, pmpcfg_val); + + /* Update shadow configuration */ + config->regions[region_idx].locked = 1; + + return ERR_OK; +} + +int32_t pmp_get_region(const pmp_config_t *config, + uint8_t region_idx, + pmp_region_t *region) +{ + if (!config || !region) + return ERR_PMP_INVALID_REGION; + + /* Validate region index is within bounds */ + if (region_idx >= PMP_MAX_REGIONS) + return ERR_PMP_INVALID_REGION; + + uint8_t pmpcfg_idx, pmpcfg_offset; + pmp_get_cfg_indices(region_idx, &pmpcfg_idx, &pmpcfg_offset); + + /* Read the address and configuration from shadow configuration */ + region->addr_start = config->regions[region_idx].addr_start; + region->addr_end = config->regions[region_idx].addr_end; + region->permissions = config->regions[region_idx].permissions; + region->priority = config->regions[region_idx].priority; + region->region_id = region_idx; + region->locked = config->regions[region_idx].locked; + + return ERR_OK; +} + +int32_t pmp_check_access(const pmp_config_t *config, + uint32_t addr, + uint32_t size, + uint8_t is_write, + uint8_t is_execute) +{ + if (!config) + return ERR_PMP_INVALID_REGION; + + uint32_t access_end = addr + size; + + /* In TOR mode, check all regions in priority order */ + for (uint8_t i = 0; i < config->region_count; i++) { + const pmp_region_t *region = &config->regions[i]; + + /* Skip disabled regions */ + if (region->addr_start == 0 && region->addr_end == 0) + continue; + + /* Check if access falls within this region */ + if (addr >= region->addr_start && access_end <= region->addr_end) { + /* Verify permissions match access type */ + uint8_t required_perm = 0; + if (is_write) + required_perm |= PMPCFG_W; + if (is_execute) + required_perm |= PMPCFG_X; + if (!is_write && !is_execute) + required_perm = PMPCFG_R; + + if ((region->permissions & required_perm) == required_perm) + 
return 1; /* Access allowed */ + else + return 0; /* Access denied */ + } + } + + /* Access not covered by any region */ + return 0; +} From c7832f6827037d7dac0656fbbf4a6f516b8c528b Mon Sep 17 00:00:00 2001 From: HeatCrab Date: Mon, 3 Nov 2025 17:14:57 +0800 Subject: [PATCH 10/15] Handle PMP access faults with dynamic region loading When a task accesses memory not currently loaded in a PMP region, the hardware raises an access fault. Rather than panicking, we now attempt recovery by dynamically loading the required region, enabling tasks to access more memory than can fit simultaneously in the 16 hardware regions. The ISR captures and passes the faulting memory address from the mtval CSR to the trap handler. The handler uses this address to locate the corresponding flexpage in the task's memory space. If all regions are in use, we select a victim and evict it to make space. This required exposing internal region management functions so the handler can invoke them, and simplifying documentation at implementation sites since detailed documentation now resides in headers. --- arch/riscv/boot.c | 4 ++-- arch/riscv/hal.c | 11 +++++++++++ arch/riscv/pmp.c | 38 ++++++++++++++++++++++++++++++++++++++ arch/riscv/pmp.h | 12 ++++++++++++ include/sys/memprot.h | 23 +++++++++++++++++++++++ kernel/memprot.c | 31 +++++-------------------------- 6 files changed, 91 insertions(+), 28 deletions(-) diff --git a/arch/riscv/boot.c b/arch/riscv/boot.c index 8e46f4c9..f268c66e 100644 --- a/arch/riscv/boot.c +++ b/arch/riscv/boot.c @@ -16,7 +16,7 @@ extern uint32_t _sbss, _ebss; /* C entry points */ void main(void); -void do_trap(uint32_t cause, uint32_t epc); +void do_trap(uint32_t cause, uint32_t epc, uint32_t mtval); void hal_panic(void); /* Machine-mode entry point ('_entry'). This is the first code executed on @@ -120,7 +120,7 @@ __attribute__((naked, aligned(4))) void _isr(void) * 48: a4, 52: a5, 56: a6, 60: a7, 64: s2, 68: s3 * 72: s4, 76: s5, 80: s6, 84: s7, 88: s8, 92: s9 * 96: s10, 100:s11, 104:t3, 108: t4, 112: t5, 116: t6 - * 120: mcause, 124: mepc + * 120: mcause, 124: mepc, 128: mtval */ "sw ra, 0*4(sp)\n" "sw gp, 1*4(sp)\n" diff --git a/arch/riscv/hal.c b/arch/riscv/hal.c index 7ad5806f..6af30560 100644 --- a/arch/riscv/hal.c +++ b/arch/riscv/hal.c @@ -3,6 +3,7 @@ #include #include "csr.h" +#include "pmp.h" #include "private/stdio.h" #include "private/utils.h" @@ -427,6 +428,16 @@ uint32_t do_trap(uint32_t cause, uint32_t epc, uint32_t isr_sp) return pending_switch_sp ? 
(uint32_t) pending_switch_sp : isr_sp; } + /* Attempt to recover PMP access faults (code 5 = load fault, 7 = store + * fault) */ + if (code == 5 || code == 7) { + uint32_t mtval = read_csr(mtval); + if (pmp_handle_access_fault(mtval, code == 7) == 0) { + /* PMP fault handled successfully, return current frame */ + return isr_sp; + } + } + /* Print exception info via direct UART (safe in trap context) */ trap_puts("[EXCEPTION] "); if (code < ARRAY_SIZE(exc_msg) && exc_msg[code]) diff --git a/arch/riscv/pmp.c b/arch/riscv/pmp.c index b56dd7b8..dc0b94d8 100644 --- a/arch/riscv/pmp.c +++ b/arch/riscv/pmp.c @@ -4,6 +4,7 @@ */ #include +#include #include "csr.h" #include "pmp.h" @@ -471,3 +472,40 @@ int32_t pmp_check_access(const pmp_config_t *config, /* Access not covered by any region */ return 0; } + +int32_t pmp_handle_access_fault(uint32_t fault_addr, uint8_t is_write) +{ + if (!kcb || !kcb->task_current || !kcb->task_current->data) + return -1; + + memspace_t *mspace = ((tcb_t *) kcb->task_current->data)->mspace; + if (!mspace) + return -1; + + /* Find flexpage containing faulting address */ + fpage_t *target_fpage = NULL; + for (fpage_t *fp = mspace->first; fp; fp = fp->as_next) { + if (fault_addr >= fp->base && fault_addr < (fp->base + fp->size)) { + target_fpage = fp; + break; + } + } + + if (!target_fpage || target_fpage->pmp_id != 0) + return -1; + + pmp_config_t *config = pmp_get_config(); + if (!config) + return -1; + + /* Load into available region or evict victim */ + if (config->next_region_idx < PMP_MAX_REGIONS) + return pmp_load_fpage(target_fpage, config->next_region_idx); + + fpage_t *victim = select_victim_fpage(mspace); + if (!victim) + return -1; + + int32_t ret = pmp_evict_fpage(victim); + return (ret == 0) ? pmp_load_fpage(target_fpage, victim->pmp_id) : ret; +} diff --git a/arch/riscv/pmp.h b/arch/riscv/pmp.h index 1bcc8051..6d9319ee 100644 --- a/arch/riscv/pmp.h +++ b/arch/riscv/pmp.h @@ -113,3 +113,15 @@ int32_t pmp_init_pools(pmp_config_t *config, * Returns 0 on success, or negative error code on failure. */ int32_t pmp_init_kernel(pmp_config_t *config); + +/* Handles PMP access violations (exception codes 5 and 7). + * + * Attempts to recover from PMP access faults by loading the required memory + * region into a hardware PMP region. If all 16 regions are in use, selects a + * victim for eviction and reuses its region. + * + * @fault_addr : The faulting memory address (from mtval CSR) + * @is_write : 1 for store/AMO access (exception code 7), 0 for load (code 5) + * Returns 0 on successful recovery, negative error code on failure. + */ +int32_t pmp_handle_access_fault(uint32_t fault_addr, uint8_t is_write); diff --git a/include/sys/memprot.h b/include/sys/memprot.h index b1cb41c5..ac85d00f 100644 --- a/include/sys/memprot.h +++ b/include/sys/memprot.h @@ -105,3 +105,26 @@ memspace_t *mo_memspace_create(uint32_t as_id, uint32_t shared); * @mspace : Pointer to memory space to destroy */ void mo_memspace_destroy(memspace_t *mspace); + +/* PMP Hardware Loading Functions */ + +/* Loads a flexpage into a PMP hardware region. + * @fpage : Pointer to flexpage to load + * @region_idx : Hardware PMP region index (0-15) + * Returns 0 on success, or negative error code on failure. + */ +int32_t pmp_load_fpage(fpage_t *fpage, uint8_t region_idx); + +/* Evicts a flexpage from its PMP hardware region. + * @fpage : Pointer to flexpage to evict + * Returns 0 on success, or negative error code on failure. 
+ */ +int32_t pmp_evict_fpage(fpage_t *fpage); + +/* Victim Selection for PMP Region Eviction + * + * Selects a flexpage for eviction using priority-based algorithm. + * @mspace : Pointer to memory space + * Returns pointer to victim flexpage, or NULL if no evictable page found. + */ +fpage_t *select_victim_fpage(memspace_t *mspace); diff --git a/kernel/memprot.c b/kernel/memprot.c index 3f2bfbbf..71298b45 100644 --- a/kernel/memprot.c +++ b/kernel/memprot.c @@ -44,11 +44,7 @@ void mo_fpage_destroy(fpage_t *fpage) free(fpage); } -/* Selects victim flexpage for eviction using priority-based algorithm. - * - * @mspace : Pointer to memory space - * Returns pointer to victim flexpage, or NULL if no evictable page found. - */ +/* Selects victim flexpage for eviction using priority-based algorithm */ fpage_t *select_victim_fpage(memspace_t *mspace) { if (!mspace) @@ -69,12 +65,7 @@ fpage_t *select_victim_fpage(memspace_t *mspace) return victim; } -/* Loads a flexpage into a PMP hardware region. - * - * @fpage : Pointer to flexpage to load - * @region_idx : Hardware PMP region index (0-15) - * Returns 0 on success, or negative error code on failure. - */ +/* Loads a flexpage into a PMP hardware region */ int32_t pmp_load_fpage(fpage_t *fpage, uint8_t region_idx) { if (!fpage) @@ -102,11 +93,7 @@ int32_t pmp_load_fpage(fpage_t *fpage, uint8_t region_idx) return ret; } -/* Evicts a flexpage from its PMP hardware region. - * - * @fpage : Pointer to flexpage to evict - * Returns 0 on success, or negative error code on failure. - */ +/* Evicts a flexpage from its PMP hardware region */ int32_t pmp_evict_fpage(fpage_t *fpage) { if (!fpage) @@ -128,12 +115,7 @@ int32_t pmp_evict_fpage(fpage_t *fpage) return ret; } -/* Creates and initializes a memory space. - * - * @as_id : Memory space identifier - * @shared : Whether this space can be shared across tasks - * Returns pointer to created memory space, or NULL on failure. - */ +/* Creates and initializes a memory space */ memspace_t *mo_memspace_create(uint32_t as_id, uint32_t shared) { memspace_t *mspace = malloc(sizeof(memspace_t)); @@ -149,10 +131,7 @@ memspace_t *mo_memspace_create(uint32_t as_id, uint32_t shared) return mspace; } -/* Destroys a memory space and all its flexpages. - * - * @mspace : Pointer to memory space to destroy - */ +/* Destroys a memory space and all its flexpages */ void mo_memspace_destroy(memspace_t *mspace) { if (!mspace) From 9aac19df3ebd87c05ab5137f6c38a731128926c4 Mon Sep 17 00:00:00 2001 From: HeatCrab Date: Thu, 6 Nov 2025 17:51:44 +0800 Subject: [PATCH 11/15] Enable PMP hardware at boot time Configure memory protection for kernel text, data, BSS, heap, and stack regions during hardware initialization. Halt on setup failure. --- arch/riscv/hal.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/arch/riscv/hal.c b/arch/riscv/hal.c index 6af30560..7990677e 100644 --- a/arch/riscv/hal.c +++ b/arch/riscv/hal.c @@ -274,6 +274,12 @@ static void uart_init(uint32_t baud) void hal_hardware_init(void) { uart_init(USART_BAUD); + + /* Initialize PMP hardware with kernel memory regions */ + pmp_config_t *pmp_config = pmp_get_config(); + if (pmp_init_kernel(pmp_config) != 0) + hal_panic(); + /* Set the first timer interrupt. 
Subsequent interrupts are set in ISR */
     mtimecmp_w(mtime_r() + (F_CPU / F_TIMER));
     /* Install low-level I/O handlers for the C standard library */

From 4d8a3b24ed3ac6ee0aa3a5a0f877cc74f3268375 Mon Sep 17 00:00:00 2001
From: HeatCrab
Date: Thu, 6 Nov 2025 18:02:15 +0800
Subject: [PATCH 12/15] Implement PMP context switching for task isolation

Add per-task memory space switching during context transitions. Evicts
the old task's dynamic regions and loads the new task's regions into
available hardware slots while preserving locked kernel regions.
---
 arch/riscv/pmp.c | 36 ++++++++++++++++++++++++++++++++++++
 arch/riscv/pmp.h | 12 ++++++++++++
 2 files changed, 48 insertions(+)

diff --git a/arch/riscv/pmp.c b/arch/riscv/pmp.c
index dc0b94d8..12ec8107 100644
--- a/arch/riscv/pmp.c
+++ b/arch/riscv/pmp.c
@@ -509,3 +509,39 @@ int32_t pmp_handle_access_fault(uint32_t fault_addr, uint8_t is_write)
     int32_t ret = pmp_evict_fpage(victim);
     return (ret == 0) ? pmp_load_fpage(target_fpage, victim->pmp_id) : ret;
 }
+
+int32_t pmp_switch_context(memspace_t *old_mspace, memspace_t *new_mspace)
+{
+    if (old_mspace == new_mspace)
+        return 0;
+
+    pmp_config_t *config = pmp_get_config();
+    if (!config)
+        return -1;
+
+    /* Evict old task's dynamic regions */
+    if (old_mspace) {
+        for (fpage_t *fp = old_mspace->pmp_first; fp; fp = fp->pmp_next) {
+            uint8_t region_id = fp->pmp_id;
+            if (region_id != 0 && !config->regions[region_id].locked) {
+                pmp_disable_region(config, region_id);
+                fp->pmp_id = 0;
+            }
+        }
+    }
+
+    /* Load new task's regions into available slots */
+    if (new_mspace) {
+        uint8_t available_slots = PMP_MAX_REGIONS - config->region_count;
+        uint8_t loaded_count = 0;
+
+        for (fpage_t *fp = new_mspace->first;
+             fp && loaded_count < available_slots; fp = fp->as_next) {
+            uint8_t region_idx = config->region_count; /* next free slot */
+            if (pmp_load_fpage(fp, region_idx) == 0)
+                loaded_count++;
+        }
+    }
+
+    return 0;
+}
diff --git a/arch/riscv/pmp.h b/arch/riscv/pmp.h
index 6d9319ee..98cd06d6 100644
--- a/arch/riscv/pmp.h
+++ b/arch/riscv/pmp.h
@@ -125,3 +125,15 @@
  * Returns 0 on successful recovery, negative error code on failure.
  */
 int32_t pmp_handle_access_fault(uint32_t fault_addr, uint8_t is_write);
+
+/* Switches PMP configuration during task context switch.
+ *
+ * Evicts the old task's dynamic regions from hardware and loads the new
+ * task's regions into available PMP slots. Kernel regions marked as locked
+ * are preserved across all context switches.
+ *
+ * @old_mspace : Memory space of task being switched out (can be NULL)
+ * @new_mspace : Memory space of task being switched in (can be NULL)
+ * Returns 0 on success, negative error code on failure.
+ */
+int32_t pmp_switch_context(memspace_t *old_mspace, memspace_t *new_mspace);

From b6ab70181249e299e317c70664d5efabea347921 Mon Sep 17 00:00:00 2001
From: HeatCrab
Date: Sun, 9 Nov 2025 22:03:57 +0800
Subject: [PATCH 13/15] Integrate PMP context switching into dispatcher

Switch memory protection configuration during task context switches for
both preemptive and cooperative scheduling. The old task's memory space
is captured before the scheduler updates its internal state, allowing
both old and new memory spaces to be passed to the PMP switching logic.
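
Both scheduling paths reduce to the same capture-then-switch pattern
(a sketch distilled from the diff below):

    tcb_t *prev = (tcb_t *) kcb->task_current->data; /* capture first */
    sched_select_next_task();             /* updates kcb->task_current */
    tcb_t *next = (tcb_t *) kcb->task_current->data;
    pmp_switch_context(prev->mspace, next->mspace);
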
From b6ab70181249e299e317c70664d5efabea347921 Mon Sep 17 00:00:00 2001
From: HeatCrab
Date: Sun, 9 Nov 2025 22:03:57 +0800
Subject: [PATCH 13/15] Integrate PMP context switching into dispatcher

Switch memory protection configuration during task context switches for
both preemptive and cooperative scheduling. The old task's memory space is
captured before the scheduler updates its internal state, so that both the
old and new memory spaces can be passed to the PMP switching logic.
---
 kernel/task.c | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/kernel/task.c b/kernel/task.c
index c9973e19..21b3c45f 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -8,6 +8,7 @@
 #include
 #include
 #include
+#include
 #include

 #include "private/error.h"
@@ -616,6 +617,9 @@ void dispatch(void)
     next_task->state = TASK_RUNNING;
     next_task->time_slice = get_priority_timeslice(next_task->prio_level);
 
+    /* Switch PMP configuration if tasks have different memory spaces */
+    pmp_switch_context(prev_task->mspace, next_task->mspace);
+
     /* Perform context switch based on scheduling mode */
     if (kcb->preemptive) {
         /* Same task - no context switch needed */
@@ -675,7 +679,15 @@ void yield(void)
     /* In cooperative mode, delays are only processed on an explicit yield. */
     list_foreach(kcb->tasks, delay_update, NULL);
 
+    /* Save current task before scheduler modifies task_current */
+    tcb_t *prev_task = (tcb_t *) kcb->task_current->data;
+
     sched_select_next_task(); /* Use O(1) priority scheduler */
+
+    /* Switch PMP configuration if tasks have different memory spaces */
+    tcb_t *next_task = (tcb_t *) kcb->task_current->data;
+    pmp_switch_context(prev_task->mspace, next_task->mspace);
+
     hal_context_restore(((tcb_t *) kcb->task_current->data)->context, 1);
 }

From 6e39edc84eb9393cb48dde6357a94a1368cec45a Mon Sep 17 00:00:00 2001
From: HeatCrab
Date: Tue, 11 Nov 2025 00:53:40 +0800
Subject: [PATCH 14/15] Initialize per-task memory spaces at creation

Allocate a dedicated memory space for each task and register the task stack
as a flexpage. This establishes the memory protection metadata that will be
loaded into hardware regions during context switches.
---
 arch/riscv/pmp.h |  2 ++
 kernel/task.c    | 24 ++++++++++++++++++++++++
 2 files changed, 26 insertions(+)

diff --git a/arch/riscv/pmp.h b/arch/riscv/pmp.h
index 98cd06d6..e1f164f7 100644
--- a/arch/riscv/pmp.h
+++ b/arch/riscv/pmp.h
@@ -10,6 +10,8 @@
 #include
 #include
 
+#include "csr.h"
+
 /* PMP Region Priority Levels (lower value = higher priority)
  *
  * Used for eviction decisions when hardware PMP regions are exhausted.
diff --git a/kernel/task.c b/kernel/task.c
index 21b3c45f..a9519b18 100644
--- a/kernel/task.c
+++ b/kernel/task.c
@@ -760,6 +760,30 @@ static int32_t task_spawn_impl(void *task_entry,
         panic(ERR_STACK_ALLOC);
     }
 
+    /* Create memory space for task */
+    tcb->mspace = mo_memspace_create(kcb->next_tid, 0);
+    if (!tcb->mspace) {
+        free(tcb->stack);
+        free(tcb);
+        panic(ERR_TCB_ALLOC);
+    }
+
+    /* Register stack as flexpage */
+    fpage_t *stack_fpage =
+        mo_fpage_create((uint32_t) tcb->stack, new_stack_size,
+                        PMPCFG_R | PMPCFG_W, PMP_PRIORITY_STACK);
+    if (!stack_fpage) {
+        mo_memspace_destroy(tcb->mspace);
+        free(tcb->stack);
+        free(tcb);
+        panic(ERR_TCB_ALLOC);
+    }
+
+    /* Add stack to memory space */
+    stack_fpage->as_next = tcb->mspace->first;
+    tcb->mspace->first = stack_fpage;
+    tcb->mspace->pmp_stack = stack_fpage;
+
     /* Minimize critical section duration */
     CRITICAL_ENTER();
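
Only the stack is registered here, but the same metadata path generalizes to
other task-owned regions. A sketch of registering an extra read/write buffer
(register_buffer_sketch() is hypothetical; mo_fpage_create()'s argument
order follows the stack registration above):

    /* Sketch: wrap a task-owned buffer in a flexpage and prepend it to
     * the task's memory space, mirroring the stack path above. It will
     * be loaded into a hardware slot at the next context switch. */
    static int32_t register_buffer_sketch(tcb_t *tcb, void *buf, uint32_t len)
    {
        fpage_t *fp = mo_fpage_create((uint32_t) buf, len,
                                      PMPCFG_R | PMPCFG_W,
                                      PMP_PRIORITY_TEMPORARY);
        if (!fp)
            return -1; /* any negative error code fits the convention */

        fp->as_next = tcb->mspace->first;
        tcb->mspace->first = fp;
        return 0;
    }
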
From fa5a5880a1b8dd2b0a619cf4acc37cfdb18f1d9d Mon Sep 17 00:00:00 2001
From: HeatCrab
Date: Sun, 16 Nov 2025 15:59:06 +0800
Subject: [PATCH 15/15] Add PMP context switching test

Validate PMP hardware configuration during task context switches by reading
CSRs directly. Tests verify that kernel regions remain loaded and that PMP
state persists correctly across switches.

Since linmo runs in M-mode only, PMP cannot enforce per-task access
restrictions: M-mode accesses are checked only against locked entries, and
task regions are not locked. The test therefore focuses on infrastructure
correctness: CSR configuration, context switching mechanics, and flexpage
metadata management.

Test results: 30/30 checks pass. PMP CSRs show correct configuration with
kernel regions loaded at expected addresses.
---
 Makefile  |   2 +-
 app/pmp.c | 336 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 337 insertions(+), 1 deletion(-)
 create mode 100644 app/pmp.c

diff --git a/Makefile b/Makefile
index c780c30b..b39d82f1 100644
--- a/Makefile
+++ b/Makefile
@@ -29,7 +29,7 @@ deps += $(LIB_OBJS:%.o=%.o.d)
 APPS := coop echo hello mqueues semaphore mutex cond \
         pipes pipes_small pipes_struct prodcons progress \
         rtsched suspend test64 timer timer_kill \
-        cpubench test_libc umode
+        cpubench test_libc umode pmp
 
 # Output files for __link target
 IMAGE_BASE := $(BUILD_DIR)/image
diff --git a/app/pmp.c b/app/pmp.c
new file mode 100644
index 00000000..5c0e76ac
--- /dev/null
+++ b/app/pmp.c
@@ -0,0 +1,336 @@
+/* PMP Context Switching Test
+ *
+ * Validates that PMP hardware configuration is correctly managed during
+ * task context switches. Tests CSR configuration, region loading/unloading,
+ * and flexpage metadata maintenance.
+ */
+
+#include
+
+#include "private/error.h"
+
+/* Test configuration */
+#define MAX_ITERATIONS 5
+
+/* Test state counters */
+static int tests_passed = 0;
+static int tests_failed = 0;
+
+/* External kernel symbols */
+extern uint32_t _stext, _etext;
+extern uint32_t _sdata, _edata;
+extern uint32_t _sbss, _ebss;
+
+/* Helper to read PMP configuration CSR */
+static inline uint32_t read_pmpcfg0(void)
+{
+    uint32_t val;
+    asm volatile("csrr %0, 0x3A0" : "=r"(val));
+    return val;
+}
+
+/* Helper to read PMP address CSR */
+static inline uint32_t read_pmpaddr(int idx)
+{
+    uint32_t val;
+    switch (idx) {
+    case 0:
+        asm volatile("csrr %0, 0x3B0" : "=r"(val));
+        break;
+    case 1:
+        asm volatile("csrr %0, 0x3B1" : "=r"(val));
+        break;
+    case 2:
+        asm volatile("csrr %0, 0x3B2" : "=r"(val));
+        break;
+    case 3:
+        asm volatile("csrr %0, 0x3B3" : "=r"(val));
+        break;
+    case 4:
+        asm volatile("csrr %0, 0x3B4" : "=r"(val));
+        break;
+    case 5:
+        asm volatile("csrr %0, 0x3B5" : "=r"(val));
+        break;
+    default:
+        val = 0;
+        break;
+    }
+    return val;
+}
+
+/* Test Task A: Verify PMP CSR configuration */
+void task_a(void)
+{
+    printf("Task A (ID %d) starting...\n", mo_task_id());
+
+    for (int i = 0; i < MAX_ITERATIONS; i++) {
+        printf("Task A: Iteration %d\n", i + 1);
+
+        /* Test A1: Read PMP configuration registers */
+        uint32_t pmpcfg0 = read_pmpcfg0();
+        printf("Task A: pmpcfg0 = 0x%08x\n", (unsigned int) pmpcfg0);
+
+        if (pmpcfg0 != 0) {
+            printf("Task A: PASS - PMP configuration is active\n");
+            tests_passed++;
+        } else {
+            printf("Task A: FAIL - PMP configuration is zero\n");
+            tests_failed++;
+        }
+
+        /* Test A2: Read kernel region addresses */
+        uint32_t pmpaddr0 = read_pmpaddr(0);
+        uint32_t pmpaddr1 = read_pmpaddr(1);
+        printf("Task A: pmpaddr0 = 0x%08x, pmpaddr1 = 0x%08x\n",
+               (unsigned int) pmpaddr0, (unsigned int) pmpaddr1);
+
+        if (pmpaddr0 != 0 || pmpaddr1 != 0) {
+            printf("Task A: PASS - Kernel regions configured\n");
+            tests_passed++;
+        } else {
+            printf("Task A: FAIL - Kernel regions not configured\n");
+            tests_failed++;
+        }
+
+        /* Test A3: Verify stack accessibility */
+        int local_var = 0xAAAA;
+        volatile int *stack_ptr = &local_var;
+        int read_val = *stack_ptr;
+
+        if (read_val == 0xAAAA) {
+            printf("Task A: PASS - Stack accessible\n");
+            tests_passed++;
+        } else {
+            printf("Task A: FAIL - Stack not accessible\n");
+            tests_failed++;
+        }
+
+        for (int j = 0; j < 3; j++)
+            mo_task_yield();
+    }
+
+    printf("Task A completed with %d passed, %d failed\n", tests_passed,
+           tests_failed);
+
+    while (1) {
+        for (int i = 0; i < 10; i++)
+            mo_task_yield();
+    }
+}
+
+/* Test Task B: Verify PMP state after context switch */
+void task_b(void)
+{
+    printf("Task B (ID %d) starting...\n", mo_task_id());
+
+    for (int i = 0; i < MAX_ITERATIONS; i++) {
+        printf("Task B: Iteration %d\n", i + 1);
+
+        /* Test B1: Verify PMP configuration persists across switches */
+        uint32_t pmpcfg0 = read_pmpcfg0();
+        printf("Task B: pmpcfg0 = 0x%08x\n", (unsigned int) pmpcfg0);
+
+        if (pmpcfg0 != 0) {
+            printf("Task B: PASS - PMP active after context switch\n");
+            tests_passed++;
+        } else {
+            printf("Task B: FAIL - PMP inactive after switch\n");
+            tests_failed++;
+        }
+
+        /* Test B2: Verify own stack is accessible */
+        int local_var = 0xBBBB;
+        if (local_var == 0xBBBB) {
+            printf("Task B: PASS - Stack accessible\n");
+            tests_passed++;
+        } else {
+            printf("Task B: FAIL - Stack not accessible\n");
+            tests_failed++;
+        }
+
+        /* Test B3: Check kernel regions still configured */
+        uint32_t pmpaddr0 = read_pmpaddr(0);
+        if (pmpaddr0 != 0) {
+            printf("Task B: PASS - Kernel regions preserved\n");
+            tests_passed++;
+        } else {
+            printf("Task B: FAIL - Kernel regions lost\n");
+            tests_failed++;
+        }
+
+        for (int j = 0; j < 3; j++)
+            mo_task_yield();
+    }
+
+    printf("Task B completed with %d passed, %d failed\n", tests_passed,
+           tests_failed);
+
+    while (1) {
+        for (int i = 0; i < 10; i++)
+            mo_task_yield();
+    }
+}
+
+/* Test Task C: Verify PMP CSR consistency */
+void task_c(void)
+{
+    printf("Task C (ID %d) starting...\n", mo_task_id());
+
+    for (int i = 0; i < MAX_ITERATIONS; i++) {
+        printf("Task C: Iteration %d\n", i + 1);
+
+        /* Test C1: Comprehensive CSR check */
+        uint32_t pmpcfg0 = read_pmpcfg0();
+        uint32_t pmpaddr0 = read_pmpaddr(0);
+        uint32_t pmpaddr1 = read_pmpaddr(1);
+        uint32_t pmpaddr2 = read_pmpaddr(2);
+
+        printf(
+            "Task C: CSR state: cfg0=0x%08x addr0=0x%08x addr1=0x%08x "
+            "addr2=0x%08x\n",
+            (unsigned int) pmpcfg0, (unsigned int) pmpaddr0,
+            (unsigned int) pmpaddr1, (unsigned int) pmpaddr2);
+
+        bool csr_configured = (pmpcfg0 != 0) && (pmpaddr0 != 0);
+        if (csr_configured) {
+            printf("Task C: PASS - PMP CSRs properly configured\n");
+            tests_passed++;
+        } else {
+            printf("Task C: FAIL - PMP CSRs not configured\n");
+            tests_failed++;
+        }
+
+        /* Test C2: Stack operations */
+        int test_array[5];
+        for (int j = 0; j < 5; j++)
+            test_array[j] = j;
+
+        int sum = 0;
+        for (int j = 0; j < 5; j++)
+            sum += test_array[j];
+
+        if (sum == 10) {
+            printf("Task C: PASS - Stack array operations\n");
+            tests_passed++;
+        } else {
+            printf("Task C: FAIL - Stack array operations\n");
+            tests_failed++;
+        }
+
+        for (int j = 0; j < 3; j++)
+            mo_task_yield();
+    }
+
+    printf("Task C completed with %d passed, %d failed\n", tests_passed,
+           tests_failed);
+
+    while (1) {
+        for (int i = 0; i < 10; i++)
+            mo_task_yield();
+    }
+}
+
+/* Monitor task validates test results */
+void monitor_task(void)
+{
+    printf("Monitor starting...\n");
+    printf("Testing PMP CSR configuration and context switching:\n");
+    printf("  Kernel text: %p - %p\n", (void *) &_stext, (void *) &_etext);
+    printf("  Kernel data: %p - %p\n", (void *) &_sdata, (void *) &_edata);
+    printf("  Kernel bss:  %p - %p\n\n", (void *) &_sbss, (void *) &_ebss);
+
+    /* Read initial PMP state */
+    uint32_t initial_pmpcfg0 = read_pmpcfg0();
+    uint32_t initial_pmpaddr0 = read_pmpaddr(0);
+    printf("Monitor: Initial PMP state:\n");
+    printf("  pmpcfg0 = 0x%08x\n", (unsigned int) initial_pmpcfg0);
+    printf("  pmpaddr0 = 0x%08x\n\n", (unsigned int) initial_pmpaddr0);
+
+    int cycles = 0;
+
+    while (cycles < 100) {
+        cycles++;
+
+        if (cycles % 20 == 0) {
+            printf("Monitor: Cycle %d - Passed=%d, Failed=%d\n", cycles,
+                   tests_passed, tests_failed);
+
+            /* Periodic CSR check */
+            uint32_t current_pmpcfg0 = read_pmpcfg0();
+            printf("Monitor: Current pmpcfg0 = 0x%08x\n",
+                   (unsigned int) current_pmpcfg0);
+        }
+
+        /* Check if all tasks completed */
+        if (tests_passed >= (3 * MAX_ITERATIONS * 2) && tests_failed == 0) {
+            printf("Monitor: All tasks completed successfully\n");
+            break;
+        }
+
+        for (int i = 0; i < 5; i++)
+            mo_task_yield();
+    }
+
+    /* Final report */
+    printf("\n=== FINAL RESULTS ===\n");
+    printf("Tests passed: %d\n", tests_passed);
+    printf("Tests failed: %d\n", tests_failed);
+
+    /* Test validation */
+    bool all_passed = (tests_failed == 0);
+    bool good_coverage = (tests_passed >= (3 * MAX_ITERATIONS * 2));
+    bool pmp_active = (read_pmpcfg0() != 0);
+
+    printf("\nTest Results:\n");
+    printf("All tests passed: %s\n", all_passed ? "PASS" : "FAIL");
+    printf("Test coverage: %s\n", good_coverage ? "PASS" : "FAIL");
+    printf("PMP still active: %s\n", pmp_active ? "PASS" : "FAIL");
+    printf("Overall: %s\n",
+           (all_passed && good_coverage && pmp_active) ? "PASS" : "FAIL");
+
+    printf("PMP context switching test completed.\n");
+
+    while (1) {
+        for (int i = 0; i < 20; i++)
+            mo_task_yield();
+    }
+}
+
+/* Simple idle task */
+void idle_task(void)
+{
+    while (1)
+        mo_task_yield();
+}
+
+/* Application entry point */
+int32_t app_main(void)
+{
+    printf("PMP Context Switching Test Starting...\n");
+    printf("Testing PMP CSR configuration and task isolation\n");
+    printf("Kernel memory regions:\n");
+    printf("  text: %p to %p\n", (void *) &_stext, (void *) &_etext);
+    printf("  data: %p to %p\n", (void *) &_sdata, (void *) &_edata);
+    printf("  bss:  %p to %p\n\n", (void *) &_sbss, (void *) &_ebss);
+
+    /* Create test tasks */
+    int32_t task_a_id = mo_task_spawn(task_a, 1024);
+    int32_t task_b_id = mo_task_spawn(task_b, 1024);
+    int32_t task_c_id = mo_task_spawn(task_c, 1024);
+    int32_t monitor_id = mo_task_spawn(monitor_task, 1024);
+    int32_t idle_id = mo_task_spawn(idle_task, 512);
+
+    if (task_a_id < 0 || task_b_id < 0 || task_c_id < 0 || monitor_id < 0 ||
+        idle_id < 0) {
+        printf("FATAL: Failed to create test tasks\n");
+        return false;
+    }
+
+    printf("Tasks created: A=%d, B=%d, C=%d, Monitor=%d, Idle=%d\n",
+           (int) task_a_id, (int) task_b_id, (int) task_c_id,
+           (int) monitor_id, (int) idle_id);
+
+    printf("Starting test...\n");
+    return true; /* Enable preemptive scheduling */
+}
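
When reading the raw CSR dumps this test prints, a small per-entry decoder
built on the accessor macros from arch/riscv/csr.h can help;
pmp_print_cfg_sketch() is an illustration, not part of the series:

    /* Sketch: decode one 8-bit pmpcfg field, e.g. (pmpcfg0 >> 8) & 0xff
     * for entry 1, using the csr.h accessor macros. */
    static void pmp_print_cfg_sketch(uint8_t cfg)
    {
        printf("A=%u perm=%c%c%c %s\n", (unsigned int) PMPCFG_GET_A(cfg),
               (cfg & PMPCFG_R) ? 'R' : '-', (cfg & PMPCFG_W) ? 'W' : '-',
               (cfg & PMPCFG_X) ? 'X' : '-',
               PMPCFG_IS_LOCKED(cfg) ? "locked" : "unlocked");
    }

For entry 1 of pmpcfg0, pmp_print_cfg_sketch((read_pmpcfg0() >> 8) & 0xff)
would print the addressing mode, permission bits, and lock state behind the
hex values reported above.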